Dataset schema (column, dtype, value range; ⌀ marks nullable columns):

| column | dtype | range |
|---|---|---|
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 4 to 1.02M |
| ext | stringclasses | 8 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4 to 209 |
| max_stars_repo_name | stringlengths | 5 to 121 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 40 |
| max_stars_repo_licenses | listlengths | 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| max_issues_repo_path | stringlengths | 4 to 209 |
| max_issues_repo_name | stringlengths | 5 to 121 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 40 |
| max_issues_repo_licenses | listlengths | 1 to 10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| max_forks_repo_path | stringlengths | 4 to 209 |
| max_forks_repo_name | stringlengths | 5 to 121 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 40 |
| max_forks_repo_licenses | listlengths | 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| content | stringlengths | 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
| hexsha: 9f40a11d291c7b6a22ac89e14bcc0b90daa9b78f | size: 9,736 | ext: py | lang: Python |
| file: sdk/python/pulumi_azure_nextgen/documentdb/v20150401/database_account_gremlin_graph.py | repo: pulumi/pulumi-azure-nextgen | head: 452736b0a1cf584c2d4c04666e017af6e9b2c15c | licenses: ["Apache-2.0"] |
| stars: 31 (2020-09-21T09:41:01.000Z to 2021-02-26T13:21:59.000Z) | issues: 231 (2020-09-21T09:38:45.000Z to 2021-03-01T11:16:03.000Z) | forks: 4 (2020-09-29T14:14:59.000Z to 2021-02-10T20:38:16.000Z) |
| content: |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['DatabaseAccountGremlinGraph']
class DatabaseAccountGremlinGraph(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
graph_name: Optional[pulumi.Input[str]] = None,
options: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource: Optional[pulumi.Input[pulumi.InputType['GremlinGraphResourceArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
An Azure Cosmos DB Gremlin graph.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Cosmos DB database account name.
:param pulumi.Input[str] database_name: Cosmos DB database name.
:param pulumi.Input[str] graph_name: Cosmos DB graph name.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] options: A key-value pair of options to be applied for the request. This corresponds to the headers sent with the request.
:param pulumi.Input[pulumi.InputType['GremlinGraphResourceArgs']] resource: The standard JSON format of a Gremlin graph
:param pulumi.Input[str] resource_group_name: Name of an Azure resource group.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__['account_name'] = account_name
if database_name is None and not opts.urn:
raise TypeError("Missing required property 'database_name'")
__props__['database_name'] = database_name
__props__['graph_name'] = graph_name
if options is None and not opts.urn:
raise TypeError("Missing required property 'options'")
__props__['options'] = options
if resource is None and not opts.urn:
raise TypeError("Missing required property 'resource'")
__props__['resource'] = resource
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['conflict_resolution_policy'] = None
__props__['default_ttl'] = None
__props__['etag'] = None
__props__['indexing_policy'] = None
__props__['location'] = None
__props__['name'] = None
__props__['partition_key'] = None
__props__['rid'] = None
__props__['tags'] = None
__props__['ts'] = None
__props__['type'] = None
__props__['unique_key_policy'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:documentdb:DatabaseAccountGremlinGraph"), pulumi.Alias(type_="azure-nextgen:documentdb/latest:DatabaseAccountGremlinGraph"), pulumi.Alias(type_="azure-nextgen:documentdb/v20150408:DatabaseAccountGremlinGraph"), pulumi.Alias(type_="azure-nextgen:documentdb/v20151106:DatabaseAccountGremlinGraph"), pulumi.Alias(type_="azure-nextgen:documentdb/v20160319:DatabaseAccountGremlinGraph"), pulumi.Alias(type_="azure-nextgen:documentdb/v20160331:DatabaseAccountGremlinGraph")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(DatabaseAccountGremlinGraph, __self__).__init__(
'azure-nextgen:documentdb/v20150401:DatabaseAccountGremlinGraph',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'DatabaseAccountGremlinGraph':
"""
Get an existing DatabaseAccountGremlinGraph resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return DatabaseAccountGremlinGraph(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="conflictResolutionPolicy")
def conflict_resolution_policy(self) -> pulumi.Output[Optional['outputs.ConflictResolutionPolicyResponse']]:
"""
The conflict resolution policy for the graph.
"""
return pulumi.get(self, "conflict_resolution_policy")
@property
@pulumi.getter(name="defaultTtl")
def default_ttl(self) -> pulumi.Output[Optional[int]]:
"""
Default time to live
"""
return pulumi.get(self, "default_ttl")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
A system generated property representing the resource etag required for optimistic concurrency control.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="indexingPolicy")
def indexing_policy(self) -> pulumi.Output[Optional['outputs.IndexingPolicyResponse']]:
"""
The configuration of the indexing policy. By default, the indexing is automatic for all document paths within the graph
"""
return pulumi.get(self, "indexing_policy")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the database account.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partitionKey")
def partition_key(self) -> pulumi.Output[Optional['outputs.ContainerPartitionKeyResponse']]:
"""
The configuration of the partition key to be used for partitioning data into multiple partitions
"""
return pulumi.get(self, "partition_key")
@property
@pulumi.getter
def rid(self) -> pulumi.Output[Optional[str]]:
"""
A system generated property. A unique identifier.
"""
return pulumi.get(self, "rid")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def ts(self) -> pulumi.Output[Optional[Any]]:
"""
A system generated property that denotes the last updated timestamp of the resource.
"""
return pulumi.get(self, "ts")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueKeyPolicy")
def unique_key_policy(self) -> pulumi.Output[Optional['outputs.UniqueKeyPolicyResponse']]:
"""
The unique key policy configuration for specifying uniqueness constraints on documents in the collection in the Azure Cosmos DB service.
"""
return pulumi.get(self, "unique_key_policy")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
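A minimal usage sketch for the resource above. The module path is inferred from the file location and every resource name is a placeholder; the required inputs mirror the constructor's checks (account_name, database_name, options, resource, resource_group_name):

```python
import pulumi
import pulumi_azure_nextgen.documentdb.v20150401 as documentdb  # module path inferred from the file location

# All names below are placeholders, not taken from the dataset.
graph = documentdb.DatabaseAccountGremlinGraph(
    "example-graph",
    account_name="example-cosmos-account",
    database_name="example-gremlin-db",
    graph_name="people",
    resource_group_name="example-rg",
    options={},                 # request options/headers applied to the create call
    resource={"id": "people"},  # standard JSON format of the Gremlin graph
)

pulumi.export("graph_name", graph.name)
```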
| avg_line_length: 45.283721 | max_line_length: 556 | alphanum_fraction: 0.660025 |

| hexsha: c419857388002549f95c972dc3084e59d8cda51d | size: 2,038 | ext: py | lang: Python |
| file: azure/mgmt/iothub/models/routing_event_hub_properties.py | repo: EnjoyLifeFund/py36pkgs | head: 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | licenses: ["MIT", "BSD-2-Clause", "BSD-3-Clause"] |
| stars: 2 (2020-07-29T14:22:17.000Z to 2020-11-06T18:47:40.000Z) | issues: 1 (2016-08-01T07:37:04.000Z to 2016-08-01T07:37:04.000Z) | forks: 1 (2020-12-12T21:04:41.000Z to 2020-12-12T21:04:41.000Z) |
| content: |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RoutingEventHubProperties(Model):
"""The properties related to an event hub endpoint.
:param connection_string: The connection string of the event hub endpoint.
:type connection_string: str
:param name: The name of the event hub endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum
length of 64 characters. The following names are reserved; events,
operationsMonitoringEvents, fileNotifications, $default. Endpoint names
must be unique across endpoint types.
:type name: str
:param subscription_id: The subscription identifier of the event hub
endpoint.
:type subscription_id: str
:param resource_group: The name of the resource group of the event hub
endpoint.
:type resource_group: str
"""
_validation = {
'connection_string': {'required': True},
'name': {'required': True, 'pattern': '^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(self, connection_string, name, subscription_id=None, resource_group=None):
self.connection_string = connection_string
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
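A small construction sketch for the model above, with placeholder values only; the import path is inferred from the file location. connection_string and name are required, and name must satisfy the ^[A-Za-z0-9-._]{1,64}$ pattern from _validation:

```python
from azure.mgmt.iothub.models import RoutingEventHubProperties

endpoint = RoutingEventHubProperties(
    connection_string="Endpoint=sb://example.servicebus.windows.net/;SharedAccessKeyName=send;SharedAccessKey=...",
    name="telemetry-eventhub",                               # must match the name pattern
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
    resource_group="example-rg",                             # placeholder
)
```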
| avg_line_length: 39.960784 | max_line_length: 91 | alphanum_fraction: 0.640824 |

| hexsha: 6dbf32bb145ef547e386504e78aaa8614acabad9 | size: 5,761 | ext: py | lang: Python |
| file: dymos/examples/battery_multibranch/doc/test_multibranch_trajectory_for_docs.py | repo: yonghoonlee/dymos | head: 602109eee4a1b061444dd2b45c7b1ed0ac1aa0f4 | licenses: ["Apache-2.0"] |
| stars: 104 (2018-09-08T16:52:27.000Z to 2022-03-10T23:35:30.000Z) | issues: 628 (2018-06-27T20:32:59.000Z to 2022-03-31T19:24:32.000Z) | forks: 46 (2018-06-27T20:54:07.000Z to 2021-12-19T07:23:32.000Z) |
| content: |
"""
Integration test for a battery+motor example that demonstrates phase branching in trajectories.
"""
import unittest
import matplotlib
matplotlib.use('Agg')
from openmdao.utils.testing_utils import use_tempdirs
@use_tempdirs
class TestBatteryBranchingPhasesForDocs(unittest.TestCase):
def test_basic(self):
import matplotlib.pyplot as plt
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal
import dymos as dm
from dymos.examples.battery_multibranch.battery_multibranch_ode import BatteryODE
from dymos.utils.lgl import lgl
prob = om.Problem()
opt = prob.driver = om.ScipyOptimizeDriver()
opt.declare_coloring()
opt.options['optimizer'] = 'SLSQP'
num_seg = 5
seg_ends, _ = lgl(num_seg + 1)
traj = prob.model.add_subsystem('traj', dm.Trajectory())
# First phase: normal operation.
transcription = dm.Radau(num_segments=num_seg, order=5, segment_ends=seg_ends, compressed=False)
phase0 = dm.Phase(ode_class=BatteryODE, transcription=transcription)
traj_p0 = traj.add_phase('phase0', phase0)
traj_p0.set_time_options(fix_initial=True, fix_duration=True)
traj_p0.add_state('state_of_charge', fix_initial=True, fix_final=False,
targets=['SOC'], rate_source='dXdt:SOC')
# Second phase: normal operation.
phase1 = dm.Phase(ode_class=BatteryODE, transcription=transcription)
traj_p1 = traj.add_phase('phase1', phase1)
traj_p1.set_time_options(fix_initial=False, fix_duration=True)
traj_p1.add_state('state_of_charge', fix_initial=False, fix_final=False,
targets=['SOC'], rate_source='dXdt:SOC')
traj_p1.add_objective('time', loc='final')
# Second phase, but with battery failure.
phase1_bfail = dm.Phase(ode_class=BatteryODE, ode_init_kwargs={'num_battery': 2},
transcription=transcription)
traj_p1_bfail = traj.add_phase('phase1_bfail', phase1_bfail)
traj_p1_bfail.set_time_options(fix_initial=False, fix_duration=True)
traj_p1_bfail.add_state('state_of_charge', fix_initial=False, fix_final=False,
targets=['SOC'], rate_source='dXdt:SOC')
# Second phase, but with motor failure.
phase1_mfail = dm.Phase(ode_class=BatteryODE, ode_init_kwargs={'num_motor': 2},
transcription=transcription)
traj_p1_mfail = traj.add_phase('phase1_mfail', phase1_mfail)
traj_p1_mfail.set_time_options(fix_initial=False, fix_duration=True)
traj_p1_mfail.add_state('state_of_charge', fix_initial=False, fix_final=False,
targets=['SOC'], rate_source='dXdt:SOC')
traj.link_phases(phases=['phase0', 'phase1'], vars=['state_of_charge', 'time'])
traj.link_phases(phases=['phase0', 'phase1_bfail'], vars=['state_of_charge', 'time'])
traj.link_phases(phases=['phase0', 'phase1_mfail'], vars=['state_of_charge', 'time'])
prob.model.options['assembled_jac_type'] = 'csc'
prob.model.linear_solver = om.DirectSolver(assemble_jac=True)
prob.setup()
prob['traj.phase0.t_initial'] = 0
prob['traj.phase0.t_duration'] = 1.0*3600
prob['traj.phase1.t_initial'] = 1.0*3600
prob['traj.phase1.t_duration'] = 1.0*3600
prob['traj.phase1_bfail.t_initial'] = 1.0*3600
prob['traj.phase1_bfail.t_duration'] = 1.0*3600
prob['traj.phase1_mfail.t_initial'] = 1.0*3600
prob['traj.phase1_mfail.t_duration'] = 1.0*3600
prob.set_solver_print(level=0)
dm.run_problem(prob)
soc0 = prob['traj.phase0.states:state_of_charge']
soc1 = prob['traj.phase1.states:state_of_charge']
soc1b = prob['traj.phase1_bfail.states:state_of_charge']
soc1m = prob['traj.phase1_mfail.states:state_of_charge']
# Final value for State of Charge in each segment should be a good test.
print('State of Charge after 1 hour')
assert_near_equal(soc0[-1], 0.63464982, 1e-6)
print('State of Charge after 2 hours')
assert_near_equal(soc1[-1], 0.23794217, 1e-6)
print('State of Charge after 2 hours, battery fails at 1 hour')
assert_near_equal(soc1b[-1], 0.0281523, 1e-6)
print('State of Charge after 2 hours, motor fails at 1 hour')
assert_near_equal(soc1m[-1], 0.18625395, 1e-6)
# Plot Results
t0 = prob['traj.phases.phase0.time.time']/3600
t1 = prob['traj.phases.phase1.time.time']/3600
t1b = prob['traj.phases.phase1_bfail.time.time']/3600
t1m = prob['traj.phases.phase1_mfail.time.time']/3600
plt.subplot(2, 1, 1)
plt.plot(t0, soc0, 'b')
plt.plot(t1, soc1, 'b')
plt.plot(t1b, soc1b, 'r')
plt.plot(t1m, soc1m, 'c')
plt.xlabel('Time (hour)')
plt.ylabel('State of Charge (percent)')
I_Li0 = prob['traj.phases.phase0.rhs_all.pwr_balance.I_Li']
I_Li1 = prob['traj.phases.phase1.rhs_all.pwr_balance.I_Li']
I_Li1b = prob['traj.phases.phase1_bfail.rhs_all.pwr_balance.I_Li']
I_Li1m = prob['traj.phases.phase1_mfail.rhs_all.pwr_balance.I_Li']
plt.subplot(2, 1, 2)
plt.plot(t0, I_Li0, 'b')
plt.plot(t1, I_Li1, 'b')
plt.plot(t1b, I_Li1b, 'r')
plt.plot(t1m, I_Li1m, 'c')
plt.xlabel('Time (hour)')
plt.ylabel('Line Current (A)')
plt.legend(['Phase 1', 'Phase 2', 'Phase 2 Battery Fail', 'Phase 2 Motor Fail'], loc=2)
plt.show()
if __name__ == '__main__': # pragma: no cover
unittest.main()
| avg_line_length: 38.925676 | max_line_length: 104 | alphanum_fraction: 0.644333 |

| hexsha: 4e624fcd2f0cba1bc8e3578fd1b63b045d286d69 | size: 1,497 | ext: py | lang: Python |
| file: dataset_split.py | repo: Complicateddd/Complicateddd-ROITransformer | head: 2adfbf98892d569c460d100c6e2169c5fa3a9b82 | licenses: ["Apache-2.0"] |
| stars: null | issues: null | forks: 1 (2021-12-17T12:49:06.000Z to 2021-12-17T12:49:06.000Z) |
| content: |
import os
import random
import shutil
file=os.listdir("/media/ubuntu/data/huojianjun/科目四热身赛数据/labelTxt")
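# 80/20 split: randomly sample 80% of the label files for trainval; the remaining 20% become the test set.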
tv=int(len(file)*0.8)
list_one=list(range(1,len(file)+1))
trainval=random.sample(list_one,tv)
for i in list_one:
if i in trainval:
shutil.copy(os.path.join('/media/ubuntu/data/huojianjun/科目四热身赛数据/images/{}.tif'.format(i)),
os.path.join('/media/ubuntu/data/huojianjun/科目四热身赛数据/trainval/images/{}.tif'.format(i)))
shutil.copy(os.path.join('/media/ubuntu/data/huojianjun/科目四热身赛数据/labelTxt/{}.txt'.format(i)),
os.path.join('/media/ubuntu/data/huojianjun/科目四热身赛数据/trainval/labelTxt/{}.txt'.format(i)))
else:
shutil.copy(os.path.join('/media/ubuntu/data/huojianjun/科目四热身赛数据/images/{}.tif'.format(i)),
os.path.join('/media/ubuntu/data/huojianjun/科目四热身赛数据/test/images/{}.tif'.format(i)))
shutil.copy(os.path.join('/media/ubuntu/data/huojianjun/科目四热身赛数据/labelTxt/{}.txt'.format(i)),
os.path.join('/media/ubuntu/data/huojianjun/科目四热身赛数据/test/labelTxt/{}.txt'.format(i)))
# print(list_one)
# import os
# import shutil
# file=open("/media/ubuntu/新加卷/xiangmu/dataset/ImageSets/Main/test.txt",'r')
# list_=[]
# for line in file.readlines():
# list_.append(line.strip()+'.jpg')
# print(line)
# print(list_)
# img=os.listdir("/media/ubuntu/新加卷/xiangmu/dataset/JPEGImages")
# print(len(img))
# for i in img:
# if i in list_:
# shutil.copy(os.path.join("/media/ubuntu/新加卷/xiangmu/dataset/JPEGImages",i),
# os.path.join("/media/ubuntu/新加卷/xiangmu/sample",i))
# file.close()
| avg_line_length: 35.642857 | max_line_length: 95 | alphanum_fraction: 0.718771 |

| hexsha: c859b9b9b1f2a036ecfc77f29dfee02d183b780a | size: 26,041 | ext: py | lang: Python |
| file: test/functional/p2p_sendheaders.py | repo: bryceweiner/syscoin-1 | head: ff4392cb771b59ba66e249f614360ab4c9d4ce2a | licenses: ["MIT"] |
| stars: 145 (2015-01-03T17:07:36.000Z to 2022-02-11T07:35:46.000Z) | issues: 366 (2015-01-08T05:10:17.000Z to 2022-03-07T02:30:03.000Z) | forks: 110 (2015-01-03T17:00:15.000Z to 2022-02-13T15:31:08.000Z) |
| content: |
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
Setup:
- Two nodes:
- node0 is the node-under-test. We create two p2p connections to it. The
first p2p connection is a control and should only ever receive inv's. The
second p2p connection tests the headers sending logic.
- node1 is used to create reorgs.
test_null_locators
==================
Sends two getheaders requests with null locator values. First request's hashstop
value refers to validated block, while second request's hashstop value refers to
a block which hasn't been validated. Verifies only the first request returns
headers.
test_nonnull_locators
=====================
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
"""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import CInv
from test_framework.p2p import (
CBlockHeader,
NODE_WITNESS,
P2PInterface,
p2p_lock,
MSG_BLOCK,
msg_block,
msg_getblocks,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_sendheaders,
)
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
)
DIRECT_FETCH_RESPONSE_TIME = 0.05
class BaseNode(P2PInterface):
def __init__(self):
super().__init__()
self.block_announced = False
self.last_blockhash_announced = None
self.recent_headers_announced = []
def send_get_data(self, block_hashes):
"""Request data for a list of block hashes."""
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(MSG_BLOCK, x))
self.send_message(msg)
def send_get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(MSG_BLOCK, blockhash)]
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
self.wait_until(test_function, timeout=timeout)
def on_inv(self, message):
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
def on_headers(self, message):
if len(message.headers):
self.block_announced = True
for x in message.headers:
x.calc_sha256()
# append because headers may be announced over multiple messages.
self.recent_headers_announced.append(x.sha256)
self.last_blockhash_announced = message.headers[-1].sha256
def clear_block_announcements(self):
with p2p_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.recent_headers_announced = []
def check_last_headers_announcement(self, headers):
"""Test whether the last headers announcements received are right.
Headers may be announced across more than one message."""
test_function = lambda: (len(self.recent_headers_announced) >= len(headers))
self.wait_until(test_function)
with p2p_lock:
assert_equal(self.recent_headers_announced, headers)
self.block_announced = False
self.last_message.pop("headers", None)
self.recent_headers_announced = []
def check_last_inv_announcement(self, inv):
"""Test whether the last announcement received had the right inv.
inv should be a list of block hashes."""
test_function = lambda: self.block_announced
self.wait_until(test_function)
with p2p_lock:
compare_inv = []
if "inv" in self.last_message:
compare_inv = [x.hash for x in self.last_message["inv"].inv]
assert_equal(compare_inv, inv)
self.block_announced = False
self.last_message.pop("inv", None)
class SendHeadersTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def mine_blocks(self, count):
"""Mine count blocks and return the new tip."""
# Clear out block announcements from each p2p listener
[x.clear_block_announcements() for x in self.nodes[0].p2ps]
self.nodes[0].generatetoaddress(count, self.nodes[0].get_deterministic_priv_key().address)
return int(self.nodes[0].getbestblockhash(), 16)
def mine_reorg(self, length):
"""Mine a reorg that invalidates length blocks (replacing them with # length+1 blocks).
Note: we clear the state of our p2p connections after the
to-be-reorged-out blocks are mined, so that we don't break later tests.
return the list of block hashes newly mined."""
# make sure all invalidated blocks are node0's
self.nodes[0].generatetoaddress(length, self.nodes[0].get_deterministic_priv_key().address)
self.sync_blocks(self.nodes, wait=0.1)
for x in self.nodes[0].p2ps:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_block_announcements()
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.nodes[1].generatetoaddress(length + 1, self.nodes[1].get_deterministic_priv_key().address) # Must be longer than the orig chain
self.sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections
inv_node = self.nodes[0].add_p2p_connection(BaseNode())
# Make sure NODE_NETWORK is not set for test_node, so no block download
# will occur outside of direct fetching
test_node = self.nodes[0].add_p2p_connection(BaseNode(), services=NODE_WITNESS)
self.test_null_locators(test_node, inv_node)
self.test_nonnull_locators(test_node, inv_node)
def test_null_locators(self, test_node, inv_node):
tip = self.nodes[0].getblockheader(self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)[0])
tip_hash = int(tip["hash"], 16)
inv_node.check_last_inv_announcement(inv=[tip_hash])
test_node.check_last_inv_announcement(inv=[tip_hash])
self.log.info("Verify getheaders with null locator and valid hashstop returns headers.")
test_node.clear_block_announcements()
test_node.send_get_headers(locator=[], hashstop=tip_hash)
test_node.check_last_headers_announcement(headers=[tip_hash])
self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.")
block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1)
block.solve()
test_node.send_header_for_blocks([block])
test_node.clear_block_announcements()
test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16))
test_node.sync_with_ping()
assert_equal(test_node.block_announced, False)
inv_node.clear_block_announcements()
test_node.send_message(msg_block(block))
inv_node.check_last_inv_announcement(inv=[int(block.hash, 16)])
def test_nonnull_locators(self, test_node, inv_node):
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
self.log.info("Part 1: headers don't start before sendheaders message...")
for i in range(4):
self.log.debug("Part 1.{}: starting...".format(i))
old_tip = tip
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# next try requesting header and block
test_node.send_get_headers(locator=[old_tip], hashstop=tip)
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_block_announcements() # since we requested headers...
elif i == 2:
# this time announce own block via headers
inv_node.clear_block_announcements()
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height + 1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256])
test_node.send_and_ping(msg_block(new_block)) # make sure this block is processed
inv_node.wait_until(lambda: inv_node.block_announced)
inv_node.clear_block_announcements()
test_node.clear_block_announcements()
self.log.info("Part 1: success!")
self.log.info("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.send_get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
height = self.nodes[0].getblockcount() + 1
block_time += 10 # Advance far enough ahead
for i in range(10):
self.log.debug("Part 2.{}: starting...".format(i))
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
self.log.debug("Part 2.{}.{}: starting...".format(i, j))
blocks = []
for _ in range(i + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getheaders()
# Should have received a getheaders now
test_node.send_header_for_blocks(blocks)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
[inv_node.send_block_inv(x.sha256) for x in blocks]
test_node.wait_for_getdata([x.sha256 for x in blocks])
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert "inv" not in inv_node.last_message
assert "headers" not in inv_node.last_message
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
height += 1
block_time += 1
self.log.info("Part 2: success!")
self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
self.log.debug("Part 3.{}: starting...".format(j))
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=new_block_hashes)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
block_time += 9
fork_point = self.nodes[0].getblock("%064x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator=[fork_point])
test_node.check_last_inv_announcement(inv=new_block_hashes)
test_node.send_get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
self.log.debug("Part 3.{}.{}: starting...".format(j, i))
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.send_get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
if j == 0:
test_node.send_get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
self.log.info("Part 3: success!")
self.log.info("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for _ in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with p2p_lock:
assert "getdata" not in test_node.last_message
# This time, direct fetch should work
blocks = []
for _ in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME)
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 2
blocks = []
# Create extra blocks for later
for _ in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with p2p_lock:
assert "getdata" not in test_node.last_message
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME)
# Announcing 1 more header should not trigger any response
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with p2p_lock:
assert "getdata" not in test_node.last_message
self.log.info("Part 4: success!")
# Now deliver all those blocks we announced.
[test_node.send_message(msg_block(x)) for x in blocks]
self.log.info("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
self.log.debug("Part 5.{}: starting...".format(i))
test_node.last_message.pop("getdata", None)
blocks = []
# Create two more blocks.
for _ in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
with p2p_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders()
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
for _ in range(MAX_UNCONNECTING_HEADERS + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
with p2p_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders()
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with p2p_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i % len(blocks)]])
test_node.wait_for_getheaders()
# Eventually this stops working.
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
test_node.wait_for_disconnect()
self.log.info("Part 5: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert "getdata" not in inv_node.last_message
if __name__ == '__main__':
SendHeadersTest().main()
| avg_line_length: 43.766387 | max_line_length: 154 | alphanum_fraction: 0.638839 |

| hexsha: 3d596fff993073c0d618cf23841956d94b6ec3f3 | size: 697 | ext: py | lang: Python |
| file: app/src/config_example.py | repo: hubacekjirka/dailyPhotoTwitterBot | head: abd490b73603883d4e71bfa6076e9925a055fcb7 | licenses: ["MIT"] |
| stars: 1 (2020-03-16T10:51:07.000Z to 2020-03-16T10:51:07.000Z) | issues: 6 (2019-08-11T10:00:36.000Z to 2021-06-02T00:18:58.000Z) | forks: 2 (2019-09-30T18:45:47.000Z to 2021-01-09T10:38:14.000Z) |
| content: |
import os
# Twitter access config file
# To use this file, insert your own keys and rename it to config.py
consumer_key = 'CONSUMERKEY'
consumer_secret = 'CONSUMERSECRET'
access_token = 'ACCESS-TOKEN'
access_token_secret = 'ACCESSTOKENSECRET'
# Folder for storing Telegram's ChatIds
chat_id_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "photos")
telegram_token = 'TELEGRAM TOKEN'
# values 'S3', anything else defaults to using the local folder only
photo_source = 'S3'
aws_access_key = 'AWSACCESSKEY'
aws_key_id = 'AWSKEYID'
aws_bucket = 'AWSBUCKETNAME'
# debugging variables
tweeting_enabled = True
telegraming_enabled = True
debug = True
# Sentry
sentry_api_key = ""
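As the comments above note, the file is meant to be renamed to config.py and imported by the bot. A hypothetical consumer might read the toggles like this (the import and print statements are illustrative, not the project's actual code):

```python
import config  # the renamed config_example.py

if config.photo_source == 'S3':
    print('Fetching photos from S3 bucket:', config.aws_bucket)
else:
    print('Falling back to the local photo folder:', config.chat_id_folder)

if config.debug:
    print('Debug mode: tweeting enabled =', config.tweeting_enabled)
```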
| avg_line_length: 24.892857 | max_line_length: 84 | alphanum_fraction: 0.776184 |

| hexsha: f13ab948f1df6382fcac9a2961daa57cd787601e | size: 913 | ext: py | lang: Python |
| file: vc_zoom/indico_vc_zoom/blueprint.py | repo: tomasr8/indico-plugins | head: b85e4ad826fa362aa32eb236e73c9ab2f7c7f465 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
| content: |
# This file is part of the Indico plugins.
# Copyright (C) 2020 - 2022 CERN and ENEA
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
from indico.core.plugins import IndicoPluginBlueprint
from indico_vc_zoom.controllers import RHRoomAlternativeHost, RHWebhook
blueprint = IndicoPluginBlueprint('vc_zoom', 'indico_vc_zoom')
# Room management
# using any(zoom) instead of defaults since the event vc room locator
# includes the service and normalization skips values provided in 'defaults'
blueprint.add_url_rule(
'/event/<int:event_id>/manage/videoconference/<any(zoom):service>/<int:event_vc_room_id>/make-me-alt-host',
'make_me_alt_host',
RHRoomAlternativeHost,
methods=('POST',)
)
blueprint.add_url_rule('/api/plugin/zoom/webhook', 'webhook', RHWebhook, methods=('POST',))
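For illustration only (the IDs are invented), the first rule registered above matches a POST like the one sketched below, with event_id, service and event_vc_room_id taken from the path:

```python
# POST /event/1234/manage/videoconference/zoom/42/make-me-alt-host
#        event_id=1234, service='zoom', event_vc_room_id=42 -> RHRoomAlternativeHost
# POST /api/plugin/zoom/webhook -> RHWebhook
```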
| avg_line_length: 36.52 | max_line_length: 111 | alphanum_fraction: 0.771084 |

| hexsha: b3221b04eb82789c9b7dd65b14a0fd9ee5802918 | size: 1,529 | ext: py | lang: Python |
| file: 2019/day11/2.py | repo: tomhel/AoC_2019 (stars), tomhel/AoC (issues, forks) | head: c76c34235821864bc763f85d43cbcbfb9ed43469 | licenses: ["MIT"] |
| stars: 1 (2021-12-07T13:18:52.000Z to 2021-12-07T13:18:52.000Z) | issues: null | forks: null |
| content: |
#!/usr/bin/env python3
import intcode
import queue
import threading
def move(current_dir, turn):
if current_dir == "U":
return ("L", -1, 0) if turn == 0 else ("R", 1, 0)
elif current_dir == "D":
return ("R", 1, 0) if turn == 0 else ("L", -1, 0)
elif current_dir == "L":
return ("D", 0, 1) if turn == 0 else ("U", 0, -1)
elif current_dir == "R":
return ("U", 0, -1) if turn == 0 else ("D", 0, 1)
def run_robot():
grid = {}
direction, x, y = "U", 0, 0
in_pipe = queue.Queue()
out_pipe = queue.Queue()
io_handler = intcode.PipeIOHandler(in_pipe, out_pipe)
computer = intcode.Computer(intcode.load_program("input"), io_handler)
t = threading.Thread(target=computer.execute)
t.start()
i = 0
while t.is_alive():
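# Part 2: only the very first read defaults to white (1); any other unpainted panel reads as black (0).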
color = grid.get((x, y), 1 if i == 0 else 0)
in_pipe.put(color)
new_color = out_pipe.get()
if new_color is None:
break
turn = out_pipe.get()
if turn is None:
break
grid[(x, y)] = new_color
direction, dx, dy = move(direction, turn)
x, y = x + dx, y + dy
i += 1
return grid
def render_grid(grid):
minx, miny = min(x for x, _ in grid), min(y for _, y in grid)
maxx, maxy = max(x for x, _ in grid), max(y for _, y in grid)
for y in range(miny, maxy + 1):
for x in range(minx, maxx + 1):
print(" " if grid.get((x, y), 0) == 0 else "@", end="")
print()
render_grid(run_robot())
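A quick sanity check of the turn table that could be appended to the script above (it reuses the move function defined there; 0 turns left, 1 turns right, and y grows downward in the grid):

```python
assert move("U", 0) == ("L", -1, 0)   # facing up, turn left  -> facing left, step -x
assert move("U", 1) == ("R", 1, 0)    # facing up, turn right -> facing right, step +x
assert move("L", 1) == ("U", 0, -1)   # facing left, turn right -> facing up, step -y
```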
| avg_line_length: 25.065574 | max_line_length: 74 | alphanum_fraction: 0.535644 |

| hexsha: aa166553e42771ed913e35aa8b9a7f04a57008df | size: 1,310 | ext: py | lang: Python |
| file: src/09-built-on-asyncio/the_unsync/nosync.py | repo: lleites/async-techniques-python-course | head: fca6915bc78fec828a87bed56fd41565ed89ad9d | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
| content: |
import datetime
import math
import time
import requests
def main():
t0 = datetime.datetime.now()
compute_some()
compute_some()
compute_some()
download_some()
download_some()
download_some_more()
download_some_more()
wait_some()
wait_some()
wait_some()
wait_some()
dt = datetime.datetime.now() - t0
print(
"Synchronous version done in {:,.2f} seconds.".format(
dt.total_seconds()
)
)
def compute_some():
print("Computing...")
for _ in range(1, 10_000_000):
math.sqrt(25 ** 25 + 0.01)
def download_some():
print("Downloading...")
url = "https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2"
resp = requests.get(url)
resp.raise_for_status()
text = resp.text
print("Downloaded (more) {:,} characters.".format(len(text)))
def download_some_more():
print("Downloading more ...")
url = "https://pythonbytes.fm/episodes/show/92/will-your-python-be-compiled"
resp = requests.get(url)
resp.raise_for_status()
text = resp.text
print("Downloaded {:,} characters.".format(len(text)))
def wait_some():
print("Waiting...")
for _ in range(1, 1000):
time.sleep(0.001)
if __name__ == "__main__":
main()
| avg_line_length: 19.848485 | max_line_length: 99 | alphanum_fraction: 0.625954 |

| hexsha: 47930a8e781198727fa5062182ed980580b448ee | size: 778 | ext: py | lang: Python |
| file: tools/protoxform/utils.py | repo: dcillera/envoy | head: cb54ba8eec26f768f8c1ae412113b07bacde7321 | licenses: ["Apache-2.0"] |
| stars: 17,703 (2017-09-14T18:23:43.000Z to 2022-03-31T22:04:17.000Z) | issues: 15,957 (2017-09-14T16:38:22.000Z to 2022-03-31T23:56:30.000Z) | forks: 3,780 (2017-09-14T18:58:47.000Z to 2022-03-31T17:10:47.000Z) |
| content: |
import importlib
from tools.type_whisperer.api_type_db_pb2 import TypeDb
from google.protobuf import text_format
PROTO_FILES = (
"google.api.annotations", "validate.validate", "envoy.annotations.deprecation",
"envoy.annotations.resource", "udpa.annotations.migrate", "udpa.annotations.security",
"udpa.annotations.status", "udpa.annotations.sensitive", "udpa.annotations.versioning",
"xds.annotations.v3.status")
_typedb = None
def get_type_db():
assert _typedb != None
return _typedb
def load_type_db(type_db_path):
global _typedb
_typedb = TypeDb()
with open(type_db_path, 'r') as f:
text_format.Merge(f.read(), _typedb)
def load_protos():
for package in PROTO_FILES:
importlib.import_module(f"{package}_pb2")
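A hedged sketch of the intended call order for the helpers above; the type-database path is a placeholder, not the real build output location:

```python
from tools.protoxform import utils

utils.load_protos()                                 # import the *_pb2 annotation modules
utils.load_type_db("path/to/api_type_db.pb_text")   # placeholder path to the generated TypeDb
type_db = utils.get_type_db()                       # safe now that _typedb is populated
```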
| avg_line_length: 25.096774 | max_line_length: 91 | alphanum_fraction: 0.727506 |

| hexsha: 4e681e48101a4e1a42e998a2defa80b34bf2de44 | size: 759 | ext: py | lang: Python |
| file: MascotasPasto/MascotasPasto/urls.py | repo: Esteban-Rosas/Frameworks-7a-2020B | head: 768d354ee8d966c93465ff9bb2d7da27ad3e1baa | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
| content: |
"""MascotasPasto URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
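Following the recipe in the module docstring, wiring in a function view would look roughly like this (the app and view names are hypothetical):

```python
from django.contrib import admin
from django.urls import path

from mascotas import views  # hypothetical app providing a home view

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.home, name='home'),
]
```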
| avg_line_length: 34.5 | max_line_length: 77 | alphanum_fraction: 0.70751 |

| hexsha: 1b2f761bf1f78346a3f382038ac5c36c0a5e13d7 | size: 3,321 | ext: py | lang: Python |
| file: support_files/scraping/entries/proj_2075/proj_2075/settings.py | repo: miccaldas/new_rss | head: 9580887ac44b5c3e4c4ed5045478f2c7fef36afe | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
| content: |
# Scrapy settings for proj_2075 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'proj_2075'
SPIDER_MODULES = ['proj_2075.spiders']
NEWSPIDER_MODULE = 'proj_2075.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'proj_2075 (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'proj_2075.middlewares.Proj2075SpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'proj_2075.middlewares.Proj2075DownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'proj_2075.pipelines.Proj2075Pipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
ITEM_PIPELINES = {'scrapy.pipelines.images.ImagesPipeline': 1}
IMAGES_STORE = 'proj_2075/imgs'
FEED_EXPORT_FIELDS = ["title", "links", "content", "images", "image_urls"]
IMAGES_URLS_FIELD = "image_urls"
IMAGES_RESULT_FIELD = "images"
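The FEED_EXPORT_FIELDS and image-pipeline settings above imply an item shaped roughly like the following; this is a sketch, not the project's actual items.py:

```python
import scrapy

class Proj2075Item(scrapy.Item):
    title = scrapy.Field()
    links = scrapy.Field()
    content = scrapy.Field()
    image_urls = scrapy.Field()  # read by ImagesPipeline (IMAGES_URLS_FIELD)
    images = scrapy.Field()      # written by ImagesPipeline (IMAGES_RESULT_FIELD)
```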
| avg_line_length: 35.329787 | max_line_length: 103 | alphanum_fraction: 0.777176 |

| hexsha: 0d7a6f1c1511e4f95c8ee89c023327ab0ee0f427 | size: 394 | ext: py | lang: Python |
| file: 061 - P.A. 2.0.py | repo: Rprjunior/PraticandoPython | head: cad85e2f05703986b2ee47d8a475ac67c8909b88 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
| content: |
'''061 - P.A. 2.0
Redo CHALLENGE 51, reading the first term and the common difference of an arithmetic progression (P.A.).
Show the first 10 terms of the progression using a while loop.'''
valor = int(input('Digite um valor: '))
razao = int(input('Qual será sua razão: '))
contador = 1
while contador <= 10:
print('{} > '.format(valor), end=' ')
valor = valor + razao
contador = contador + 1
print('FIM', end=' ')
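For example, with a first term of 2 and a common difference of 3, the loop prints the first ten terms followed by FIM:

```
2 >  5 >  8 >  11 >  14 >  17 >  20 >  23 >  26 >  29 >  FIM
```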
| avg_line_length: 30.307692 | max_line_length: 72 | alphanum_fraction: 0.647208 |

| hexsha: f229406934f90c9a2679b34cd2132c8b3ca563a4 | size: 5,359 | ext: py | lang: Python |
| file: sec_utils/logger.py | repo: squigglepuff/firebytes | head: 6a6d5476ca73a489254c40c9bc87a0e68e5f36a4 | licenses: ["Apache-2.0"] |
| stars: null | issues: null | forks: null |
| content: |
#!/usr/bin/env python
# *-* coding: utf-8 *-*
from io import IOBase
import sys
import re
import os
import os.path
from datetime import datetime as dt
global fireIO
# For legacy support.
global PrintLog
global DumpLog
global TruncateLog
def PrintLog(line, level, debug_info="", plugin_id=0):
global fireIO
fireIO.write(line, level=level, debug_info=debug_info, plugin_id=plugin_id)
fireIO.flush()
def DumpLog():
global fireIO
fireIO.DumpLog()
fireIO.flush()
def TruncateLog():
global fireIO
fireIO.TruncateLog()
fireIO.flush()
class FirebytesIO(IOBase):
m_logFile = "./firebytes.log"
m_allowDebug = False
m_baseString = "\033[33m<\033[34m{0}\033[33m> [\033[32m{1}\033[33m]"
m_debugString = "\033[33m<\033[34m{0}\033[33m> [\033[32m{1}\033[33m] \033[35mDEBUG INFO:\033[0m {2}\n"
def __init__(self):
self.m_logFile = "./firebytes.log"
self.m_allowDebug = False
self.m_baseString = "\033[33m<\033[34m{0}\033[33m> [\033[32m{1}\033[33m]"
self.m_debugString = "\033[33m<\033[34m{0}\033[33m> [\033[32m{1}\033[33m] \033[35mDEBUG INFO:\033[0m {2}\n"
self.m_dataBuffer = ""
def set_debug(self, enabled=True):
self.m_allowDebug = enabled
def write(self, line, level="Info", debug_info="", plugin_id=0):
# Grab a time stamp.
dateStamp = dt.now().strftime("%x")
timeStamp = dt.now().strftime("%X")
# Format from the class-level templates on each call so the timestamp stays current
# (formatting the instance copy in place would freeze the first call's values).
self.m_baseString = FirebytesIO.m_baseString.format(dateStamp, timeStamp)
self.m_debugString = FirebytesIO.m_debugString.format(dateStamp, timeStamp, debug_info)
if line is not None and level is not None:
if re.match("^Fatal(\sError)?$", level, re.IGNORECASE):
sys.stderr.write("{0} \033[1;31m{1}:\033[0m {2}\n".format(self.m_baseString, level, line))
elif re.match("^(Critical|Error)$", level, re.IGNORECASE):
sys.stderr.write("{0} \033[31m{1}:\033[0m {2}\n".format(self.m_baseString, level, line))
elif re.match("^Warn(ing)?$", level, re.IGNORECASE):
sys.stderr.write("{0} \033[1;33m{1}:\033[0m {2}\n".format(self.m_baseString, level, line))
elif re.match("^Info(rmation)?$", level, re.IGNORECASE):
sys.stdout.write("{0} \033[34m{1}:\033[0m {2}\n".format(self.m_baseString, level, line))
elif self.m_allowDebug == True and re.match("^(Debug|Trace)$", level, re.IGNORECASE):
sys.stderr.write("{0} \033[35m{1}:\033[0m {2}\n".format(self.m_baseString, level, line))
elif re.match('^Failure$', level, re.IGNORECASE):
sys.stderr.write("{0} \033[1;31m{1}:\033[0m {2}\n".format(self.m_baseString, level.upper(), line))
elif re.match('^Success$', level, re.IGNORECASE):
sys.stderr.write("{0} \033[1;32m{1}:\033[0m {2}\n".format(self.m_baseString, level.upper(), line))
elif re.match('^Plugin$', level, re.IGNORECASE):
if os.environ.get('TERM', '') == "xterm-256color":  # avoid KeyError when TERM is unset
sys.stdout.write("{0} \033[38;5;202m[{1} {2}]:\033[0m {3}\n".format(self.m_baseString, level, plugin_id, line))
else:
sys.stdout.write("{0} \033[1;33m[{1} {2}]:\033[0m {3}\n".format(self.m_baseString, level, plugin_id, line))
if self.m_allowDebug == True and debug_info != "":
sys.stderr.write(self.m_debugString)
if sys.version_info[0] >= 3:
sys.stdout.flush()
sys.stderr.flush()
self.flush()
if self.m_logFile is not None and self.m_logFile != "":
if self.m_allowDebug != True and (level == "Debug" or level == "Trace"):
return # Skip
else:
try:
hLogFile = open(self.m_logFile, "a+")
hLogFile.write("<{0}> [{1}] {2}: {3}\n".format(dateStamp, timeStamp, level, line))
if self.m_allowDebug == True and debug_info != "":
hLogFile.write("<{0}> [{1}] DEBUG INFO: {2}\n".format(dateStamp, timeStamp, debug_info))
hLogFile.close()
except (IOError, OSError):
sys.stderr.write("{0} \033[1;31m{1}:\033[0m {2}\n".format(self.m_baseString, 'CRITICAL', "Unable to write to {0}".format(self.m_logFile)))
def DumpLog(self):
logRtn = ""
if self.m_logFile is not None and self.m_logFile != "":
try:
hLogFile = open(self.m_logFile, "r")
for line in hLogFile:
logRtn += line
self.flush()
hLogFile.close()
except (IOError, OSError):
sys.stderr.write("{0} \033[1;31m{1}:\033[0m {2}\n".format(self.m_baseString, 'CRITICAL', "Unable to write to {0}".format(self.m_logFile)))
return logRtn
def TruncateLog(self):
if self.m_logFile is not None and self.m_logFile != "":
try:
hLogFile = open(self.m_logFile, "w+")
hLogFile.close()
except (IOError, OSError):
sys.stderr.write("{0} \033[1;31m{1}:\033[0m {2}\n".format(self.m_baseString, 'CRITICAL', "Unable to write to {0}".format(self.m_logFile)))
# Setup the global instance.
fireIO = FirebytesIO()
# fireIO.set_debug(True)
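# Illustrative usage sketch, not part of the original module; it only exercises the
# helpers defined above and writes to the default ./firebytes.log.
if __name__ == "__main__":
    fireIO.set_debug(True)                 # also emit Debug/Trace messages
    PrintLog("scanner started", "Info")
    PrintLog("target unreachable", "Error", debug_info="timeout after 30s")
    print(DumpLog())                       # read the log file back
    TruncateLog()                          # empty the log file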
| 38.833333 | 158 | 0.574361 |
24f49c660c0309aee6e99d7963046de07980453d | 23,534 | py | Python | python/ccxt/indodax.py | DavidFelsen/ccxt | 6497b7da13d2ea3a9e56207b46b8691938b07839 | ["MIT"] | 2 | 2021-04-15T22:12:33.000Z | 2021-09-04T05:29:32.000Z | python/ccxt/indodax.py | DavidFelsen/ccxt | 6497b7da13d2ea3a9e56207b46b8691938b07839 | ["MIT"] | 1 | 2021-08-23T16:27:34.000Z | 2021-08-23T16:27:34.000Z | python/ccxt/indodax.py | DavidFelsen/ccxt | 6497b7da13d2ea3a9e56207b46b8691938b07839 | ["MIT"] | 2 | 2020-09-08T01:41:24.000Z | 2021-04-30T00:07:59.000Z |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.precise import Precise
class indodax(Exchange):
def describe(self):
return self.deep_extend(super(indodax, self).describe(), {
'id': 'indodax',
'name': 'INDODAX',
'countries': ['ID'], # Indonesia
'has': {
'cancelOrder': True,
'CORS': False,
'createMarketOrder': False,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': False,
'fetchMarkets': True,
'fetchMyTrades': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchTicker': True,
'fetchTickers': False,
'fetchTime': True,
'fetchTrades': True,
'withdraw': True,
},
'version': '2.0', # as of 9 April 2018
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/87070508-9358c880-c221-11ea-8dc5-5391afbbb422.jpg',
'api': {
'public': 'https://indodax.com/api',
'private': 'https://indodax.com/tapi',
},
'www': 'https://www.indodax.com',
'doc': 'https://github.com/btcid/indodax-official-api-docs',
'referral': 'https://indodax.com/ref/testbitcoincoid/1',
},
'api': {
'public': {
'get': [
'server_time',
'pairs',
'{pair}/ticker',
'{pair}/trades',
'{pair}/depth',
],
},
'private': {
'post': [
'getInfo',
'transHistory',
'trade',
'tradeHistory',
'getOrder',
'openOrders',
'cancelOrder',
'orderHistory',
'withdrawCoin',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0,
'taker': 0.003,
},
},
'exceptions': {
'exact': {
'invalid_pair': BadSymbol, # {"error":"invalid_pair","error_description":"Invalid Pair"}
'Insufficient balance.': InsufficientFunds,
'invalid order.': OrderNotFound,
'Invalid credentials. API not found or session has expired.': AuthenticationError,
'Invalid credentials. Bad sign.': AuthenticationError,
},
'broad': {
'Minimum price': InvalidOrder,
'Minimum order': InvalidOrder,
},
},
# exchange-specific options
'options': {
'recvWindow': 5 * 1000, # default 5 sec
'timeDifference': 0, # the difference between system clock and exchange clock
'adjustForTimeDifference': False, # controls the adjustment logic upon instantiation
},
'commonCurrencies': {
'STR': 'XLM',
'BCHABC': 'BCH',
'BCHSV': 'BSV',
'DRK': 'DASH',
'NEM': 'XEM',
},
})
def nonce(self):
return self.milliseconds() - self.options['timeDifference']
def fetch_time(self, params={}):
response = self.publicGetServerTime(params)
#
# {
# "timezone": "UTC",
# "server_time": 1571205969552
# }
#
return self.safe_integer(response, 'server_time')
def load_time_difference(self, params={}):
serverTime = self.fetch_time(params)
after = self.milliseconds()
self.options['timeDifference'] = after - serverTime
return self.options['timeDifference']
def fetch_markets(self, params={}):
response = self.publicGetPairs(params)
#
# [
# {
# "id": "btcidr",
# "symbol": "BTCIDR",
# "base_currency": "idr",
# "traded_currency": "btc",
# "traded_currency_unit": "BTC",
# "description": "BTC/IDR",
# "ticker_id": "btc_idr",
# "volume_precision": 0,
# "price_precision": 1000,
# "price_round": 8,
# "pricescale": 1000,
# "trade_min_base_currency": 10000,
# "trade_min_traded_currency": 0.00007457,
# "has_memo": False,
# "memo_name": False,
# "has_payment_id": False,
# "trade_fee_percent": 0.3,
# "url_logo": "https://indodax.com/v2/logo/svg/color/btc.svg",
# "url_logo_png": "https://indodax.com/v2/logo/png/color/btc.png",
# "is_maintenance": 0
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'ticker_id')
baseId = self.safe_string(market, 'traded_currency')
quoteId = self.safe_string(market, 'base_currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
taker = self.safe_number(market, 'trade_fee_percent')
isMaintenance = self.safe_integer(market, 'is_maintenance')
active = False if (isMaintenance) else True
pricePrecision = self.safe_integer(market, 'price_round')
precision = {
'amount': 8,
'price': pricePrecision,
}
limits = {
'amount': {
'min': self.safe_number(market, 'trade_min_traded_currency'),
'max': None,
},
'price': {
'min': self.safe_number(market, 'trade_min_base_currency'),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'taker': taker,
'percentage': True,
'precision': precision,
'limits': limits,
'info': market,
'active': active,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostGetInfo(params)
#
# {
# "success":1,
# "return":{
# "server_time":1619562628,
# "balance":{
# "idr":167,
# "btc":"0.00000000",
# "1inch":"0.00000000",
# },
# "balance_hold":{
# "idr":0,
# "btc":"0.00000000",
# "1inch":"0.00000000",
# },
# "address":{
# "btc":"1KMntgzvU7iTSgMBWc11nVuJjAyfW3qJyk",
# "1inch":"0x1106c8bb3172625e1f411c221be49161dac19355",
# "xrp":"rwWr7KUZ3ZFwzgaDGjKBysADByzxvohQ3C",
# "zrx":"0x1106c8bb3172625e1f411c221be49161dac19355"
# },
# "user_id":"276011",
# "name":"",
# "email":"testbitcoincoid@mailforspam.com",
# "profile_picture":null,
# "verification_status":"unverified",
# "gauth_enable":true
# }
# }
#
balances = self.safe_value(response, 'return', {})
free = self.safe_value(balances, 'balance', {})
used = self.safe_value(balances, 'balance_hold', {})
timestamp = self.safe_timestamp(balances, 'server_time')
result = {
'info': response,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}
currencyIds = list(free.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(free, currencyId)
account['used'] = self.safe_string(used, currencyId)
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'pair': self.market_id(symbol),
}
orderbook = self.publicGetPairDepth(self.extend(request, params))
return self.parse_order_book(orderbook, symbol, None, 'buy', 'sell')
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = self.publicGetPairTicker(self.extend(request, params))
#
# {
# "ticker": {
# "high":"0.01951",
# "low":"0.01877",
# "vol_eth":"39.38839319",
# "vol_btc":"0.75320886",
# "last":"0.01896",
# "buy":"0.01896",
# "sell":"0.019",
# "server_time":1565248908
# }
# }
#
ticker = response['ticker']
timestamp = self.safe_timestamp(ticker, 'server_time')
baseVolume = 'vol_' + market['baseId'].lower()
quoteVolume = 'vol_' + market['quoteId'].lower()
last = self.safe_number(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': self.safe_number(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_number(ticker, baseVolume),
'quoteVolume': self.safe_number(ticker, quoteVolume),
'info': ticker,
}
def parse_trade(self, trade, market=None):
timestamp = self.safe_timestamp(trade, 'date')
id = self.safe_string(trade, 'tid')
symbol = None
if market is not None:
symbol = market['symbol']
type = None
side = self.safe_string(trade, 'type')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'order': None,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = self.publicGetPairTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'open': 'open',
'filled': 'closed',
'cancelled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# {
# "order_id": "12345",
# "submit_time": "1392228122",
# "price": "8000000",
# "type": "sell",
# "order_ltc": "100000000",
# "remain_ltc": "100000000"
# }
#
# market closed orders - note that the price is very high
# and does not reflect actual price the order executed at
#
# {
# "order_id": "49326856",
# "type": "sell",
# "price": "1000000000",
# "submit_time": "1618314671",
# "finish_time": "1618314671",
# "status": "filled",
# "order_xrp": "30.45000000",
# "remain_xrp": "0.00000000"
# }
side = None
if 'type' in order:
side = order['type']
status = self.parse_order_status(self.safe_string(order, 'status', 'open'))
symbol = None
cost = None
price = self.safe_number(order, 'price')
amount = None
remaining = None
if market is not None:
symbol = market['symbol']
quoteId = market['quoteId']
baseId = market['baseId']
if (market['quoteId'] == 'idr') and ('order_rp' in order):
quoteId = 'rp'
if (market['baseId'] == 'idr') and ('remain_rp' in order):
baseId = 'rp'
cost = self.safe_number(order, 'order_' + quoteId)
if not cost:
amount = self.safe_number(order, 'order_' + baseId)
remaining = self.safe_number(order, 'remain_' + baseId)
timestamp = self.safe_integer(order, 'submit_time')
fee = None
id = self.safe_string(order, 'order_id')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': 'limit',
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'cost': cost,
'average': None,
'amount': amount,
'filled': None,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
})
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol')
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'order_id': id,
}
response = self.privatePostGetOrder(self.extend(request, params))
orders = response['return']
order = self.parse_order(self.extend({'id': id}, orders['order']), market)
return self.extend({'info': response}, order)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
response = self.privatePostOpenOrders(self.extend(request, params))
rawOrders = response['return']['orders']
# {success: 1, return: {orders: null}} if no orders
if not rawOrders:
return []
# {success: 1, return: {orders: [... objects]}} for orders fetched by symbol
if symbol is not None:
return self.parse_orders(rawOrders, market, since, limit)
# {success: 1, return: {orders: {marketid: [... objects]}}} if all orders are fetched
marketIds = list(rawOrders.keys())
exchangeOrders = []
for i in range(0, len(marketIds)):
marketId = marketIds[i]
marketOrders = rawOrders[marketId]
market = self.markets_by_id[marketId]
parsedOrders = self.parse_orders(marketOrders, market, since, limit)
exchangeOrders = self.array_concat(exchangeOrders, parsedOrders)
return exchangeOrders
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
response = self.privatePostOrderHistory(self.extend(request, params))
orders = self.parse_orders(response['return']['orders'], market)
orders = self.filter_by(orders, 'status', 'closed')
return self.filter_by_symbol_since_limit(orders, symbol, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
if type != 'limit':
raise ExchangeError(self.id + ' allows limit orders only')
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'type': side,
'price': price,
}
currency = market['baseId']
if side == 'buy':
request[market['quoteId']] = amount * price
else:
request[market['baseId']] = amount
request[currency] = amount
result = self.privatePostTrade(self.extend(request, params))
data = self.safe_value(result, 'return', {})
id = self.safe_string(data, 'order_id')
return {
'info': result,
'id': id,
}
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
side = self.safe_value(params, 'side')
if side is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires an extra "side" param')
self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'pair': market['id'],
'type': side,
}
return self.privatePostCancelOrder(self.extend(request, params))
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
# Custom string you need to provide to identify each withdrawal.
# Will be passed to callback URL(assigned via website to the API key)
# so your system can identify the request and confirm it.
# Alphanumeric, max length 255.
requestId = self.milliseconds()
# Alternatively:
# requestId = self.uuid()
request = {
'currency': currency['id'],
'withdraw_amount': amount,
'withdraw_address': address,
'request_id': str(requestId),
}
if tag:
request['withdraw_memo'] = tag
response = self.privatePostWithdrawCoin(self.extend(request, params))
#
# {
# "success": 1,
# "status": "approved",
# "withdraw_currency": "xrp",
# "withdraw_address": "rwWr7KUZ3ZFwzgaDGjKBysADByzxvohQ3C",
# "withdraw_amount": "10000.00000000",
# "fee": "2.00000000",
# "amount_after_fee": "9998.00000000",
# "submit_time": "1509469200",
# "withdraw_id": "xrp-12345",
# "txid": "",
# "withdraw_memo": "123123"
# }
#
id = None
if ('txid' in response) and (len(response['txid']) > 0):
id = response['txid']
return {
'info': response,
'id': id,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api]
if api == 'public':
url += '/' + self.implode_params(path, params)
else:
self.check_required_credentials()
body = self.urlencode(self.extend({
'method': path,
'timestamp': self.nonce(),
'recvWindow': self.options['recvWindow'],
}, params))
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Key': self.apiKey,
'Sign': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
# {success: 0, error: "invalid order."}
# or
# [{data, ...}, {...}, ...]
if isinstance(response, list):
return # public endpoints may return []-arrays
error = self.safe_value(response, 'error', '')
if not ('success' in response) and error == '':
return # no 'success' property on public responses
if self.safe_integer(response, 'success', 0) == 1:
# {success: 1, return: {orders: []}}
if not ('return' in response):
raise ExchangeError(self.id + ': malformed response: ' + self.json(response))
else:
return
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], error, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
raise ExchangeError(feedback) # unknown message
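# Illustrative usage sketch, not part of the generated ccxt source; it only touches
# the public endpoints described above (private calls additionally require
# exchange.apiKey and exchange.secret to be set).
if __name__ == "__main__":
    exchange = indodax()
    print(exchange.fetch_time())             # GET /api/server_time
    exchange.load_markets()                  # GET /api/pairs
    print(exchange.fetch_ticker('BTC/IDR'))  # GET /api/btc_idr/ticker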
| 38.454248 | 127 | 0.490822 |
e7994c794e0614f2f99d77eb7379781be60fb960 | 3,470 | py | Python | kubernetes/client/models/v1_namespace_spec.py | kevingessner/python | 3f4d09d260cf0839fae8173852c69e0419188454 | ["Apache-2.0"] | null | null | null | kubernetes/client/models/v1_namespace_spec.py | kevingessner/python | 3f4d09d260cf0839fae8173852c69e0419188454 | ["Apache-2.0"] | null | null | null | kubernetes/client/models/v1_namespace_spec.py | kevingessner/python | 3f4d09d260cf0839fae8173852c69e0419188454 | ["Apache-2.0"] | 1 | 2018-07-19T16:37:20.000Z | 2018-07-19T16:37:20.000Z |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.9.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1NamespaceSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'finalizers': 'list[str]'
}
attribute_map = {
'finalizers': 'finalizers'
}
def __init__(self, finalizers=None):
"""
V1NamespaceSpec - a model defined in Swagger
"""
self._finalizers = None
self.discriminator = None
if finalizers is not None:
self.finalizers = finalizers
@property
def finalizers(self):
"""
Gets the finalizers of this V1NamespaceSpec.
Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
:return: The finalizers of this V1NamespaceSpec.
:rtype: list[str]
"""
return self._finalizers
@finalizers.setter
def finalizers(self, finalizers):
"""
Sets the finalizers of this V1NamespaceSpec.
Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
:param finalizers: The finalizers of this V1NamespaceSpec.
:type: list[str]
"""
self._finalizers = finalizers
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1NamespaceSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
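# Illustrative usage sketch, not part of the generated client code:
if __name__ == "__main__":
    spec = V1NamespaceSpec(finalizers=["kubernetes"])
    print(spec.to_dict())                                      # {'finalizers': ['kubernetes']}
    print(spec == V1NamespaceSpec(finalizers=["kubernetes"]))  # True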
| 27.322835 | 183 | 0.57147 |
f01989026433128d32fe0dee11ff7d0d575c1dce | 63,084 | py | Python | pandas/core/groupby/generic.py | AbdulMAbdi/pandas | a5eb94d266fc5f57acd29f585e2c944259cd0861 | ["BSD-3-Clause"] | null | null | null | pandas/core/groupby/generic.py | AbdulMAbdi/pandas | a5eb94d266fc5f57acd29f585e2c944259cd0861 | ["BSD-3-Clause"] | null | null | null | pandas/core/groupby/generic.py | AbdulMAbdi/pandas | a5eb94d266fc5f57acd29f585e2c944259cd0861 | ["BSD-3-Clause"] | null | null | null |
"""
Define the SeriesGroupBy and DataFrameGroupBy
classes that hold the groupby interfaces (and some implementations).
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
from collections import abc, namedtuple
import copy
from functools import partial
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
FrozenSet,
Iterable,
List,
Mapping,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from pandas._libs import lib, reduction as libreduction
from pandas._typing import ArrayLike, FrameOrSeries, FrameOrSeriesUnion, Label
from pandas.util._decorators import Appender, Substitution, doc
from pandas.core.dtypes.cast import (
find_common_type,
maybe_cast_result_dtype,
maybe_downcast_numeric,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_bool,
is_integer_dtype,
is_interval_dtype,
is_numeric_dtype,
is_scalar,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.aggregation import (
agg_list_like,
aggregate,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
import pandas.core.algorithms as algorithms
from pandas.core.arrays import ExtensionArray
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.frame import DataFrame
from pandas.core.generic import ABCDataFrame, ABCSeries, NDFrame
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy,
_agg_template,
_apply_docs,
_transform_template,
get_groupby,
group_selection_context,
)
from pandas.core.indexes.api import Index, MultiIndex, all_indexes_same
import pandas.core.indexes.base as ibase
from pandas.core.internals import BlockManager
from pandas.core.series import Series
from pandas.core.util.numba_ import maybe_use_numba
from pandas.plotting import boxplot_frame_groupby
if TYPE_CHECKING:
from pandas.core.internals import Block
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
# TODO(typing) the return value on this callable should be any *scalar*.
AggScalar = Union[str, Callable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
def generate_property(name: str, klass: Type[FrameOrSeries]):
"""
Create a property for a GroupBy subclass to dispatch to DataFrame/Series.
Parameters
----------
name : str
klass : {DataFrame, Series}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = getattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_allowlisted_properties(klass: Type[FrameOrSeries], allowlist: FrozenSet[str]):
"""
Create GroupBy member defs for DataFrame/Series names in a allowlist.
Parameters
----------
klass : DataFrame or Series class
class where members are defined.
allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
def pinner(cls):
for name in allowlist:
if hasattr(cls, name):
# don't override anything that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_allowlisted_properties(Series, base.series_apply_allowlist)
class SeriesGroupBy(GroupBy[Series]):
_apply_allowlist = base.series_apply_allowlist
def _iterate_slices(self) -> Iterable[Series]:
yield self._selected_obj
@property
def _selection_name(self):
"""
since we are a series, we by definition only have
a single name, but may be the result of a selection or
the name of our object
"""
if self._selection is None:
return self.obj.name
else:
return self._selection
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.groupby([1, 1, 2, 2]).agg(
... minimum='min',
... maximum='max',
... )
minimum maximum
1 1 2
2 3 4"""
)
@Appender(
_apply_docs["template"].format(
input="series", examples=_apply_docs["series_examples"]
)
)
def apply(self, func, *args, **kwargs):
return super().apply(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Series")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result, index = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(result.ravel(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if isinstance(func, str):
return getattr(self, func)(*args, **kwargs)
elif isinstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
ret.columns = columns
else:
cyfunc = self._get_cython_func(func)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except (ValueError, KeyError):
# TODO: KeyError is raised in _python_agg_general,
# see test_groupby.test_basic
result = self._aggregate_named(func, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = create_series_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
if not self.as_index: # pragma: no cover
print("Warning, ignoring as_index=True")
if isinstance(ret, dict):
from pandas import concat
ret = concat(ret.values(), axis=1, keys=[key.label for key in ret.keys()])
return ret
agg = aggregate
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
if isinstance(self._selected_obj, Series):
raise SpecificationError("nested renamer is not supported")
columns = list(arg.keys())
arg = arg.items()
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.append(com.get_callable_name(f) or f)
arg = zip(columns, arg)
results: Dict[base.OutputKey, FrameOrSeriesUnion] = {}
for idx, (name, func) in enumerate(arg):
obj = self
# reset the cache so that we
# only include the named selection
if name in self._selected_obj:
obj = copy.copy(obj)
obj._reset_cache()
obj._selection = name
results[base.OutputKey(label=name, position=idx)] = obj.aggregate(func)
if any(isinstance(x, DataFrame) for x in results.values()):
# let higher level handle
return results
output = self._wrap_aggregated_output(results, index=None)
return self.obj._constructor_expanddim(output, columns=columns)
# TODO: index should not be Optional - see GH 35490
def _wrap_series_output(
self,
output: Mapping[base.OutputKey, Union[Series, np.ndarray]],
index: Optional[Index],
) -> FrameOrSeriesUnion:
"""
Wraps the output of a SeriesGroupBy operation into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray]]
Data to wrap.
index : pd.Index or None
Index to apply to the output.
Returns
-------
Series or DataFrame
Notes
-----
In the vast majority of cases output and columns will only contain one
element. The exception is operations that expand dimensions, like ohlc.
"""
indexed_output = {key.position: val for key, val in output.items()}
columns = Index(key.label for key in output)
result: FrameOrSeriesUnion
if len(output) > 1:
result = self.obj._constructor_expanddim(indexed_output, index=index)
result.columns = columns
elif not columns.empty:
result = self.obj._constructor(
indexed_output[0], index=index, name=columns[0]
)
else:
result = self.obj._constructor_expanddim()
return result
# TODO: Remove index argument, use self.grouper.result_index, see GH 35490
def _wrap_aggregated_output(
self,
output: Mapping[base.OutputKey, Union[Series, np.ndarray]],
index: Optional[Index],
) -> FrameOrSeriesUnion:
"""
Wraps the output of a SeriesGroupBy aggregation into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray]]
Data to wrap.
Returns
-------
Series or DataFrame
Notes
-----
In the vast majority of cases output will only contain one element.
The exception is operations that expand dimensions, like ohlc.
"""
result = self._wrap_series_output(output=output, index=index)
return self._reindex_output(result)
def _wrap_transformed_output(
self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]]
) -> Series:
"""
Wraps the output of a SeriesGroupBy aggregation into the expected result.
Parameters
----------
output : dict[base.OutputKey, Union[Series, np.ndarray]]
Dict with a sole key of 0 and a value of the result values.
Returns
-------
Series
Notes
-----
output should always contain one element. It is specified as a dict
for consistency with DataFrame methods and _wrap_aggregated_output.
"""
assert len(output) == 1
result = self._wrap_series_output(output=output, index=self.obj.index)
# No transformations increase the ndim of the result
assert isinstance(result, Series)
return result
def _wrap_applied_output(
self, keys: Index, values: Optional[List[Any]], not_indexed_same: bool = False
) -> FrameOrSeriesUnion:
"""
Wrap the output of SeriesGroupBy.apply into the expected result.
Parameters
----------
keys : Index
Keys of groups that Series was grouped by.
values : Optional[List[Any]]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
DataFrame or Series
"""
if len(keys) == 0:
# GH #6265
return self.obj._constructor(
[], name=self._selection_name, index=keys, dtype=np.float64
)
assert values is not None
def _get_index() -> Index:
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823 #24880
index = _get_index()
result: FrameOrSeriesUnion = self._reindex_output(
self.obj._constructor_expanddim(values, index=index)
)
# if self.observed is False,
# keep all-NaN rows created while re-indexing
result = result.stack(dropna=self.observed)
result.name = self._selection_name
return result
elif isinstance(values[0], (Series, DataFrame)):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=_get_index(), name=self._selection_name
)
return self._reindex_output(result)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
initialized = False
for name, group in self:
# Each step of this loop corresponds to
# libreduction._BaseGrouper._apply_to_group
group.name = name # NB: libreduction does not pin name
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, 0)
initialized = True
result[name] = output
return result
@Substitution(klass="Series")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result = self._transform_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(
result.ravel(), index=data.index, name=data.name
)
func = self._get_cython_func(func) or func
if not isinstance(func, str):
return self._transform_general(func, *args, **kwargs)
elif func not in base.transform_kernel_allowlist:
msg = f"'{func}' is not a valid function name for transform(name)"
raise ValueError(msg)
elif func in base.cythonized_kernels or func in base.transformation_kernels:
# cythonized transform or canned "agg+broadcast"
return getattr(self, func)(*args, **kwargs)
# If func is a reduction, we need to broadcast the
# result to the whole group. Compute func result
# and deal with possible broadcasting below.
# Temporarily set observed for dealing with categoricals.
with com.temp_setattr(self, "observed", True):
result = getattr(self, func)(*args, **kwargs)
return self._transform_fast(result)
def _transform_general(self, func, *args, **kwargs):
"""
Transform with a non-str `func`.
"""
klass = type(self._selected_obj)
results = []
for name, group in self:
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
if isinstance(res, (ABCDataFrame, ABCSeries)):
res = res._values
results.append(klass(res, index=group.index))
# check for empty "results" to avoid concat ValueError
if results:
from pandas.core.reshape.concat import concat
concatenated = concat(results)
result = self._set_result_index_ordered(concatenated)
else:
result = self.obj._constructor(dtype=np.float64)
# we will only try to coerce the result type if
# we have a numeric dtype, as these are *always* user-defined funcs
# the cython take a different path (and casting)
if is_numeric_dtype(result.dtype):
common_dtype = find_common_type([self._selected_obj.dtype, result.dtype])
if common_dtype is result.dtype:
result = maybe_downcast_numeric(result, self._selected_obj.dtype)
result.name = self._selected_obj.name
result.index = self._selected_obj.index
return result
def _transform_fast(self, result) -> Series:
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
ids, _, ngroup = self.grouper.group_info
result = result.reindex(self.grouper.result_index, copy=False)
out = algorithms.take_1d(result._values, ids)
return self.obj._constructor(out, index=self.obj.index, name=self.obj.name)
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, str):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._get_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna: bool = True) -> Series:
"""
Return number of unique elements in the group.
Returns
-------
Series
Number of unique values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new unique observation
mask = codes == -1
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype("int64", copy=False)
if len(ids):
# NaN/NaT group exists if the head of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self._selection_name)
return self._reindex_output(result, fill_value=0)
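# Illustrative trace, not part of the pandas source: for values
# ['a', 'b', 'a', 'a', 'c'] with group ids [0, 0, 0, 1, 1], factorize gives
# codes [0, 1, 0, 0, 2]; after lexsorting by (codes, ids) the group boundaries
# are idx = [0, 3], and inc marks each position whose code differs from the
# previous one (with idx positions forced to 1), so np.add.reduceat(inc, idx)
# yields [2, 2] unique values per group.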
@doc(Series.describe)
def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def value_counts(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
):
from pandas.core.reshape.merge import get_join_indexers
from pandas.core.reshape.tile import cut
if bins is not None and not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return self.apply(
Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
ids, _, _ = self.grouper.group_info
val = self.obj._values
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Series(val), bins, include_lowest=True)
lev = lab.cat.categories
lab = lev.take(lab.cat.codes, allow_fill=True, fill_value=lev._na_value)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
sorter = np.lexsort((lab.left, lab.right, ids))
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self._selection_name]
if dropna:
mask = codes[-1] != -1
if mask.all():
dropna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.astype("float")
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is None:
mi = MultiIndex(
levels=levels, codes=codes, names=names, verify_integrity=False
)
if is_integer_dtype(out):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self._selection_name)
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, codes[-1]]
_, idx = get_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.append(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self._selection_name)
def count(self) -> Series:
"""
Compute count of group, excluding missing values.
Returns
-------
Series
Count of values within each group.
"""
ids, _, ngroups = self.grouper.group_info
val = self.obj._values
mask = (ids != -1) & ~isna(val)
ids = ensure_platform_int(ids)
minlength = ngroups or 0
out = np.bincount(ids[mask], minlength=minlength)
result = self.obj._constructor(
out,
index=self.grouper.result_index,
name=self._selection_name,
dtype="int64",
)
return self._reindex_output(result, fill_value=0)
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None):
"""Calculate pct_change of each value to previous entry in group"""
# TODO: Remove this conditional when #23918 is fixed
if freq:
return self.apply(
lambda x: x.pct_change(
periods=periods, fill_method=fill_method, limit=limit, freq=freq
)
)
if fill_method is None: # GH30463
fill_method = "pad"
limit = 0
filled = getattr(self, fill_method)(limit=limit)
fill_grp = filled.groupby(self.grouper.codes)
shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
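# Illustrative trace, not part of the pandas source: for pd.Series([1, 2, 4, 10])
# grouped by [1, 1, 2, 2], pct_change(periods=1) forward-fills within each group
# (a no-op here), shifts by one row per group, and returns (filled / shifted) - 1,
# i.e. [NaN, 1.0, NaN, 1.5].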
@pin_allowlisted_properties(DataFrame, base.dataframe_apply_allowlist)
class DataFrameGroupBy(GroupBy[DataFrame]):
_apply_allowlist = base.dataframe_apply_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> df
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> df.groupby('A').agg('min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> df.groupby('A').agg(['min', 'max'])
B C
min max min max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> df.groupby('A').B.agg(['min', 'max'])
min max
A
1 1 2
2 3 4
Different aggregations per column
>>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
B C
min max sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
pandas supports "named aggregation"
>>> df.groupby("A").agg(
... b_min=pd.NamedAgg(column="B", aggfunc="min"),
... c_sum=pd.NamedAgg(column="C", aggfunc="sum"))
b_min c_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to apply to that column.
Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a callable or a string alias.
See :ref:`groupby.aggregate.named` for more."""
)
@doc(_agg_template, examples=_agg_examples_doc, klass="DataFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result, index = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(result, index=index, columns=data.columns)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = maybe_mangle_lambdas(func)
result, how = aggregate(self, func, *args, **kwargs)
if how is None:
return result
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
elif args or kwargs:
result = self._aggregate_frame(func, *args, **kwargs)
elif self.axis == 1:
# _aggregate_multiple_funcs does not allow self.axis == 1
result = self._aggregate_frame(func)
else:
# try to treat as if we are passing a list
try:
result = agg_list_like(self, [func], _axis=self.axis)
# select everything except for the last level, which is the one
# containing the name of the function(s), see GH 32040
result.columns = result.columns.rename(
[self._selected_obj.columns.name] * result.columns.nlevels
).droplevel(-1)
except ValueError as err:
if "no results" not in str(err):
# raised directly by _aggregate_multiple_funcs
raise
result = self._aggregate_frame(func)
except AttributeError:
# catch exception from line 969
# (Series does not have attribute "columns"), see GH 35246
result = self._aggregate_frame(func)
if relabeling:
# used reordered index of columns
result = result.iloc[:, order]
result.columns = columns
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
return result._convert(datetime=True)
agg = aggregate
def _iterate_slices(self) -> Iterable[Series]:
obj = self._selected_obj
if self.axis == 1:
obj = obj.T
if isinstance(obj, Series) and obj.name not in self.exclusions:
# Occurs when doing DataFrameGroupBy(...)["X"]
yield obj
else:
for label, values in obj.items():
if label in self.exclusions:
continue
yield values
def _cython_agg_general(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
) -> DataFrame:
agg_mgr = self._cython_agg_blocks(
how, alt=alt, numeric_only=numeric_only, min_count=min_count
)
return self._wrap_agged_blocks(agg_mgr.blocks, items=agg_mgr.items)
def _cython_agg_blocks(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
) -> BlockManager:
data: BlockManager = self._get_data_to_aggregate()
if numeric_only:
data = data.get_numeric_data(copy=False)
no_result = object()
def cast_agg_result(result, values: ArrayLike, how: str) -> ArrayLike:
# see if we can cast the values to the desired dtype
# this may not be the original dtype
assert not isinstance(result, DataFrame)
assert result is not no_result
dtype = maybe_cast_result_dtype(values.dtype, how)
result = maybe_downcast_numeric(result, dtype)
if isinstance(values, ExtensionArray) and isinstance(result, np.ndarray):
# e.g. values was an IntegerArray
# (1, N) case can occur if values was Categorical
# and result is ndarray[object]
# TODO(EA2D): special casing not needed with 2D EAs
assert result.ndim == 1 or result.shape[0] == 1
try:
# Cast back if feasible
result = type(values)._from_sequence(
result.ravel(), dtype=values.dtype
)
except (ValueError, TypeError):
# reshape to be valid for non-Extension Block
result = result.reshape(1, -1)
elif isinstance(result, np.ndarray) and result.ndim == 1:
# We went through a SeriesGroupByPath and need to reshape
result = result.reshape(1, -1)
return result
def blk_func(bvalues: ArrayLike) -> ArrayLike:
try:
result, _ = self.grouper._cython_operation(
"aggregate", bvalues, how, axis=1, min_count=min_count
)
except NotImplementedError:
# generally if we have numeric_only=False
# and non-applicable functions
# try to python agg
if alt is None:
# we cannot perform the operation
# in an alternate way, exclude the block
assert how == "ohlc"
raise
# We get here with a) EADtypes and b) object dtype
obj: FrameOrSeriesUnion
# call our grouper again with only this block
if isinstance(bvalues, ExtensionArray):
# TODO(EA2D): special case not needed with 2D EAs
obj = Series(bvalues)
else:
obj = DataFrame(bvalues.T)
if obj.shape[1] == 1:
# Avoid call to self.values that can occur in DataFrame
# reductions; see GH#28949
obj = obj.iloc[:, 0]
# Create SeriesGroupBy with observed=True so that it does
# not try to add missing categories if grouping over multiple
# Categoricals. This will done by later self._reindex_output()
# Doing it here creates an error. See GH#34951
sgb = get_groupby(obj, self.grouper, observed=True)
result = sgb.aggregate(lambda x: alt(x, axis=self.axis))
assert isinstance(result, (Series, DataFrame)) # for mypy
# In the case of object dtype block, it may have been split
# in the operation. We un-split here.
result = result._consolidate()
assert isinstance(result, (Series, DataFrame)) # for mypy
assert len(result._mgr.blocks) == 1
# unwrap DataFrame to get array
result = result._mgr.blocks[0].values
return cast_agg_result(result, bvalues, how)
# TypeError -> we may have an exception in trying to aggregate
# continue and exclude the block
# NotImplementedError -> "ohlc" with wrong dtype
new_mgr = data.apply(blk_func, ignore_failures=True)
if not len(new_mgr):
raise DataError("No numeric types to aggregate")
return new_mgr
def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
if self.grouper.nkeys != 1:
raise AssertionError("Number of keys must be 1")
axis = self.axis
obj = self._obj_with_exclusions
result: Dict[Label, Union[NDFrame, np.ndarray]] = {}
if axis != obj._info_axis_number:
for name, data in self:
fres = func(data, *args, **kwargs)
result[name] = fres
else:
for name in self.indices:
data = self.get_group(name, obj=obj)
fres = func(data, *args, **kwargs)
result[name] = fres
return self._wrap_frame_output(result, obj)
def _aggregate_item_by_item(self, func, *args, **kwargs) -> DataFrame:
# only for axis==0
obj = self._obj_with_exclusions
result: Dict[Union[int, str], NDFrame] = {}
cannot_agg = []
for item in obj:
data = obj[item]
colg = SeriesGroupBy(data, selection=item, grouper=self.grouper)
try:
result[item] = colg.aggregate(func, *args, **kwargs)
except ValueError as err:
if "Must produce aggregated value" in str(err):
# raised in _aggregate_named, handle at higher level
# see test_apply_with_mutated_index
raise
# otherwise we get here from an AttributeError in _make_wrapper
cannot_agg.append(item)
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
return self.obj._constructor(result, columns=result_columns)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
return self.obj._constructor(index=keys)
# GH12824
first_not_none = next(com.not_none(*values), None)
if first_not_none is None:
# GH9684 - All values are None, return an empty frame.
return self.obj._constructor()
elif isinstance(first_not_none, DataFrame):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
key_index = self.grouper.result_index if self.as_index else None
if isinstance(first_not_none, (np.ndarray, Index)):
# GH#1738: values is list of arrays of unequal lengths
# fall through to the outer else clause
# TODO: sure this is right? we used to do this
# after raising AttributeError above
return self.obj._constructor_sliced(
values, index=key_index, name=self._selection_name
)
elif not isinstance(first_not_none, Series):
# values are not series or array-like but scalars
# self._selection_name not passed through to Series as the
# result should not take the name of original selection
# of columns
if self.as_index:
return self.obj._constructor_sliced(values, index=key_index)
else:
result = DataFrame(values, index=key_index, columns=[self._selection])
self._insert_inaxis_grouper_inplace(result)
return result
else:
# values are Series
return self._wrap_applied_output_series(
keys, values, not_indexed_same, first_not_none, key_index
)
def _wrap_applied_output_series(
self,
keys,
values: List[Series],
not_indexed_same: bool,
first_not_none,
key_index,
) -> FrameOrSeriesUnion:
# this is to silence a DeprecationWarning
# TODO: Remove when default dtype of empty Series is object
kwargs = first_not_none._construct_axes_dict()
backup = create_series_with_explicit_dtype(dtype_if_empty=object, **kwargs)
values = [x if (x is not None) else backup for x in values]
all_indexed_same = all_indexes_same(x.index for x in values)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
applied_index = self._selected_obj._get_axis(self.axis)
singular_series = len(values) == 1 and applied_index.nlevels == 1
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single value
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.core.reshape.concat import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(keys, values, not_indexed_same=True)
# Combine values
# vstack+constructor is faster than concat and handles MI-columns
stacked_values = np.vstack([np.asarray(v) for v in values])
if self.axis == 0:
index = key_index
columns = first_not_none.index.copy()
if columns.name is None:
# GH6124 - propagate name of Series when it's consistent
names = {v.name for v in values}
if len(names) == 1:
columns.name = list(names)[0]
else:
index = first_not_none.index
columns = key_index
stacked_values = stacked_values.T
result = self.obj._constructor(stacked_values, index=index, columns=columns)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
so = self._selected_obj
if so.ndim == 2 and so.dtypes.apply(needs_i8_conversion).any():
result = result._convert(datetime=True)
else:
result = result._convert(datetime=True)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
return self._reindex_output(result)
def _transform_general(self, func, *args, **kwargs):
from pandas.core.reshape.concat import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
for name, group in gen:
object.__setattr__(group, "name", name)
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except ValueError as err:
msg = "transform must return a scalar value for each group"
raise ValueError(msg) from err
if isinstance(res, Series):
# we need to broadcast across the
# other dimension; this will preserve dtypes
# GH14457
if not np.prod(group.shape):
continue
elif res.index.is_(obj.index):
r = concat([res] * len(group.columns), axis=1)
r.columns = group.columns
r.index = group.index
else:
r = self.obj._constructor(
np.concatenate([res.values] * len(group.index)).reshape(
group.shape
),
columns=group.columns,
index=group.index,
)
applied.append(r)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1
concatenated = concat(applied, axis=self.axis, verify_integrity=False)
concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False)
return self._set_result_index_ordered(concatenated)
@Substitution(klass="DataFrame")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with group_selection_context(self):
data = self._selected_obj
result = self._transform_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
return self.obj._constructor(result, index=data.index, columns=data.columns)
# optimized transforms
func = self._get_cython_func(func) or func
if not isinstance(func, str):
return self._transform_general(func, *args, **kwargs)
elif func not in base.transform_kernel_allowlist:
msg = f"'{func}' is not a valid function name for transform(name)"
raise ValueError(msg)
elif func in base.cythonized_kernels or func in base.transformation_kernels:
# cythonized transformation or canned "reduction+broadcast"
return getattr(self, func)(*args, **kwargs)
# GH 30918
# Use _transform_fast only when we know func is an aggregation
if func in base.reduction_kernels:
# If func is a reduction, we need to broadcast the
# result to the whole group. Compute func result
# and deal with possible broadcasting below.
# Temporarily set observed for dealing with categoricals.
with com.temp_setattr(self, "observed", True):
result = getattr(self, func)(*args, **kwargs)
if isinstance(result, DataFrame) and result.columns.equals(
self._obj_with_exclusions.columns
):
return self._transform_fast(result)
return self._transform_general(func, *args, **kwargs)
def _transform_fast(self, result: DataFrame) -> DataFrame:
"""
Fast transform path for aggregations
"""
obj = self._obj_with_exclusions
# for each col, reshape to size of original frame by take operation
ids, _, ngroup = self.grouper.group_info
result = result.reindex(self.grouper.result_index, copy=False)
output = [
algorithms.take_1d(result.iloc[:, i].values, ids)
for i, _ in enumerate(result.columns)
]
return self.obj._constructor._from_arrays(
output, columns=result.columns, index=obj.index
)
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, str):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis
)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis
)
return fast_path, slow_path
def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
except AssertionError:
raise
except Exception:
# GH#29631 For user-defined function, we can't predict what may be
# raised; see test_transform.test_transform_fastpath_raises
return path, res
# verify fast path does not change columns (and names), otherwise
# its results cannot be joined with those of the slow path
if not isinstance(res_fast, DataFrame):
return path, res
if not res_fast.columns.equals(group.columns):
return path, res
if res_fast.equals(res):
path = fast_path
return path, res
def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame:
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
except TypeError:
# e.g. trying to call nanmean with string values
pass
else:
inds.append(i)
if not output:
raise TypeError("Transform function invalid for data types")
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return self.obj._constructor(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a DataFrame excluding filtered elements.
Elements from groups are filtered if they do not satisfy the
boolean criterion specified by func.
Parameters
----------
func : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
If False, groups that evaluate False are filled with NaNs.
Returns
-------
filtered : DataFrame
Notes
-----
Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.filter(lambda x: x['B'].mean() > 3.)
A B C
1 bar 2 5.0
3 bar 4 1.0
5 bar 6 9.0
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if is_bool(res) or (is_scalar(res) and isna(res)):
if res and notna(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError(
f"filter function returned a {type(res).__name__}, "
"but expected a scalar bool"
)
return self._apply_filter(indices, dropna)
def __getitem__(self, key):
if self.axis == 1:
# GH 37725
raise ValueError("Cannot subset columns when using axis=1")
# per GH 23566
if isinstance(key, tuple) and len(key) > 1:
# if len == 1, then it becomes a SeriesGroupBy and this is actually
# valid syntax, so don't raise warning
warnings.warn(
"Indexing with multiple keys (implicitly converted to a tuple "
"of keys) will be deprecated, use a list instead.",
FutureWarning,
stacklevel=2,
)
return super().__getitem__(key)
def _gotitem(self, key, ndim: int, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
if ndim == 2:
if subset is None:
subset = self.obj
return DataFrameGroupBy(
subset,
self.grouper,
axis=self.axis,
level=self.level,
grouper=self.grouper,
exclusions=self.exclusions,
selection=key,
as_index=self.as_index,
sort=self.sort,
group_keys=self.group_keys,
squeeze=self.squeeze,
observed=self.observed,
mutated=self.mutated,
dropna=self.dropna,
)
elif ndim == 1:
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(
subset,
level=self.level,
grouper=self.grouper,
selection=key,
sort=self.sort,
group_keys=self.group_keys,
squeeze=self.squeeze,
observed=self.observed,
dropna=self.dropna,
)
raise AssertionError("invalid ndim for _gotitem")
def _wrap_frame_output(self, result, obj: DataFrame) -> DataFrame:
result_index = self.grouper.levels[0]
if self.axis == 0:
return self.obj._constructor(
result, index=obj.columns, columns=result_index
).T
else:
return self.obj._constructor(result, index=obj.index, columns=result_index)
def _get_data_to_aggregate(self) -> BlockManager:
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._mgr
else:
return obj._mgr
def _insert_inaxis_grouper_inplace(self, result: DataFrame) -> None:
# zip in reverse so we can always insert at loc 0
columns = result.columns
for name, lev, in_axis in zip(
reversed(self.grouper.names),
reversed(self.grouper.get_group_levels()),
reversed([grp.in_axis for grp in self.grouper.groupings]),
):
# GH #28549
# When using .apply(-), name will be in columns already
if in_axis and name not in columns:
result.insert(0, name, lev)
def _wrap_aggregated_output(
self,
output: Mapping[base.OutputKey, Union[Series, np.ndarray]],
index: Optional[Index],
) -> DataFrame:
"""
Wraps the output of DataFrameGroupBy aggregations into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray]]
Data to wrap.
Returns
-------
DataFrame
"""
indexed_output = {key.position: val for key, val in output.items()}
columns = Index([key.label for key in output])
columns._set_names(self._obj_with_exclusions._get_axis(1 - self.axis).names)
result = self.obj._constructor(indexed_output)
result.columns = columns
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
result.index = self.grouper.result_index
if self.axis == 1:
result = result.T
return self._reindex_output(result)
def _wrap_transformed_output(
self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]]
) -> DataFrame:
"""
Wraps the output of DataFrameGroupBy transformations into the expected result.
Parameters
----------
output : Mapping[base.OutputKey, Union[Series, np.ndarray]]
Data to wrap.
Returns
-------
DataFrame
"""
indexed_output = {key.position: val for key, val in output.items()}
result = self.obj._constructor(indexed_output)
if self.axis == 1:
result = result.T
result.columns = self.obj.columns
else:
columns = Index(key.label for key in output)
columns.name = self.obj.columns.name
result.columns = columns
result.index = self.obj.index
return result
def _wrap_agged_blocks(self, blocks: Sequence["Block"], items: Index) -> DataFrame:
if not self.as_index:
index = np.arange(blocks[0].values.shape[-1])
mgr = BlockManager(blocks, axes=[items, index])
result = self.obj._constructor(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, axes=[items, index])
result = self.obj._constructor(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(
self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions,
)
def _apply_to_column_groupbys(self, func) -> DataFrame:
from pandas.core.reshape.concat import concat
return concat(
(func(col_groupby) for _, col_groupby in self._iterate_column_groupbys()),
keys=self._selected_obj.columns,
axis=1,
)
def count(self) -> DataFrame:
"""
Compute count of group, excluding missing values.
Returns
-------
DataFrame
Count of values within each group.
"""
data = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
def hfunc(bvalues: ArrayLike) -> ArrayLike:
# TODO(2DEA): reshape would not be necessary with 2D EAs
if bvalues.ndim == 1:
# EA
masked = mask & ~isna(bvalues).reshape(1, -1)
else:
masked = mask & ~isna(bvalues)
counted = lib.count_level_2d(masked, labels=ids, max_bin=ngroups, axis=1)
return counted
new_mgr = data.apply(hfunc)
# If we are grouping on categoricals we want unobserved categories to
# return zero, rather than the default of NaN which the reindexing in
# _wrap_agged_blocks() returns. GH 35028
with com.temp_setattr(self, "observed", True):
result = self._wrap_agged_blocks(new_mgr.blocks, items=data.items)
return self._reindex_output(result, fill_value=0)
def nunique(self, dropna: bool = True) -> DataFrame:
"""
Return DataFrame with counts of unique elements in each position.
Parameters
----------
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
nunique: DataFrame
Examples
--------
>>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')})
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique()
value1 value2
id
egg 1 1
ham 1 2
spam 2 1
Check for rows with the same id but conflicting values:
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
4 ham 5 x
5 ham 5 y
"""
from pandas.core.reshape.concat import concat
# TODO: this is duplicative of how GroupBy naturally works
# Try to consolidate with normal wrapping functions
obj = self._obj_with_exclusions
axis_number = obj._get_axis_number(self.axis)
other_axis = int(not axis_number)
if axis_number == 0:
iter_func = obj.items
else:
iter_func = obj.iterrows
results = concat(
[
SeriesGroupBy(content, selection=label, grouper=self.grouper).nunique(
dropna
)
for label, content in iter_func()
],
axis=1,
)
results = cast(DataFrame, results)
if axis_number == 1:
results = results.T
results._get_axis(other_axis).names = obj._get_axis(other_axis).names
if not self.as_index:
results.index = ibase.default_index(len(results))
self._insert_inaxis_grouper_inplace(results)
return results
boxplot = boxplot_frame_groupby
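# Illustrative, hedged usage sketch (not part of the pandas source above): a
# minimal demo of the DataFrameGroupBy paths implemented in this class. The
# helper below is hypothetical and only assumes a regular pandas installation.
def _groupby_usage_demo():
    import pandas as pd

    df = pd.DataFrame({"key": ["a", "a", "b", "b"], "val": [1, 2, 3, 4]})
    gb = df.groupby("key")

    # transform with a reduction kernel broadcasts the per-group result back
    # to the original index (the _transform_fast path above)
    demeaned = df["val"] - gb["val"].transform("mean")

    # filter keeps whole groups for which the callable returns a scalar bool
    # (see DataFrameGroupBy.filter above)
    big_groups = gb.filter(lambda g: g["val"].sum() > 3)

    # nunique counts distinct values per column within each group
    counts = gb.nunique()
    return demeaned, big_groups, counts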
| 34.434498
| 88
| 0.569447
|
6281f45645da51434958f60a0e90ed49511dc599
| 6,196
|
py
|
Python
|
Examples/DicomConvert/DicomConvert.py
|
rickardcronholm/SimpleITK
|
6850980606e5f0c09af29379181aea72c0cd9c2a
|
[
"Apache-2.0"
] | 576
|
2015-01-14T12:47:35.000Z
|
2022-03-31T07:45:52.000Z
|
Examples/DicomConvert/DicomConvert.py
|
resace3/SimpleITK
|
4e04ab7936038d91c5dc8bac991833becb88a69e
|
[
"Apache-2.0"
] | 874
|
2015-01-15T10:19:16.000Z
|
2022-03-29T16:51:12.000Z
|
Examples/DicomConvert/DicomConvert.py
|
resace3/SimpleITK
|
4e04ab7936038d91c5dc8bac991833becb88a69e
|
[
"Apache-2.0"
] | 186
|
2015-01-16T15:39:27.000Z
|
2022-03-21T17:22:35.000Z
|
# =========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =========================================================================
import argparse
import csv
import functools
import itertools
import multiprocessing
import os
import sys
import SimpleITK as sitk
def convert_image(input_file_name, output_file_name, new_width=None):
try:
image_file_reader = sitk.ImageFileReader()
# only read DICOM images
image_file_reader.SetImageIO('GDCMImageIO')
image_file_reader.SetFileName(input_file_name)
image_file_reader.ReadImageInformation()
image_size = list(image_file_reader.GetSize())
if len(image_size) == 3 and image_size[2] == 1:
image_size[2] = 0
image_file_reader.SetExtractSize(image_size)
image = image_file_reader.Execute()
if new_width:
original_size = image.GetSize()
original_spacing = image.GetSpacing()
new_spacing = [(original_size[0] - 1) * original_spacing[0]
/ (new_width - 1)] * 2
new_size = [new_width, int((original_size[1] - 1)
* original_spacing[1] / new_spacing[1])]
image = sitk.Resample(image1=image, size=new_size,
transform=sitk.Transform(),
interpolator=sitk.sitkLinear,
outputOrigin=image.GetOrigin(),
outputSpacing=new_spacing,
outputDirection=image.GetDirection(),
defaultPixelValue=0,
outputPixelType=image.GetPixelID())
# If this is a single-channel image, rescale to [0, 255]. Also modify the
# intensity values based on the photometric interpretation. If
# MONOCHROME2 (minimum should be displayed as black) we don't need to
# do anything; if the image has MONOCHROME1 (minimum should be displayed as
# white) we flip the intensities. This is a constraint imposed by ITK
# which always assumes MONOCHROME2.
if image.GetNumberOfComponentsPerPixel() == 1:
image = sitk.RescaleIntensity(image, 0, 255)
if image_file_reader.GetMetaData('0028|0004').strip() \
== 'MONOCHROME1':
image = sitk.InvertIntensity(image, maximum=255)
image = sitk.Cast(image, sitk.sitkUInt8)
sitk.WriteImage(image, output_file_name)
return True
except BaseException:
return False
def convert_images(input_file_names, output_file_names, new_width):
MAX_PROCESSES = 15
with multiprocessing.Pool(processes=MAX_PROCESSES) as pool:
return pool.starmap(functools.partial(convert_image,
new_width=new_width),
zip(input_file_names, output_file_names))
def positive_int(int_str):
value = int(int_str)
if value <= 0:
raise argparse.ArgumentTypeError(
int_str + ' is not a positive integer value')
return value
def directory(dir_name):
if not os.path.isdir(dir_name):
raise argparse.ArgumentTypeError(dir_name +
' is not a valid directory name')
return dir_name
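# Illustrative, hedged usage sketch (not part of the original example): convert a
# single DICOM slice with the helper defined above; the file paths below are
# hypothetical.
def _convert_single_demo():
    ok = convert_image('/data/case01/slice_0001.dcm',
                       '/tmp/slice_0001.png',
                       new_width=512)
    if not ok:
        print('conversion failed (file is probably not a readable DICOM image)')


# Typical command line for the full script (output type inferred from extension):
#   python DicomConvert.py /data/dicom_root png --w 512 --od /tmp/converted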
def main(argv=None):
parser = argparse.ArgumentParser(
description='Convert and resize DICOM files to common image types.')
parser.add_argument('root_of_data_directory', type=directory,
help='Path to the topmost directory containing data.')
parser.add_argument(
'output_file_extension',
help='Image file extension, this determines output file type '
'(e.g. png).')
parser.add_argument('--w', type=positive_int,
help='Width of converted images.')
parser.add_argument('--od', type=directory, help='Output directory.')
args = parser.parse_args(argv)
input_file_names = []
for dir_name, subdir_names, file_names in os.walk(
args.root_of_data_directory):
input_file_names += [os.path.join(os.path.abspath(dir_name), fname)
for fname in file_names]
if args.od:
# if all output files are written to the same directory we need them
# to have a unique name, so use an index.
file_names = [os.path.join(os.path.abspath(args.od), str(i))
for i in range(len(input_file_names))]
else:
file_names = input_file_names
output_file_names = [file_name + '.' + args.output_file_extension
for file_name in file_names]
res = convert_images(input_file_names, output_file_names, args.w)
input_file_names = list(itertools.compress(input_file_names, res))
output_file_names = list(itertools.compress(output_file_names, res))
# save csv file mapping input and output file names.
# using csv module and not pandas so as not to create more dependencies
# for the examples. pandas based code is more elegant/shorter.
dir_name = args.od if args.od else os.getcwd()
with open(os.path.join(dir_name, 'file_names.csv'), mode='w') as fp:
fp_writer = csv.writer(fp, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
fp_writer.writerow(['input file name', 'output file name'])
for data in zip(input_file_names, output_file_names):
fp_writer.writerow(data)
if __name__ == "__main__":
sys.exit(main())
| 42.731034
| 79
| 0.615236
|
820d91bba5a5f7076009010ceb7d4ee4d9802150
| 5,358
|
py
|
Python
|
ppci/wasm/io.py
|
rakati/ppci-mirror
|
8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2
|
[
"BSD-2-Clause"
] | null | null | null |
ppci/wasm/io.py
|
rakati/ppci-mirror
|
8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2
|
[
"BSD-2-Clause"
] | null | null | null |
ppci/wasm/io.py
|
rakati/ppci-mirror
|
8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2
|
[
"BSD-2-Clause"
] | null | null | null |
""" This module assists with reading and writing wasm to binary.
"""
from ..utils.leb128 import signed_leb128_encode, unsigned_leb128_encode
from ..utils.leb128 import unsigned_leb128_decode, signed_leb128_decode
from ..format.io import BaseIoReader, BaseIoWriter
LANG_TYPES = {
"i32": b"\x7f",
"i64": b"\x7e",
"f32": b"\x7d",
"f64": b"\x7c",
"anyfunc": b"\x70",
"func": b"\x60",
"emptyblock": b"\x40", # pseudo type for representing an empty block_type
}
LANG_TYPES_REVERSE = {v[0]: k for k, v in LANG_TYPES.items()}
class FileWriter(BaseIoWriter):
""" Helper class that can write bytes to a file """
def write(self, bb):
return self.f.write(bb)
def write_f64(self, x):
self.write_fmt("<d", x)
def write_f32(self, x):
self.write_fmt("<f", x)
def write_u32(self, x):
self.write_fmt("<I", x)
def write_str(self, x):
bb = x.encode("utf-8")
self.write_vu32(len(bb))
self.f.write(bb)
def write_vs64(self, x):
bb = signed_leb128_encode(x)
if not len(bb) <= 10:
raise ValueError("Cannot pack {} into 10 bytes".format(x))
self.f.write(bb)
def write_vs32(self, x):
bb = signed_leb128_encode(x)
if not len(bb) <= 5: # 5 = ceil(32/7)
raise ValueError("Cannot pack {} into 5 bytes".format(x))
self.f.write(bb)
def write_vu32(self, x):
bb = unsigned_leb128_encode(x)
assert len(bb) <= 5
self.f.write(bb)
def write_vu7(self, x):
bb = unsigned_leb128_encode(x)
assert len(bb) == 1
self.f.write(bb)
def write_vu1(self, x):
bb = unsigned_leb128_encode(x)
assert len(bb) == 1
self.f.write(bb)
def write_type(self, typ: str):
""" Write type """
self.write(LANG_TYPES[typ])
def write_limits(self, min, max):
if max is None:
self.write(b"\x00")
self.write_vu32(min)
else:
self.write(b"\x01")
self.write_vu32(min)
self.write_vu32(max)
def write_expression(self, expression):
""" Write an expression (a list of instructions) """
for instruction in expression:
instruction._to_writer(self)
# Encode explicit end:
from .components import Instruction
Instruction("end")._to_writer(self)
class FileReader(BaseIoReader):
""" Helper class that can read bytes from a file """
def __init__(self, f):
super().__init__(f)
self._buffer = bytes()
self._pos = 0
def read(self, amount=None):
if amount is not None and amount < 0:
raise ValueError("Cannot read {} bytes".format(amount))
data = self.f.read(amount)
if amount is not None and len(data) != amount:
raise EOFError("Reading beyond end of file")
return data
def read_data(self, amount):
return self.read(amount)
def bytefile(self, f):
b = f.read(1)
while b:
yield b[0]
b = f.read(1)
def __next__(self):
b = self.read(1)
return b[0]
def read_byte(self):
""" Read the value of a single byte """
data = self.read(1)
return data[0]
def read_int(self):
""" Read variable size signed int """
return signed_leb128_decode(self)
def read_uint(self):
""" Read variable size unsigned integer """
return unsigned_leb128_decode(self)
def read_f32(self) -> float:
return self.read_fmt("f")
def read_f64(self) -> float:
return self.read_fmt("d")
def read_u32(self) -> int:
return self.read_fmt("<I")
def read_bytes(self) -> bytes:
""" Read raw bytes data """
amount = self.read_uint()
return self.read(amount)
def read_str(self):
""" Read a string """
data = self.read_bytes()
return data.decode("utf-8")
def read_type(self):
""" Read a wasm type """
tp = self.read_byte()
return LANG_TYPES_REVERSE[tp]
def read_limits(self):
""" Read min and max limits """
mx_present = self.read(1)[0]
assert mx_present in [0, 1]
minimum = self.read_uint()
if mx_present:
maximum = self.read_uint()
else:
maximum = None
return minimum, maximum
def read_expression(self):
""" Read instructions until an end marker is found """
expr = []
blocks = 1
i = self.read_instruction()
# keep track of if/block/loop etc:
if i.opcode == "end":
blocks -= 1
elif i.opcode in ("if", "block", "loop"):
blocks += 1
expr.append(i)
while blocks:
i = self.read_instruction()
# print(i)
if i.opcode == "end":
blocks -= 1
elif i.opcode in ("if", "block", "loop"):
blocks += 1
expr.append(i)
# Strip off the last end opcode:
assert expr[-1].opcode == "end"
return expr[:-1]
def read_instruction(self):
""" Read a single instruction """
# TODO: resolve this import hack:
from .components import Instruction
return Instruction(self)
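# Illustrative, hedged round-trip sketch (not part of the original module). It
# assumes BaseIoWriter, like FileReader, is constructed from a binary file-like
# object; if its constructor differs, adapt the FileWriter(...) call accordingly.
def _leb128_roundtrip_demo():
    import io

    buf = io.BytesIO()
    writer = FileWriter(buf)
    writer.write_vu32(624485)      # unsigned LEB128, at most 5 bytes
    writer.write_vs32(-123456)     # signed LEB128, at most 5 bytes
    writer.write_type("i32")       # single-byte wasm value type

    buf.seek(0)
    reader = FileReader(buf)
    assert reader.read_uint() == 624485
    assert reader.read_int() == -123456
    assert reader.read_type() == "i32"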
| 27.060606
| 78
| 0.558231
|
9a2d3184b054060728ca03855f5b6c358ec8ecf9
| 12,327
|
py
|
Python
|
clearml/backend_interface/task/log.py
|
manojlds/clearml
|
b21e93272682af99fffc861224f38d65b42c2354
|
[
"Apache-2.0"
] | null | null | null |
clearml/backend_interface/task/log.py
|
manojlds/clearml
|
b21e93272682af99fffc861224f38d65b42c2354
|
[
"Apache-2.0"
] | null | null | null |
clearml/backend_interface/task/log.py
|
manojlds/clearml
|
b21e93272682af99fffc861224f38d65b42c2354
|
[
"Apache-2.0"
] | null | null | null |
import json
import sys
from pathlib2 import Path
from logging import LogRecord, getLogger, basicConfig, getLevelName, INFO, WARNING, Formatter, makeLogRecord, warning
from logging.handlers import BufferingHandler
from six.moves.queue import Queue as TrQueue
from threading import Event as TrEvent
from .development.worker import DevWorker
from ...backend_api.services import events
from ...backend_api.session.session import MaxRequestSizeError
from ...config import config
from ...utilities.process.mp import BackgroundMonitor
from ...utilities.process.mp import SafeQueue as PrQueue, SafeEvent
class BackgroundLogService(BackgroundMonitor):
__max_event_size = 1024 * 1024
def __init__(self, session, wait_period, worker=None, task=None, offline_log_filename=None):
super(BackgroundLogService, self).__init__(task=task, wait_period=wait_period)
self._worker = worker
self._task_id = task.id
self._queue = TrQueue()
self._flush = TrEvent()
self._last_event = None
self._offline_log_filename = offline_log_filename
self.session = session
self.counter = 1
self._last_timestamp = 0
def stop(self):
if isinstance(self._queue, PrQueue):
self._queue.close(self._event)
super(BackgroundLogService, self).stop()
self.flush()
def daemon(self):
# multiple daemons are supported
while not self._event.wait(0):
self._flush.wait(self._wait_timeout)
self._flush.clear()
self.send_all_records()
# flush all leftover events
self.send_all_records()
def _send_events(self, a_request):
if not a_request or not a_request.requests:
return
try:
if self._offline_log_filename:
with open(self._offline_log_filename.as_posix(), 'at') as f:
f.write(json.dumps([b.to_dict() for b in a_request.requests]) + '\n')
return
# if self._thread is None:
# self._log_stderr('Task.close() flushing remaining logs ({})'.format(self.pending))
res = self.session.send(a_request)
if res and not res.ok():
# noinspection PyProtectedMember
TaskHandler._log_stderr("failed logging task to backend ({:d} lines, {})".format(
len(a_request.requests), str(res.meta)), level=WARNING)
except MaxRequestSizeError:
# noinspection PyProtectedMember
TaskHandler._log_stderr("failed logging task to backend ({:d} lines) log size exceeded limit".format(
len(a_request.requests)), level=WARNING)
except Exception as ex:
# noinspection PyProtectedMember
TaskHandler._log_stderr("Retrying, failed logging task to backend ({:d} lines): {}".format(
len(a_request.requests), ex))
# we should push ourselves back into the thread pool
if self._queue:
self._queue.put(a_request)
def set_subprocess_mode(self):
if isinstance(self._queue, TrQueue):
self.send_all_records()
self._queue = PrQueue()
super(BackgroundLogService, self).set_subprocess_mode()
self._flush = SafeEvent()
def add_to_queue(self, record):
self._queue.put(record)
def empty(self):
return self._queue.empty() if self._queue else True
def send_all_records(self):
buffer = []
while self._queue and not self._queue.empty():
# noinspection PyBroadException
try:
request = self._queue.get(block=False)
if request:
buffer.append(request)
except Exception:
break
if buffer:
self._send_records(buffer)
def _record_to_event(self, record):
# type: (LogRecord) -> events.TaskLogEvent
timestamp = int(record.created * 1000)
if timestamp == self._last_timestamp:
timestamp += self.counter
self.counter += 1
else:
self._last_timestamp = timestamp
self.counter = 1
# ignore backspaces (they are often used)
full_msg = record.getMessage().replace('\x08', '')
return_events = []
while full_msg:
msg = full_msg[:self.__max_event_size]
full_msg = full_msg[self.__max_event_size:]
# unite all records in a single second
if self._last_event and timestamp - self._last_event.timestamp < 1000 and \
len(self._last_event.msg) + len(msg) < self.__max_event_size and \
record.levelname.lower() == str(self._last_event.level):
# ignore backspaces (they are often used)
self._last_event.msg += '\n' + msg
continue
# if we have a previous event and it timed out, return it.
new_event = events.TaskLogEvent(
task=self._task_id,
timestamp=timestamp,
level=record.levelname.lower(),
worker=self._worker,
msg=msg
)
if self._last_event:
return_events.append(self._last_event)
self._last_event = new_event
return return_events
def _send_records(self, records):
# if we have previous batch requests, send them first
buffer = []
for r in records:
if isinstance(r, events.AddBatchRequest):
self._send_events(r)
else:
buffer.append(r)
# noinspection PyBroadException
try:
record_events = [r for record in buffer for r in self._record_to_event(record)] + [self._last_event]
self._last_event = None
batch_requests = events.AddBatchRequest(requests=[events.AddRequest(e) for e in record_events if e])
self._send_events(batch_requests)
except Exception as ex:
# noinspection PyProtectedMember
TaskHandler._log_stderr(
"{}\nWARNING: trains.log - Failed logging task to backend ({:d} lines)".format(ex, len(buffer)))
def flush(self):
if self.is_alive():
self._flush.set()
class TaskHandler(BufferingHandler):
__flush_max_history_seconds = 30.
__wait_for_flush_timeout = 10.
__once = False
__offline_filename = 'log.jsonl'
@property
def task_id(self):
return self._task_id
@task_id.setter
def task_id(self, value):
self._task_id = value
def __init__(self, task, capacity=None, use_subprocess=False):
capacity = capacity or config.get('log.task_log_buffer_capacity', 100)
super(TaskHandler, self).__init__(capacity)
self.task_id = task.id
self.worker = task.session.worker
self.counter = 0
self._offline_log_filename = None
if task.is_offline():
offline_folder = Path(task.get_offline_mode_folder())
offline_folder.mkdir(parents=True, exist_ok=True)
self._offline_log_filename = offline_folder / self.__offline_filename
self._background_log = BackgroundLogService(
worker=task.session.worker, task=task,
session=task.session, wait_period=DevWorker.report_period,
offline_log_filename=self._offline_log_filename)
self._background_log_size = 0
if use_subprocess:
self._background_log.set_subprocess_mode()
self._background_log.start()
def emit(self, record):
self.counter += 1
if self._background_log:
self._background_log.add_to_queue(record)
self._background_log_size += 1
def shouldFlush(self, record):
"""
Should the handler flush its buffer
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
if self._task_id is None:
return False
# if we need to add handlers to the base_logger,
# it will not automatically create a stream handler when first used, so we must configure it manually.
if not TaskHandler.__once:
base_logger = getLogger()
if len(base_logger.handlers) == 1 and isinstance(base_logger.handlers[0], TaskHandler):
if record.name != 'console' and not record.name.startswith('trains.'):
base_logger.removeHandler(self)
basicConfig()
base_logger.addHandler(self)
TaskHandler.__once = True
else:
TaskHandler.__once = True
# if we passed the max buffer
return (self.counter >= self.capacity and self._background_log and
self._background_log_size >= self.capacity)
def flush(self):
if self._task_id is None:
return
self.counter = 0
if self._background_log:
self._background_log.flush()
self._background_log_size = 0
def close(self, wait=False):
# self._log_stderr('Closing {} wait={}'.format(os.getpid(), wait))
# flush pending logs
if not self._task_id:
return
# avoid deadlocks: just skip the lock, we are shutting down anyway
self.lock = None
self._task_id = None
# shut down the TaskHandler; from this point onwards no events will be logged
_background_log = self._background_log
self._background_log = None
if _background_log:
if not _background_log.is_subprocess() or _background_log.is_alive():
_background_log.stop()
if wait:
# noinspection PyBroadException
try:
timeout = 1. if _background_log.empty() else self.__wait_for_flush_timeout
_background_log.wait(timeout=timeout)
if not _background_log.empty():
self._log_stderr('Flush timeout {}s exceeded, dropping last {} lines'.format(
timeout, self._background_log_size))
# self._log_stderr('Closing {} wait done'.format(os.getpid()))
except Exception:
pass
else:
_background_log.send_all_records()
# call super and remove the handler
super(TaskHandler, self).close()
@classmethod
def report_offline_session(cls, task, folder):
filename = Path(folder) / cls.__offline_filename
if not filename.is_file():
return False
with open(filename.as_posix(), 'rt') as f:
i = 0
while True:
try:
line = f.readline()
if not line:
break
list_requests = json.loads(line)
for r in list_requests:
r.pop('task', None)
i += 1
except StopIteration:
break
except Exception as ex:
warning('Failed reporting log, line {} [{}]'.format(i, ex))
batch_requests = events.AddBatchRequest(
requests=[events.TaskLogEvent(task=task.id, **r) for r in list_requests])
if batch_requests.requests:
res = task.session.send(batch_requests)
if res and not res.ok():
warning("failed logging task to backend ({:d} lines, {})".format(
len(batch_requests.requests), str(res.meta)))
return True
@staticmethod
def _log_stderr(msg, level=INFO):
# output directly to stderr, make sure we do not catch it.
# noinspection PyProtectedMember
write = sys.stderr._original_write if hasattr(sys.stderr, '_original_write') else sys.stderr.write
write('{asctime} - {name} - {levelname} - {message}\n'.format(
asctime=Formatter().formatTime(makeLogRecord({})),
name='trains.log', levelname=getLevelName(level), message=msg))
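# Illustrative, hedged sketch (not part of clearml): the same buffer-and-flush
# pattern that TaskHandler builds on, reduced to the standard library so the
# capacity/flush interaction above is easier to follow. All names below are
# hypothetical.
def _buffering_handler_demo():
    import logging
    from logging.handlers import BufferingHandler

    class PrintingBufferHandler(BufferingHandler):
        def flush(self):
            # emit the whole buffer at once, then clear it
            for record in self.buffer:
                print(self.format(record))
            self.buffer.clear()

    logger = logging.getLogger("buffer_demo")
    handler = PrintingBufferHandler(capacity=3)   # shouldFlush triggers every 3 records
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    for i in range(7):
        logger.info("message %d", i)
    handler.close()   # close() flushes whatever is still buffered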
| 39.509615
| 117
| 0.59682
|
4f1da6677bd84b7cd04ec87514da467638bdd710
| 2,612
|
py
|
Python
|
operators/spin_loop_correlator.py
|
liyang2019/VMC-ISGO
|
842a8826db75b764fc0e02024094badd6211ca2a
|
[
"Apache-2.0"
] | 7
|
2019-08-22T19:10:06.000Z
|
2020-11-24T20:15:37.000Z
|
operators/spin_loop_correlator.py
|
liyang2019/VMC-ISGO
|
842a8826db75b764fc0e02024094badd6211ca2a
|
[
"Apache-2.0"
] | null | null | null |
operators/spin_loop_correlator.py
|
liyang2019/VMC-ISGO
|
842a8826db75b764fc0e02024094badd6211ca2a
|
[
"Apache-2.0"
] | null | null | null |
"""Operators for multi-component spin system loop correlation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Tuple
import numpy as np
from operators.operator import Operator
class SpinLoopCorrelator(Operator):
def __init__(self, i, j, a=None, b=None, add_sign=False):
"""Initializes an Operator for multi-component spin loop correlator.
The spin correlator is of the form S^{a, b}_i(i...j), where i, j are
site indices, and a, b are spin indices. S^{a, b} is a SU(N) generator
such that S^{a, b}|c> = \delta_{bc}|a>. (i...j) is a loop permutation
operator, such that
(i...j)|...a_ia_{i + 1}...a_j...> = |...a_ja_i...a_{j - 1}...>
Args:
i, j: The site indices.
a, b: The spin indices.
add_sign: If True, add back the 'Fermi Sign'.
"""
self.i = i
self.j = j
self.a = a
self.b = b
self.add_sign = add_sign
def _get_sign(self, state: np.ndarray) -> int:
s = state[self.j]
if s.ndim == 0:
# rescale encoding
if self.i <= self.j:
n_diff = np.sum(state[self.i:self.j] != s)
else:
n_diff = (np.sum(state[:self.j] != s) +
np.sum(state[self.i:] != s))
else:
# onehot encoding
if self.i <= self.j:
n_diff = np.sum(np.any(state[self.i:self.j] != s, axis=-1))
else:
n_diff = (np.sum(np.any(state[:self.j] != s, axis=-1)) +
np.sum(np.any(state[self.i:] != s, axis=-1)))
return 1 if np.mod(n_diff, 2) == 0 else -1
def _loop_permute(self, state: np.ndarray) -> None:
s = state[self.j].copy()
if self.i <= self.j:
state[self.i + 1:self.j + 1] = state[self.i:self.j]
else:
state[1:self.j + 1] = state[:self.j]
state[0] = state[-1]
state[self.i + 1:] = state[self.i:-1]
state[self.i] = s
def find_states(self, state: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
temp = state.copy()
if self.b is not None and np.any(temp[self.j] != self.b):
return temp[None, ...], np.array([0.0])
self._loop_permute(temp)
if self.a is not None and self.b is not None:
temp[self.i] = self.a
return temp[None, ...], np.array([self._get_sign(state)
if self.add_sign else 1])
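# Illustrative, hedged usage sketch (not part of the original module): apply the
# loop correlator defined above to a small 4-site state in the integer ("rescale")
# encoding. The concrete spin values are hypothetical.
def _loop_correlator_demo():
    state = np.array([0, 1, 2, 1])               # spins on sites 0..3
    op = SpinLoopCorrelator(i=0, j=2, a=2, b=2)
    new_states, coeffs = op.find_states(state)
    # (0 1 2) moves the spin on site 2 to site 0 and shifts sites 0 and 1 to the
    # right; S^{2,2} then requires that the spin arriving at site 0 equals 2.
    return new_states[0], coeffs[0]              # -> [2, 0, 1, 1], 1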
| 34.826667
| 78
| 0.523354
|
4b40630ab453a99a311b890e97529289ac1a47b1
| 4,433
|
py
|
Python
|
pyOCD/pyDAPAccess/interface/pywinusb_backend.py
|
mesheven/mesh-pyocd-old
|
99ecfeeac95820dacab52a1280b0fba6d4f51fc9
|
[
"Apache-2.0"
] | 1
|
2018-04-17T08:54:33.000Z
|
2018-04-17T08:54:33.000Z
|
pyOCD/pyDAPAccess/interface/pywinusb_backend.py
|
mesheven/mesh-pyocd-old
|
99ecfeeac95820dacab52a1280b0fba6d4f51fc9
|
[
"Apache-2.0"
] | null | null | null |
pyOCD/pyDAPAccess/interface/pywinusb_backend.py
|
mesheven/mesh-pyocd-old
|
99ecfeeac95820dacab52a1280b0fba6d4f51fc9
|
[
"Apache-2.0"
] | null | null | null |
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from interface import Interface
import logging, os, collections
from time import time
try:
import pywinusb.hid as hid
except:
if os.name == "nt":
logging.error("PyWinUSB is required on a Windows Machine")
isAvailable = False
else:
isAvailable = True
class PyWinUSB(Interface):
"""
This class provides basic functions to access
a USB HID device using pywinusb:
- write/read an endpoint
"""
vid = 0
pid = 0
isAvailable = isAvailable
def __init__(self):
super(PyWinUSB, self).__init__()
# Vendor page and usage_id = 2
self.report = []
# deque used here instead of a synchronized Queue
# since read speeds are ~10-30% faster and are
# comparable to a list-based implementation.
self.rcv_data = collections.deque()
self.device = None
return
# handler called when a report is received
def rx_handler(self, data):
#logging.debug("rcv: %s", data[1:])
self.rcv_data.append(data[1:])
def open(self):
self.device.set_raw_data_handler(self.rx_handler)
self.device.open(shared=False)
@staticmethod
def getAllConnectedInterface():
"""
returns all the connected CMSIS-DAP devices
"""
all_devices = hid.find_all_hid_devices()
# find devices with good vid/pid
all_mbed_devices = []
for d in all_devices:
if (d.product_name.find("CMSIS-DAP") >= 0):
all_mbed_devices.append(d)
boards = []
for dev in all_mbed_devices:
try:
dev.open(shared=False)
report = dev.find_output_reports()
if (len(report) == 1):
new_board = PyWinUSB()
new_board.report = report[0]
new_board.vendor_name = dev.vendor_name
new_board.product_name = dev.product_name
new_board.serial_number = dev.serial_number
new_board.vid = dev.vendor_id
new_board.pid = dev.product_id
new_board.device = dev
new_board.device.set_raw_data_handler(new_board.rx_handler)
boards.append(new_board)
except Exception as e:
logging.error("Receiving Exception: %s", e)
dev.close()
return boards
def write(self, data):
"""
write data on the OUT endpoint associated to the HID interface
"""
for _ in range(64 - len(data)):
data.append(0)
#logging.debug("send: %s", data)
self.report.send([0] + data)
return
def read(self, timeout=1.0):
"""
read data on the IN endpoint associated to the HID interface
"""
start = time()
while len(self.rcv_data) == 0:
if time() - start > timeout:
# Read operations should typically take ~1-2ms.
# If this exception occurs, then it could indicate
# a problem in one of the following areas:
# 1. Bad usb driver causing either a dropped read or write
# 2. CMSIS-DAP firmware problem cause a dropped read or write
# 3. CMSIS-DAP is performing a long operation or is being
# halted in a debugger
raise Exception("Read timed out")
return self.rcv_data.popleft()
def setPacketCount(self, count):
# No interface level restrictions on count
self.packet_count = count
def getSerialNumber(self):
return self.serial_number
def close(self):
"""
close the interface
"""
logging.debug("closing interface")
self.device.close()
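# Illustrative, hedged usage sketch (not part of the original backend). The
# enumeration helper above already opens each matching HID device, so the demo
# goes straight to write/read; the 0x00/0x04 payload (DAP_Info, firmware version)
# is taken from the CMSIS-DAP specification, not from this file.
def _pywinusb_demo():
    boards = PyWinUSB.getAllConnectedInterface()
    if not boards:
        raise Exception("no CMSIS-DAP device found")
    probe = boards[0]
    try:
        probe.write([0x00, 0x04])            # DAP_Info request
        print(probe.read(timeout=1.0))       # 64-byte response packet
    finally:
        probe.close()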
| 31.892086
| 79
| 0.596887
|
f2157e143c3dd62675d413581809201a46201b89
| 5,838
|
py
|
Python
|
mask_green_background_bbox.py
|
imitrob/imitrob_dataset_code
|
ac03e9d32c678408a658e9c00658da701ff68878
|
[
"BSD-3-Clause"
] | null | null | null |
mask_green_background_bbox.py
|
imitrob/imitrob_dataset_code
|
ac03e9d32c678408a658e9c00658da701ff68878
|
[
"BSD-3-Clause"
] | null | null | null |
mask_green_background_bbox.py
|
imitrob/imitrob_dataset_code
|
ac03e9d32c678408a658e9c00658da701ff68878
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Mask out green background and the outside of gluegun 3D bounding box.
Jiri Sedlar, 2019
Intelligent Machine Perception Project (IMPACT)
http://impact.ciirc.cvut.cz/
CIIRC, Czech Technical University in Prague
"""
import numpy as np
import cv2
import skimage.morphology
import matplotlib.pyplot as plt
from PIL import Image
def get_gluegun_bbox_3d():
bbox_3d = np.zeros((8,3), float)
# Bounding box in coordinate system of gluegun endpoint:
# axis x ... forward in direction of the endpoint
# axis y ... down
# axis z ... left
gluegun_length = 0.265
gluegun_width = 0.12
from_endpoint_to_top = 0.135
from_endpoint_to_bottom = 0.18
bbox_x = [0., -gluegun_length]
bbox_y = [from_endpoint_to_bottom, -from_endpoint_to_top]
bbox_z = [gluegun_width / 2., -gluegun_width / 2.]
for i in range(2):
for j in range(2):
for k in range(2):
if j == 0:
bbox_3d[4*i + 2*j + k] = [bbox_x[1-i], bbox_y[1-j], bbox_z[1-k]]
else:
bbox_3d[4*i + 2*j + k] = [bbox_x[1-i], bbox_y[1-j], bbox_z[k]]
return bbox_3d
def get_gluegun_internal_calibration_matrix():
internal_calibration_matrix = np.asarray([[543.5049721588473, 0.0, 324.231500840495], [0.0, 541.6044368823277, 244.3817928406171], [0.0, 0.0, 1.0]], dtype='float32')
return internal_calibration_matrix
def project_points_from_3d_to_2d(points_3d, transformation, internal_calibration_matrix):
points_2d = np.zeros((2, points_3d.shape[1]), dtype='float32')
camera_projection = (internal_calibration_matrix.dot(transformation)).dot(points_3d)
for i in range(2):
points_2d[i, :] = camera_projection[i, :] / camera_projection[2, :]
return points_2d
def crop_hand_by_bbox(rgba, pose, bbox_3d=None, internal_calibration_matrix=None):
if internal_calibration_matrix is None:
internal_calibration_matrix = get_gluegun_internal_calibration_matrix()
if bbox_3d is None:
bbox_3d = get_gluegun_bbox_3d()
bbox_3d_h = np.c_[bbox_3d, np.ones((bbox_3d.shape[0], 1))].transpose()
# column ... 3D homogeneous coordinates of 1 vertex
# rgba = cv2.imread(fname_rgba, -1)
h = np.shape(rgba)[0]
w = np.shape(rgba)[1]
if np.shape(rgba)[2] == 3:
tmp = 255. * np.ones((h, w, np.shape(rgba)[2] + 1))
tmp[:, :, :-1] = rgba
rgba = tmp
# Rt_gt = np.loadtxt(fname_pose, dtype='float32')[:3, :]
Rt_gt = pose.astype(np.float32)
bbox_2d = project_points_from_3d_to_2d(bbox_3d_h, Rt_gt, internal_calibration_matrix)
mask = np.zeros([h, w])
for j in range(len(bbox_2d[0])):
x = int(np.round(bbox_2d[0, j]))
y = int(np.round(bbox_2d[1, j]))
mask[max(0, min(y, h-1)), max(0, min(x, w-1))] = 1
mask = skimage.morphology.convex_hull.convex_hull_image(mask)
mask = np.asarray(mask, float)
mask = cv2.GaussianBlur(mask, (5,5), 0)
rgba[:, :, 3] = mask * rgba[:, :, 3]
return rgba, mask
def rgba_from_rgb_green_bg(rgb, r_minus_g_threshold=0, b_minus_g_threshold=-48):
# rgb = cv2.imread(fname_rgb, 1)
rgb = np.asarray(rgb, float)
r = rgb[:,:,2]
g = rgb[:,:,1]
b = rgb[:,:,0]
gbr = (2. * g - b - r)
gbr = (gbr - np.min(gbr)) / (np.max(gbr) - np.min(gbr))
alpha = gbr
mask = np.zeros_like(g) # 1 .. foreground, 0 .. green background
mask[r - g > r_minus_g_threshold] = 1.
mask[b - g > b_minus_g_threshold] = 1.
kernel_ci = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
kernel_sq = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
mask = cv2.dilate(mask, kernel_sq, iterations = 1)
mask = cv2.erode(mask, kernel_ci, iterations = 1)
mask_inside = cv2.erode(mask, kernel_sq, iterations = 1)
mask_boundary = mask - mask_inside
mask = mask_inside + alpha * mask_boundary
mask = cv2.GaussianBlur(mask, (3,3), 0)
rgba = np.zeros([np.shape(rgb)[0], np.shape(rgb)[1], 4])
rgba[:,:,2] = r
rgba[:,:,1] = np.min([g, (r+b)/2], axis=0) # green
rgba[:,:,0] = b
rgba[:,:,3] = 255. * mask # alpha
return rgba, mask
# DEMO:
#fname = 'C2F0000'
#folder = 'LH_T01/'
#fname_rgb = folder + fname + '.png' # TODO: Images/
#fname_pose = folder + fname + '.txt' # TODO: Pose/
##fname_rgb = r'E:\Python 3 projects\tool_recognition_dataset\gun_in_hand_organized\LH\T01\Images\C2F0000.png'
#fname_rgb = r'E:\Python 3 projects\tool_recognition_dataset\LH_T01\C2F0000.png'
#fname_pose = r'E:\Python 3 projects\tool_recognition_dataset\gun_in_hand_organized\LH\T01\Pose\C2F0000.txt'
#
##fname_rgba = folder + fname + '_rgba.png'
##fname_mask = folder + fname + '_mask.png'
#
#rgb_raw = cv2.imread(fname_rgb, 1)
#
#rgba, mask_green = rgba_from_rgb_green_bg(rgb_raw)
#
#plt.imshow(rgba[:, :, :3])
#plt.show()
#
#pose = np.loadtxt(fname_pose, dtype='float32')[:3, :]
#
##cv2.imwrite(fname_rgba, rgba)
#rgba, mask_bbox = crop_hand_by_bbox(rgba, pose)
#
#plt.imshow(rgba[:, :, :3])
#plt.show()
#
##cv2.imwrite(fname_rgba, rgba)
#mask = mask_green * mask_bbox
#
#plt.imshow(mask)
#plt.show()
#
##cv2.imwrite(fname_mask, 255. * mask)
#
#rgb = np.stack([rgba[:, :, 2],rgba[:, :, 1],rgba[:, :, 0]],axis=2).astype(np.uint8)
#
##fname_augmented = folder + fname + '_augm.png'
##color_list = [135, 235, 135] # random background color
#color_list = [255, 0, 0] # random background color
#for i in range(len(color_list)):
# rgb[:, :, i] = mask * rgb[:, :, i] + (1. - mask) * color_list[i]
##cv2.imwrite(fname_augmented, rgba[:, :, :3])
#
##RGB_final = np.stack([rgba[:, :, 2],rgba[:, :, 1],rgba[:, :, 0]],axis=2).astype(np.uint8)
#
#out = Image.fromarray(rgb)
#
#plt.imshow(out)
#plt.show()
#fname_augmented = r'E:\Python 3 projects\tool_recognition_dataset\LH_T01\C2F0000_augumented.png'
#cv2.imwrite(fname_augmented, rgba[:, :, :3])
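# Illustrative, hedged usage sketch: the same pipeline as the commented demo
# above, condensed into one helper; the image and pose paths are hypothetical.
def _mask_demo(fname_rgb='C2F0000.png', fname_pose='C2F0000.txt'):
    rgb_raw = cv2.imread(fname_rgb, 1)
    pose = np.loadtxt(fname_pose, dtype='float32')[:3, :]
    rgba, mask_green = rgba_from_rgb_green_bg(rgb_raw)
    rgba, mask_bbox = crop_hand_by_bbox(rgba, pose)
    mask = mask_green * mask_bbox                  # 1 inside the object, 0 outside
    return rgba, mask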
| 34.140351
| 169
| 0.645769
|
7df252d031f6dd90782b4f7e08141fb598820fc0
| 1,929
|
py
|
Python
|
buildroot/support/testing/tests/download/gitremote.py
|
bramkragten/operating-system
|
27fc2de146f1ef047316a4b58a236c72d26da81c
|
[
"Apache-2.0"
] | 349
|
2021-08-17T08:46:53.000Z
|
2022-03-30T06:25:25.000Z
|
buildroot/support/testing/tests/download/gitremote.py
|
bramkragten/operating-system
|
27fc2de146f1ef047316a4b58a236c72d26da81c
|
[
"Apache-2.0"
] | 8
|
2020-04-02T22:51:47.000Z
|
2020-04-27T03:24:55.000Z
|
buildroot/support/testing/tests/download/gitremote.py
|
bramkragten/operating-system
|
27fc2de146f1ef047316a4b58a236c72d26da81c
|
[
"Apache-2.0"
] | 12
|
2021-08-17T20:10:30.000Z
|
2022-01-06T10:52:54.000Z
|
# subprocess does not kill the child daemon when a test case fails by raising
# an exception. So use pexpect instead.
import infra
import pexpect
GIT_REMOTE_PORT_INITIAL = 9418
GIT_REMOTE_PORT_LAST = GIT_REMOTE_PORT_INITIAL + 99
class GitRemote(object):
def __init__(self, builddir, serveddir, logtofile):
"""
Start a local git server.
In order to support running test cases in parallel, the port the
server listens on is selected at runtime. Since there is no reliable
way to allocate the port before starting the server (another process
on the host machine could take the port between the moment it is
picked from a list and the moment the server actually binds to it),
try to start the server on a port and, if that port is already in
use, try the next one in the allowed range.
"""
self.daemon = None
self.port = None
self.logfile = infra.open_log_file(builddir, "gitremote", logtofile)
daemon_cmd = ["git", "daemon", "--reuseaddr", "--verbose",
"--listen=localhost", "--export-all",
"--base-path={}".format(serveddir)]
for port in range(GIT_REMOTE_PORT_INITIAL, GIT_REMOTE_PORT_LAST + 1):
cmd = daemon_cmd + ["--port={port}".format(port=port)]
self.logfile.write("> starting git remote with '{}'\n".format(" ".join(cmd)))
self.daemon = pexpect.spawn(cmd[0], cmd[1:], logfile=self.logfile,
encoding='utf-8')
ret = self.daemon.expect(["Ready to rumble",
"Address already in use"])
if ret == 0:
self.port = port
return
raise SystemError("Could not find a free port to run git remote")
def stop(self):
if self.daemon is None:
return
self.daemon.terminate(force=True)
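# Illustrative, hedged usage sketch (not part of the test infrastructure): start a
# throw-away daemon for a locally served directory and clone through it; the paths
# are hypothetical and logtofile mirrors how the download test cases call it.
def _gitremote_demo():
    import subprocess
    remote = GitRemote(builddir="/tmp/build", serveddir="/srv/git-repos",
                       logtofile=True)
    try:
        url = "git://localhost:{}/project.git".format(remote.port)
        subprocess.check_call(["git", "clone", url, "/tmp/project"])
    finally:
        remote.stop()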
| 40.1875
| 89
| 0.599793
|
02fc22792d645318532b986f768c9560d8558446
| 15,521
|
py
|
Python
|
prass.py
|
tp7/Prass
|
e8e5b5de530dd542f55fd07bee649e7171ef488d
|
[
"MIT"
] | 50
|
2015-01-07T07:25:31.000Z
|
2021-12-03T15:17:58.000Z
|
prass.py
|
tp7/Prass
|
e8e5b5de530dd542f55fd07bee649e7171ef488d
|
[
"MIT"
] | 6
|
2015-10-20T07:23:04.000Z
|
2021-01-08T15:12:42.000Z
|
prass.py
|
tp7/Prass
|
e8e5b5de530dd542f55fd07bee649e7171ef488d
|
[
"MIT"
] | 7
|
2016-03-20T09:34:42.000Z
|
2021-03-13T00:56:49.000Z
|
#!/usr/bin/env python2
import click
import sys
from operator import attrgetter
from common import PrassError, zip, map
from subs import AssScript
from tools import Timecodes, parse_keyframes
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
def parse_fps_string(fps_string):
if '/' in fps_string:
parts = fps_string.split('/')
if len(parts) > 2:
raise PrassError('Invalid fps value')
try:
return float(parts[0]) / float(parts[1])
except ValueError:
raise PrassError('Invalid fps value')
else:
try:
return float(fps_string)
except ValueError:
raise PrassError('Invalid fps value')
def parse_shift_string(shift_string):
try:
if ':' in shift_string:
negator = 1
if shift_string.startswith('-'):
negator = -1
shift_string = shift_string[1:]
parts = list(map(float, shift_string.split(':')))
if len(parts) > 3:
raise PrassError("Invalid shift value: '{0}'".format(shift_string))
shift_seconds = sum(part * multiplier for part, multiplier in zip(reversed(parts), (1.0, 60.0, 3600.0)))
return shift_seconds * 1000 * negator # convert to ms
else:
if shift_string.endswith("ms"):
return float(shift_string[:-2])
elif shift_string.endswith("s"):
return float(shift_string[:-1]) * 1000
else:
return float(shift_string) * 1000
except ValueError:
raise PrassError("Invalid shift value: '{0}'".format(shift_string))
def parse_resolution_string(resolution_string):
if resolution_string == '720p':
return 1280,720
if resolution_string == '1080p':
return 1920,1080
for separator in (':', 'x', ","):
if separator in resolution_string:
width, _, height = resolution_string.partition(separator)
try:
return int(width), int(height)
except ValueError:
raise PrassError("Invalid resolution string: '{0}'".format(resolution_string))
raise PrassError("Invalid resolution string: '{0}'".format(resolution_string))
@click.group(context_settings=CONTEXT_SETTINGS)
def cli():
pass
@cli.command("convert-srt", short_help="convert srt subtitles to ass")
@click.option("-o", "--output", "output_file", default='-', type=click.File(encoding="utf-8-sig", mode='w'))
@click.option("--encoding", "encoding", default='utf-8-sig', help="Encoding to use for the input SRT file")
@click.argument("input_path", type=click.Path(exists=True, dir_okay=False, allow_dash=True))
def convert_srt(input_path, output_file, encoding):
"""Convert SRT script to ASS.
\b
Example:
$ prass convert-srt input.srt -o output.ass --encoding cp1251
"""
try:
with click.open_file(input_path, encoding=encoding) as input_file:
AssScript.from_srt_stream(input_file).to_ass_stream(output_file)
except LookupError:
raise PrassError("Encoding {0} doesn't exist".format(encoding))
@cli.command('copy-styles', short_help="copy styles from one ass script to another")
@click.option("-o", "--output", "output_file", default="-", type=click.File(encoding="utf-8-sig", mode='w'))
@click.option('--to', 'dst_file', required=True, type=click.File(encoding='utf-8-sig', mode='r'),
help="File to copy the styles to")
@click.option('--from', 'src_file', required=True, type=click.File(encoding='utf-8-sig', mode='r'),
help="File to take the styles from")
@click.option('--clean', default=False, is_flag=True,
help="Remove all older styles in the destination file")
@click.option('--resample/--no-resample', 'resample', default=True,
help="Resample style resolution to match output script when possible")
@click.option('--resolution', 'forced_resolution', default=None, help="Assume resolution of the destination file")
def copy_styles(dst_file, src_file, output_file, clean, resample, forced_resolution):
"""Copy styles from one ASS script to another, write the result as a third script.
You always have to provide the "from" argument, "to" defaults to stdin and "output" defaults to stdout.
\b
Simple usage:
$ prass copy-styles --from template.ass --to unstyled.ass -o styled.ass
With pipes:
$ cat unstyled.ass | prass copy-styles --from template.ass | prass cleanup --comments -o out.ass
"""
src_script = AssScript.from_ass_stream(src_file)
dst_script = AssScript.from_ass_stream(dst_file)
if forced_resolution:
forced_resolution = parse_resolution_string(forced_resolution)
dst_script.append_styles(src_script, clean, resample, forced_resolution)
dst_script.to_ass_stream(output_file)
@cli.command('sort', short_help="sort ass script events")
@click.option("-o", "--output", "output_file", default='-', type=click.File(encoding="utf-8-sig", mode='w'), metavar="<path>")
@click.argument("input_file", type=click.File(encoding="utf-8-sig"))
@click.option('--by', 'sort_by', multiple=True, default=['start'], help="Parameter to sort by",
type=click.Choice(['time', 'start', 'end', 'style', 'actor', 'effect', 'layer']))
@click.option('--desc', 'descending', default=False, is_flag=True, help="Descending order")
def sort_script(input_file, output_file, sort_by, descending):
"""Sort script by one or more parameters.
\b
Sorting by time:
$ prass sort input.ass --by time -o output.ass
Sorting by time and then by layer, both in descending order:
$ prass sort input.ass --by time --by layer --desc -o output.ass
"""
script = AssScript.from_ass_stream(input_file)
attrs_map = {
"start": "start",
"time": "start",
"end": "end",
"style": "style",
"actor": "actor",
"effect": "effect",
"layer": "layer"
}
getter = attrgetter(*[attrs_map[x] for x in sort_by])
script.sort_events(getter, descending)
script.to_ass_stream(output_file)
@cli.command('tpp', short_help="timing post-processor")
@click.option("-o", "--output", "output_file", default='-', type=click.File(encoding="utf-8-sig", mode='w'), metavar="<path>")
@click.argument("input_file", type=click.File(encoding="utf-8-sig"))
@click.option("-s", "--style", "styles", multiple=True, metavar="<names>",
help="Style names to process. All by default. Use comma to separate, or supply it multiple times")
@click.option("--lead-in", "lead_in", default=0, type=int, metavar="<ms>",
help="Lead-in value in milliseconds")
@click.option("--lead-out", "lead_out", default=0, type=int, metavar="<ms>",
help="Lead-out value in milliseconds")
@click.option("--overlap", "max_overlap", default=0, type=int, metavar="<ms>",
help="Maximum overlap for two lines to be made continuous, in milliseconds")
@click.option("--gap", "max_gap", default=0, type=int, metavar="<ms>",
help="Maximum gap between two lines to be made continuous, in milliseconds")
@click.option("--bias", "adjacent_bias", default=50, type=click.IntRange(0, 100), metavar="<percent>",
help="How to set the adjoining of lines. "
"0 - change start time of the second line, 100 - end time of the first line. "
"Values from 0 to 100 allowed.")
@click.option("--keyframes", "keyframes_path", type=click.Path(exists=True, readable=True, dir_okay=False), metavar="<path>",
help="Path to keyframes file")
@click.option("--timecodes", "timecodes_path", type=click.Path(readable=True, dir_okay=False), metavar="<path>",
help="Path to timecodes file")
@click.option("--fps", "fps", metavar="<float>",
help="Fps provided as decimal or proper fraction, in case you don't have timecodes")
@click.option("--kf-before-start", default=0, type=float, metavar="<ms>",
help="Max distance between a keyframe and event start for it to be snapped, when keyframe is placed before the event")
@click.option("--kf-after-start", default=0, type=float, metavar="<ms>",
help="Max distance between a keyframe and event start for it to be snapped, when keyframe is placed after the start time")
@click.option("--kf-before-end", default=0, type=float, metavar="<ms>",
help="Max distance between a keyframe and event end for it to be snapped, when keyframe is placed before the end time")
@click.option("--kf-after-end", default=0, type=float, metavar="<ms>",
help="Max distance between a keyframe and event end for it to be snapped, when keyframe is placed after the event")
def tpp(input_file, output_file, styles, lead_in, lead_out, max_overlap, max_gap, adjacent_bias,
keyframes_path, timecodes_path, fps, kf_before_start, kf_after_start, kf_before_end, kf_after_end):
"""Timing post-processor.
It's a pretty straightforward port from Aegisub so you should be familiar with it.
You have to specify keyframes and timecodes (either as a CFR value or a timecodes file) if you want keyframe snapping.
All parameters default to zero so if you don't want something - just don't put it in the command line.
\b
To add lead-in and lead-out:
$ prass tpp input.ass --lead-in 50 --lead-out 150 -o output.ass
To make adjacent lines continuous, with 80% bias to changing end time of the first line:
$ prass tpp input.ass --overlap 50 --gap 200 --bias 80 -o output.ass
To snap events to keyframes without a timecodes file:
$ prass tpp input.ass --keyframes kfs.txt --fps 23.976 --kf-before-end 150 --kf-after-end 150 --kf-before-start 150 --kf-after-start 150 -o output.ass
"""
if fps and timecodes_path:
raise PrassError('Timecodes file and fps cannot be specified at the same time')
if fps:
timecodes = Timecodes.cfr(parse_fps_string(fps))
elif timecodes_path:
timecodes = Timecodes.from_file(timecodes_path)
elif any((kf_before_start, kf_after_start, kf_before_end, kf_after_end)):
raise PrassError('You have to provide either fps or timecodes file for keyframes processing')
else:
timecodes = None
if timecodes and not keyframes_path:
raise PrassError('You have to specify keyframes file for keyframes processing')
keyframes_list = parse_keyframes(keyframes_path) if keyframes_path else None
actual_styles = []
for style in styles:
actual_styles.extend(x.strip() for x in style.split(','))
script = AssScript.from_ass_stream(input_file)
script.tpp(actual_styles, lead_in, lead_out, max_overlap, max_gap, adjacent_bias,
keyframes_list, timecodes, kf_before_start, kf_after_start, kf_before_end, kf_after_end)
script.to_ass_stream(output_file)
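# Illustrative sketch (not prass's implementation): keyframe snapping moves an
# event boundary onto a nearby keyframe when that keyframe falls inside the
# configured window. Times are in milliseconds; the helper is invented for
# this demo.
def _snap_start_example(event_start, keyframe_times, kf_before_start, kf_after_start):
    for kf in keyframe_times:
        if 0 <= event_start - kf <= kf_before_start:  # keyframe shortly before the start
            return kf
        if 0 <= kf - event_start <= kf_after_start:   # keyframe shortly after the start
            return kf
    return event_start
# _snap_start_example(10100, [10000, 20000], 150, 150) -> 10000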
@cli.command("cleanup", short_help="remove useless data from ass scripts")
@click.option("-o", "--output", "output_file", default='-', type=click.File(encoding="utf-8-sig", mode='w'), metavar="<path>")
@click.argument("input_file", type=click.File(encoding="utf-8-sig"))
@click.option("--comments", "drop_comments", default=False, is_flag=True,
help="Remove commented lines")
@click.option("--empty-lines", "drop_empty_lines", default=False, is_flag=True,
help="Remove empty lines")
@click.option("--styles", "drop_unused_styles", default=False, is_flag=True,
help="Remove unused styles")
@click.option("--actors", "drop_actors", default=False, is_flag=True,
help="Remove actor field")
@click.option("--effects", "drop_effects", default=False, is_flag=True,
help="Remove effects field")
@click.option("--spacing", "drop_spacing", default=False, is_flag=True,
help="Removes double spacing and newlines")
@click.option("--sections", "drop_sections", type=click.Choice(["fonts", "graphics", "aegi", "extradata"]), multiple=True,
help="Remove optional sections from the script")
def cleanup(input_file, output_file, drop_comments, drop_empty_lines, drop_unused_styles,
drop_actors, drop_effects, drop_spacing, drop_sections):
"""Remove junk data from ASS script
\b
To remove commented and empty lines plus clear unused styles:
    $ prass cleanup input.ass --comments --empty-lines --styles -o output.ass
"""
sections_map = {
"fonts": "[Fonts]",
"graphics": "[Graphics]",
"aegi": "[Aegisub Project Garbage]",
"extradata": "[Aegisub Extradata]"
}
drop_sections = [sections_map[x] for x in drop_sections]
script = AssScript.from_ass_stream(input_file)
script.cleanup(drop_comments, drop_empty_lines, drop_unused_styles, drop_actors, drop_effects, drop_spacing, drop_sections)
script.to_ass_stream(output_file)
@cli.command("shift", short_help="shift start or end times of every event")
@click.option("-o", "--output", "output_file", default='-', type=click.File(encoding="utf-8-sig", mode='w'), metavar="<path>")
@click.argument("input_file", type=click.File(encoding="utf-8-sig"))
@click.option("--by", "shift_by", required=False, default="0", metavar="<time>",
help="Time to shift. Might be negative. 10.5s, 150ms or 1:12.23 formats are allowed, seconds assumed by default")
@click.option("--start", "shift_start", default=False, is_flag=True, help="Shift only start time")
@click.option("--end", "shift_end", default=False, is_flag=True, help="Shift only end time")
@click.option("--multiplier", "multiplier", default="1",
help="Multiplies timings by the value to change speed. Value is a decimal or proper fraction")
def shift(input_file, output_file, shift_by, shift_start, shift_end, multiplier):
"""Shift all lines in a script by defined amount and/or change speed.
\b
You can use one of the following formats to specify the time for shift:
- "1.5s" or just "1.5" means 1 second 500 milliseconds
- "150ms" means 150 milliseconds
- "1:7:12.55" means 1 hour, 7 minutes, 12 seconds and 550 milliseconds. All parts are optional.
Every format allows a negative sign before the value, which means "shift back", like "-12s"
\b
Optionally, specify multiplier to change speed:
- 1.2 makes subs 20% faster
- 7/8 makes subs 12.5% slower
\b
    To shift both start and end time by one minute and 15 seconds:
$ prass shift input.ass --by 1:15 -o output.ass
To shift only start time by half a second back:
$ prass shift input.ass --start --by -0.5s -o output.ass
"""
if not shift_start and not shift_end:
shift_start = shift_end = True
shift_ms = parse_shift_string(shift_by)
multiplier = parse_fps_string(multiplier)
    if multiplier <= 0:
raise PrassError('Speed multiplier should be a positive number')
script = AssScript.from_ass_stream(input_file)
script.shift(shift_ms, shift_start, shift_end, multiplier)
script.to_ass_stream(output_file)
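# Illustrative sketch (not prass's parse_shift_string): one way to turn the
# time formats described in the docstring above into milliseconds.
def _parse_shift_example(text):
    text = text.strip()
    sign = -1 if text.startswith("-") else 1
    text = text.lstrip("+-")
    if text.endswith("ms"):
        return sign * float(text[:-2])
    if text.endswith("s") and ":" not in text:
        return sign * float(text[:-1]) * 1000.0
    total = 0.0
    for part in text.split(":"):  # [hours:][minutes:]seconds[.fraction]
        total = total * 60.0 + float(part)
    return sign * total * 1000.0
# _parse_shift_example("1:7:12.55") -> 4032550.0; _parse_shift_example("-0.5s") -> -500.0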
if __name__ == '__main__':
default_map = {}
if not sys.stdin.isatty():
for command, arg_name in (("convert-srt", "input_path"), ("copy-styles", "dst_file"),
("sort", "input_file"), ("tpp", "input_file"), ("cleanup", "input_file"),
('shift', "input_file")):
default_map[command] = {arg_name: '-'}
cli(default_map=default_map)
| 49.746795
| 154
| 0.667934
|
453b808f95410145b7432fec2064281ef7da7c51
| 2,341
|
py
|
Python
|
tests/test_detection_coco.py
|
czhu12/brambox
|
eedcebd6781e7efcda188533fd22924bb128e89c
|
[
"MIT"
] | 3
|
2018-09-25T14:51:19.000Z
|
2020-02-06T15:52:46.000Z
|
tests/test_detection_coco.py
|
czhu12/brambox
|
eedcebd6781e7efcda188533fd22924bb128e89c
|
[
"MIT"
] | null | null | null |
tests/test_detection_coco.py
|
czhu12/brambox
|
eedcebd6781e7efcda188533fd22924bb128e89c
|
[
"MIT"
] | 1
|
2018-11-21T08:10:37.000Z
|
2018-11-21T08:10:37.000Z
|
import unittest
from brambox.boxes.detections.detection import Detection
from brambox.boxes.detections import CocoDetection, CocoParser
class TestCocoDetection(unittest.TestCase):
def setUp(self):
self.det = CocoDetection()
def tearDown(self):
pass
def test_det_deserialize(self):
""" test if deserialization of one detection works """
self.det.deserialize({"image_id": "V000/I00019.png",
"category_id": 1,
"bbox": [506.547791, 216.665741, 20.434814, 39.914307],
"score": 0.436614},
None)
self.assertAlmostEqual(self.det.x_top_left, 506.547791)
self.assertAlmostEqual(self.det.y_top_left, 216.665741)
self.assertAlmostEqual(self.det.width, 20.434814)
self.assertAlmostEqual(self.det.height, 39.914307)
self.assertAlmostEqual(self.det.confidence, 0.436614)
json_string = """[
{"image_id":"img_1", "category_id":1, "bbox":[506.547791, 216.665741, 20.434814, 39.914307], "score":0.436614},
{"image_id":"img_2", "category_id":1, "bbox":[72.131500, 207.804199, 32.555908, 63.634766], "score":0.125948},
{"image_id":"img_2", "category_id":2, "bbox":[73.131500, 207.804199, 33.555908, 64.634766], "score":0.56983}
]"""
class TestCocoParser(unittest.TestCase):
def setUp(self):
self.parser = CocoParser(class_label_map=None)
def tearDown(self):
pass
def test_parser_deserialize(self):
""" test basic deserialization with parser """
obj = self.parser.deserialize(json_string)
self.assertEqual(type(obj), dict)
self.assertEqual(type(obj['img_1']), list)
self.assertEqual(len(obj['img_1']), 1)
self.assertEqual(len(obj['img_2']), 2)
self.assertEqual(obj['img_1'][0].class_label, '1')
self.assertEqual(obj['img_1'][0].confidence, 0.436614)
def test_parser_deserialize_class_label_map(self):
""" test class label mapping with deserialize """
self.parser = CocoParser(class_label_map=['person', 'car'])
obj = self.parser.deserialize(json_string)
self.assertEqual(obj['img_1'][0].class_label, 'person')
self.assertEqual(obj['img_2'][1].class_label, 'car')
if __name__ == '__main__':
unittest.main()
| 37.758065
| 111
| 0.63947
|
d1dc66e1c60d4e9d75d4db6791a926d7f1e665f6
| 1,086
|
py
|
Python
|
CholletDL/chapter3/simple_dense_layer.py
|
Zilleplus/MachineLearning
|
13e4fe996386d3f66b7866cc133ae9b26a6333d6
|
[
"MIT"
] | null | null | null |
CholletDL/chapter3/simple_dense_layer.py
|
Zilleplus/MachineLearning
|
13e4fe996386d3f66b7866cc133ae9b26a6333d6
|
[
"MIT"
] | null | null | null |
CholletDL/chapter3/simple_dense_layer.py
|
Zilleplus/MachineLearning
|
13e4fe996386d3f66b7866cc133ae9b26a6333d6
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import tensorflow.keras as keras
class SimpleDense(keras.layers.Layer):
def __init__(self, units, activation=None):
super().__init__()
self.units = units
        # resolve string names like "relu" to callables; keep None as-is
        self.activation = keras.activations.get(activation) if activation is not None else None
def build(self, input_shape):
input_dim = input_shape[-1]
self.W = self.add_weight(shape=(input_dim, self.units),
initializer="random_normal")
        self.b = self.add_weight(shape=(self.units,),
initializer="zeros")
def call(self, inputs):
y = tf.matmul(inputs, self.W) + self.b
if self.activation is not None:
y = self.activation(y)
return y
my_dense = SimpleDense(units=32, activation=tf.nn.relu)
input_tensor = tf.ones(shape=(2, 784))
output_tensor = my_dense(input_tensor)
print(output_tensor.shape)
model = keras.Sequential([
SimpleDense(units=32, activation="relu"),
SimpleDense(units=64, activation="relu"),
SimpleDense(units=32, activation="relu"),
SimpleDense(units=10, activation="relu")])
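# Illustrative check (not from the original script): the same affine transform
# SimpleDense.call performs, written directly with tf ops. Shapes are chosen
# arbitrarily for the demo.
demo_W = tf.random.normal((784, 32))
demo_b = tf.zeros((32,))
demo_x = tf.ones((2, 784))
demo_y = tf.nn.relu(tf.matmul(demo_x, demo_W) + demo_b)
print(demo_y.shape)  # (2, 32)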
| 30.166667
| 63
| 0.634438
|
5806cc9c13e1e111941ba4d642f45d80c5291ef1
| 1,115
|
py
|
Python
|
commpy/examples/wifi80211_conv_encode_decode.py
|
goodcq/CommPy
|
af3a9acba32d2f9c6b723705f709fee2cb9352e2
|
[
"BSD-3-Clause"
] | 2
|
2018-11-18T22:10:49.000Z
|
2019-07-12T08:35:24.000Z
|
commpy/examples/wifi80211_conv_encode_decode.py
|
goodcq/CommPy
|
af3a9acba32d2f9c6b723705f709fee2cb9352e2
|
[
"BSD-3-Clause"
] | null | null | null |
commpy/examples/wifi80211_conv_encode_decode.py
|
goodcq/CommPy
|
af3a9acba32d2f9c6b723705f709fee2cb9352e2
|
[
"BSD-3-Clause"
] | 1
|
2020-10-13T10:33:23.000Z
|
2020-10-13T10:33:23.000Z
|
# Authors: CommPy contributors
# License: BSD 3-Clause
import math
import matplotlib.pyplot as plt
import numpy as np
import commpy.channels as chan
# ==================================================================================================
# Complete example using Commpy Wifi 802.11 physical parameters
# ==================================================================================================
from commpy.wifi80211 import Wifi80211
# AWGN channel
channels = chan.SISOFlatChannel(None, (1 + 0j, 0j))
w2 = Wifi80211(mcs=2)
w3 = Wifi80211(mcs=3)
# SNR range to test
SNRs2 = np.arange(0, 6) + 10 * math.log10(w2.get_modem().num_bits_symbol)
SNRs3 = np.arange(0, 6) + 10 * math.log10(w3.get_modem().num_bits_symbol)
BERs_mcs2 = w2.link_performance(channels, SNRs2, 10, 10, 600, stop_on_surpass_error=False)
BERs_mcs3 = w3.link_performance(channels, SNRs3, 10, 10, 600, stop_on_surpass_error=False)
# Test
plt.semilogy(SNRs2, BERs_mcs2, 'o-', SNRs3, BERs_mcs3, 'o-')
plt.grid()
plt.xlabel('Signal to Noise Ratio (dB)')
plt.ylabel('Bit Error Rate')
plt.legend(('MCS 2', 'MCS 3'))
plt.show()
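# Worked example (not part of CommPy): the "+ 10 * log10(num_bits_symbol)" term
# above shifts the 0-5 dB grid by the per-symbol/per-bit energy ratio, since
# Es/N0 = Eb/N0 + 10*log10(k) for a modem carrying k bits per symbol. For a
# hypothetical 4-bits-per-symbol modem that offset is about 6.02 dB.
print("Offset for 4 bits/symbol: %.2f dB" % (10 * math.log10(4)))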
| 31.857143
| 100
| 0.605381
|
5a6103403f7aefcda026a20baffe8ebd9c8270b3
| 4,513
|
py
|
Python
|
vpip/dependency.py
|
jayvdb/vpip
|
fd3a11c525a5b917ac2332458615db80d6ca0642
|
[
"MIT"
] | null | null | null |
vpip/dependency.py
|
jayvdb/vpip
|
fd3a11c525a5b917ac2332458615db80d6ca0642
|
[
"MIT"
] | null | null | null |
vpip/dependency.py
|
jayvdb/vpip
|
fd3a11c525a5b917ac2332458615db80d6ca0642
|
[
"MIT"
] | null | null | null |
import configparser
import re
from pathlib import Path
from configupdater import ConfigUpdater
from pkg_resources import parse_requirements
def get_dev_requires():
return parse_requirements(DevUpdater().get_requirements())
def get_prod_requires():
return parse_requirements(ProdUpdater().get_requirements())
class Updater:
"""Dependency updater interface. Extend this class to create a new updater.
"""
def get_requirements(self):
"""Get requirements string.
:rtype: str
"""
raise NotImplementedError
def get_spec(self, name, version):
"""Get version specifier.
:arg str name: Installed package name.
        :arg str version: Installed package version.
:return: Version specifier e.g. ``"foo==0.1.0"``
:rtype: str
"""
raise NotImplementedError
def write_requirements(self, lines):
"""Write new requirements to file.
        :arg list[str] lines: Lines of requirements.
"""
raise NotImplementedError
class DevUpdater(Updater):
"""Development dependency (requirements.txt) updater."""
def __init__(self):
self.file = Path("requirements.txt")
def get_requirements(self):
try:
return self.file.read_text("utf8")
except OSError:
pass
return ""
def get_spec(self, name, version):
return "{}=={}".format(name, version)
def write_requirements(self, lines):
with self.file.open("w", encoding="utf8") as f:
for line in lines:
f.write(line + "\n")
class ProdUpdater(Updater):
"""Production dependency (setup.cfg) updater."""
def __init__(self):
self.file = Path("setup.cfg")
self.config = ConfigUpdater()
self.indent = None
def read(self):
try:
text = self.file.read_text("utf8")
self.indent = detect_indent(text)
self.config.read_string(text)
except OSError:
pass
def get_requirements(self):
self.read()
try:
return self.config.get("options", "install_requires").value
except configparser.Error:
pass
return ""
def get_name(self):
self.read()
return self.config.get("metadata", "name").value
def get_spec(self, name, version):
if not version.startswith("0."):
version = re.match("\d+\.\d+", version).group()
return "{}~={}".format(name, version)
def write_requirements(self, lines):
if "options" not in self.config:
self.config.add_section("options")
self.config.set("options", "install_requires", "".join(
"\n" + self.indent + l for l in lines))
self.file.write_text(str(self.config).replace("\r", ""), "utf8")
def update_dependency(updater, added=None, removed=None):
"""Update dependency and save.
:arg Updater updater: An Updater instance.
:arg dict added: A ``pkg_name -> version`` map. Added packages.
:arg list[str] removed: A list of package name. Removed packages.
"""
added = added or {}
removed = set(removed or [])
output = []
dirty = False
for require in parse_requirements(updater.get_requirements()):
if require.name in added:
dirty = True
version = added.pop(require.name)
spec = updater.get_spec(require.name, version)
if require.marker:
spec += ";{}".format(require.marker)
output.append(spec)
elif require.name in removed:
dirty = True
else:
output.append(str(require))
for name, version in added.items():
dirty = True
output.append(updater.get_spec(name, version))
if dirty:
output.sort()
updater.write_requirements(output)
def add_dev(packages):
update_dependency(DevUpdater(), added=packages)
def add_prod(packages):
update_dependency(ProdUpdater(), added=packages)
def delete(packages):
update_dependency(DevUpdater(), removed=packages)
update_dependency(ProdUpdater(), removed=packages)
def detect_indent(text):
for line in text.split("\n"):
match = re.match("(\s+)\S", line)
if match:
return match.group(1)
return None
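# Illustrative sketch (not part of vpip): how the two updaters' get_spec rules
# differ. The package names and versions below are made up for the demo.
def _spec_examples():
    dev = DevUpdater()
    prod = ProdUpdater()
    assert dev.get_spec("requests", "2.28.1") == "requests==2.28.1"   # dev pins exactly
    assert prod.get_spec("requests", "2.28.1") == "requests~=2.28"    # prod allows compatible releases
    assert prod.get_spec("vpip", "0.5.2") == "vpip~=0.5.2"            # 0.x keeps the full version
    return dev, prod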
| 30.493243
| 79
| 0.587414
|
1385f8ae014034952718ab59153ec625735f674f
| 906
|
py
|
Python
|
ping.py
|
bechynsky/bc-sigfox-nopower-alarm
|
29d925a5286d3b47f6f7e1317e43b5b1e141ebed
|
[
"MIT"
] | 1
|
2019-01-15T22:44:27.000Z
|
2019-01-15T22:44:27.000Z
|
ping.py
|
bechynsky/bc-sigfox-nopower-alarm
|
29d925a5286d3b47f6f7e1317e43b5b1e141ebed
|
[
"MIT"
] | null | null | null |
ping.py
|
bechynsky/bc-sigfox-nopower-alarm
|
29d925a5286d3b47f6f7e1317e43b5b1e141ebed
|
[
"MIT"
] | null | null | null |
import http.client
import serial
import time
# Configuration
PORT_NAME = "COM9"
WEB_URL = "www.python.org"
# Connect to serial port
device = serial.Serial(PORT_NAME, 115200)
def deviceUpdate(updateCode):
device.write(updateCode)
device.flush()
print(updateCode)
# simple check if the server is responding
def ping():
output = ""
try:
conn = http.client.HTTPSConnection(WEB_URL)
conn.request("HEAD", "/")
r1 = conn.getresponse()
if (r1.status == 200 or r1.status == 204):
output = b'o' # 0x6f
except:
output = b'e' # 0x65
return output
def main():
try:
while True:
r = ping()
deviceUpdate(r)
time.sleep(60)
except KeyboardInterrupt:
pass
finally:
device.close()
if __name__ == "__main__":
main()
| 19.695652
| 52
| 0.555188
|
2366b5c4f73dd00cc3e5e37c9740283737f617e7
| 77
|
py
|
Python
|
utils/measure_v2/light_controller/const.py
|
smonesi/homeassistant-powercalc
|
a5ba97836c03e4c09fa1a0f1b8d7bf32dd56e8ad
|
[
"MIT"
] | null | null | null |
utils/measure_v2/light_controller/const.py
|
smonesi/homeassistant-powercalc
|
a5ba97836c03e4c09fa1a0f1b8d7bf32dd56e8ad
|
[
"MIT"
] | null | null | null |
utils/measure_v2/light_controller/const.py
|
smonesi/homeassistant-powercalc
|
a5ba97836c03e4c09fa1a0f1b8d7bf32dd56e8ad
|
[
"MIT"
] | null | null | null |
MODE_HS = "hs"
MODE_COLOR_TEMP = "color_temp"
MODE_BRIGHTNESS = "brightness"
| 19.25
| 30
| 0.766234
|
faa8028bea404c59d25a6237d98d80586cdc6afd
| 32,355
|
py
|
Python
|
Chapter06/SHAP_ERRATA/_explanation.py
|
PacktPublishing/Applied-Machine-Learning-Explainability-Techniques
|
13db7d78421e7370a455f97590df3a6c60f0c630
|
[
"MIT"
] | 16
|
2021-12-13T02:12:47.000Z
|
2022-03-24T16:08:20.000Z
|
Chapter06/SHAP_ERRATA/_explanation.py
|
PacktPublishing/Applied-Machine-Learning-Explainability-Techniques
|
13db7d78421e7370a455f97590df3a6c60f0c630
|
[
"MIT"
] | null | null | null |
Chapter06/SHAP_ERRATA/_explanation.py
|
PacktPublishing/Applied-Machine-Learning-Explainability-Techniques
|
13db7d78421e7370a455f97590df3a6c60f0c630
|
[
"MIT"
] | 7
|
2021-12-27T08:40:52.000Z
|
2022-03-25T02:22:50.000Z
|
import pandas as pd
import numpy as np
import scipy as sp
import sys
import warnings
import copy
import operator
import sklearn
from slicer import Slicer, Alias, Obj
# from ._order import Order
from .utils._general import OpChain
# slicer confuses pylint...
# pylint: disable=no-member
op_chain_root = OpChain("shap.Explanation")
class MetaExplanation(type):
""" This metaclass exposes the Explanation object's methods for creating template op chains.
"""
def __getitem__(cls, item):
return op_chain_root.__getitem__(item)
@property
def abs(cls):
""" Element-wize absolute value op.
"""
return op_chain_root.abs
@property
def identity(cls):
""" A no-op.
"""
return op_chain_root.identity
@property
def argsort(cls):
""" Numpy style argsort.
"""
return op_chain_root.argsort
@property
def sum(cls):
""" Numpy style sum.
"""
return op_chain_root.sum
@property
def max(cls):
""" Numpy style max.
"""
return op_chain_root.max
@property
def min(cls):
""" Numpy style min.
"""
return op_chain_root.min
@property
def mean(cls):
""" Numpy style mean.
"""
return op_chain_root.mean
@property
def sample(cls):
""" Numpy style sample.
"""
return op_chain_root.sample
@property
def hclust(cls):
""" Hierarchial clustering op.
"""
return op_chain_root.hclust
class Explanation(metaclass=MetaExplanation):
""" A slicable set of parallel arrays representing a SHAP explanation.
"""
def __init__( # pylint: disable=too-many-arguments
self,
values,
base_values=None,
data=None,
display_data=None,
instance_names=None,
feature_names=None,
output_names=None,
output_indexes=None,
lower_bounds=None,
upper_bounds=None,
error_std=None,
main_effects=None,
hierarchical_values=None,
clustering=None,
compute_time=None
):
self.op_history = []
self.compute_time = compute_time
# cloning. TODOsomeday: better cloning :)
if issubclass(type(values), Explanation):
e = values
values = e.values
base_values = e.base_values
data = e.data
self.output_dims = compute_output_dims(values, base_values, data, output_names)
values_shape = _compute_shape(values)
if output_names is None and len(self.output_dims) == 1:
output_names = [f"Output {i}" for i in range(values_shape[self.output_dims[0]])]
if len(_compute_shape(feature_names)) == 1: # TODOsomeday: should always be an alias once slicer supports per-row aliases
if len(values_shape) >= 1 and len(feature_names) == values_shape[0]:
feature_names = Alias(list(feature_names), 0)
elif len(values_shape) >= 2 and len(feature_names) == values_shape[1]:
feature_names = Alias(list(feature_names), 1)
if len(_compute_shape(output_names)) == 1: # TODOsomeday: should always be an alias once slicer supports per-row aliases
output_names = Alias(list(output_names), self.output_dims[0])
# if len(values_shape) >= 1 and len(output_names) == values_shape[0]:
# output_names = Alias(list(output_names), 0)
# elif len(values_shape) >= 2 and len(output_names) == values_shape[1]:
# output_names = Alias(list(output_names), 1)
if output_names is not None and not isinstance(output_names, Alias):
l = len(_compute_shape(output_names))
if l == 0:
pass
elif l == 1:
output_names = Obj(output_names, self.output_dims)
elif l == 2:
output_names = Obj(output_names, [0] + list(self.output_dims))
else:
raise ValueError("shap.Explanation does not yet support output_names of order greater than 3!")
if not hasattr(base_values, "__len__") or len(base_values) == 0:
pass
elif len(_compute_shape(base_values)) == len(self.output_dims):
base_values = Obj(base_values, list(self.output_dims))
else:
base_values = Obj(base_values, [0] + list(self.output_dims))
self._s = Slicer(
values=values,
base_values=base_values,
data=list_wrap(data),
display_data=list_wrap(display_data),
instance_names=None if instance_names is None else Alias(instance_names, 0),
feature_names=feature_names,
output_names=output_names,
output_indexes=None if output_indexes is None else (self.output_dims, output_indexes),
lower_bounds=list_wrap(lower_bounds),
upper_bounds=list_wrap(upper_bounds),
error_std=list_wrap(error_std),
main_effects=list_wrap(main_effects),
hierarchical_values=list_wrap(hierarchical_values),
clustering=None if clustering is None else Obj(clustering, [0])
)
@property
def shape(self):
""" Compute the shape over potentially complex data nesting.
"""
return _compute_shape(self._s.values)
@property
def values(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.values
@values.setter
def values(self, new_values):
self._s.values = new_values
@property
def base_values(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.base_values
@base_values.setter
def base_values(self, new_base_values):
self._s.base_values = new_base_values
@property
def data(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.data
@data.setter
def data(self, new_data):
self._s.data = new_data
@property
def display_data(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.display_data
@display_data.setter
def display_data(self, new_display_data):
if issubclass(type(new_display_data), pd.DataFrame):
new_display_data = new_display_data.values
self._s.display_data = new_display_data
@property
def instance_names(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.instance_names
@property
def output_names(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.output_names
@output_names.setter
def output_names(self, new_output_names):
self._s.output_names = new_output_names
@property
def output_indexes(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.output_indexes
@property
def feature_names(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.feature_names
@feature_names.setter
def feature_names(self, new_feature_names):
self._s.feature_names = new_feature_names
@property
def lower_bounds(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.lower_bounds
@property
def upper_bounds(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.upper_bounds
@property
def error_std(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.error_std
@property
def main_effects(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.main_effects
@main_effects.setter
def main_effects(self, new_main_effects):
self._s.main_effects = new_main_effects
@property
def hierarchical_values(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.hierarchical_values
@hierarchical_values.setter
def hierarchical_values(self, new_hierarchical_values):
self._s.hierarchical_values = new_hierarchical_values
@property
def clustering(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.clustering
@clustering.setter
def clustering(self, new_clustering):
self._s.clustering = new_clustering
def cohorts(self, cohorts):
""" Split this explanation into several cohorts.
Parameters
----------
cohorts : int or array
If this is an integer then we auto build that many cohorts using a decision tree. If this is
an array then we treat that as an array of cohort names/ids for each instance.
"""
if isinstance(cohorts, int):
return _auto_cohorts(self, max_cohorts=cohorts)
if isinstance(cohorts, (list, tuple, np.ndarray)):
cohorts = np.array(cohorts)
return Cohorts(**{name: self[cohorts == name] for name in np.unique(cohorts)})
raise Exception("The given set of cohort indicators is not recognized! Please give an array or int.")
def __repr__(self):
""" Display some basic printable info, but not everything.
"""
out = ".values =\n"+self.values.__repr__()
if self.base_values is not None:
out += "\n\n.base_values =\n"+self.base_values.__repr__()
if self.data is not None:
out += "\n\n.data =\n"+self.data.__repr__()
return out
def __getitem__(self, item):
""" This adds support for OpChain indexing.
"""
new_self = None
if not isinstance(item, tuple):
item = (item,)
# convert any OpChains or magic strings
pos = -1
for t in item: # pylint: disable=too-many-nested-blocks
pos += 1
# skip over Ellipsis
if type(t) not in [np.ndarray, pd.Series]:
if t == Ellipsis:
pos += len(self.shape) - len(item)
continue
orig_t = t
if issubclass(type(t), OpChain):
t = t.apply(self)
if issubclass(type(t), (np.int64, np.int32)): # because slicer does not like numpy indexes
t = int(t)
elif issubclass(type(t), np.ndarray):
t = [int(v) for v in t] # slicer wants lists not numpy arrays for indexing
elif issubclass(type(t), (Explanation, pd.Series)):
t = t.values
elif isinstance(t, str):
# work around for 2D output_names since they are not yet slicer supported
output_names_dims = []
if "output_names" in self._s._objects:
output_names_dims = self._s._objects["output_names"].dim
elif "output_names" in self._s._aliases:
output_names_dims = self._s._aliases["output_names"].dim
if pos != 0 and pos in output_names_dims:
if len(output_names_dims) == 1:
t = np.argwhere(np.array(self.output_names) == t)[0][0]
elif len(output_names_dims) == 2:
new_values = []
new_base_values = []
new_data = []
new_self = copy.deepcopy(self)
for i, v in enumerate(self.values):
for j, s in enumerate(self.output_names[i]):
if s == t:
new_values.append(np.array(v[:,j]))
new_data.append(np.array(self.data[i]))
new_base_values.append(self.base_values[i][j])
new_self = Explanation(
np.array(new_values),
np.array(new_base_values),
np.array(new_data),
self.display_data,
self.instance_names,
np.array(new_data),
t, # output_names
self.output_indexes,
self.lower_bounds,
self.upper_bounds,
self.error_std,
self.main_effects,
self.hierarchical_values,
self.clustering
)
new_self.op_history = copy.copy(self.op_history)
# new_self = copy.deepcopy(self)
# new_self.values = np.array(new_values)
# new_self.base_values = np.array(new_base_values)
# new_self.data = np.array(new_data)
# new_self.output_names = t
# new_self.feature_names = np.array(new_data)
# new_self.clustering = None
# work around for 2D feature_names since they are not yet slicer supported
feature_names_dims = []
if "feature_names" in self._s._objects:
feature_names_dims = self._s._objects["feature_names"].dim
if pos != 0 and pos in feature_names_dims and len(feature_names_dims) == 2:
new_values = []
new_data = []
for i, val_i in enumerate(self.values):
for s,v,d in zip(self.feature_names[i], val_i, self.data[i]):
if s == t:
new_values.append(v)
new_data.append(d)
new_self = copy.deepcopy(self)
new_self.values = new_values
new_self.data = new_data
new_self.feature_names = t
new_self.clustering = None
# return new_self
if issubclass(type(t), (np.int8, np.int16, np.int32, np.int64)):
t = int(t)
if t is not orig_t:
tmp = list(item)
tmp[pos] = t
item = tuple(tmp)
# call slicer for the real work
item = tuple(v for v in item) # SML I cut out: `if not isinstance(v, str)`
if len(item) == 0:
return new_self
if new_self is None:
new_self = copy.copy(self)
new_self._s = new_self._s.__getitem__(item)
new_self.op_history.append({
"name": "__getitem__",
"args": (item,),
"prev_shape": self.shape
})
return new_self
def __len__(self):
return self.shape[0]
def __copy__(self):
new_exp = Explanation(
self.values,
self.base_values,
self.data,
self.display_data,
self.instance_names,
self.feature_names,
self.output_names,
self.output_indexes,
self.lower_bounds,
self.upper_bounds,
self.error_std,
self.main_effects,
self.hierarchical_values,
self.clustering
)
new_exp.op_history = copy.copy(self.op_history)
return new_exp
def _apply_binary_operator(self, other, binary_op, op_name):
new_exp = self.__copy__()
new_exp.op_history = copy.copy(self.op_history)
new_exp.op_history.append({
"name": op_name,
"args": (other,),
"prev_shape": self.shape
})
if isinstance(other, Explanation):
new_exp.values = binary_op(new_exp.values, other.values)
if new_exp.data is not None:
new_exp.data = binary_op(new_exp.data, other.data)
if new_exp.base_values is not None:
new_exp.base_values = binary_op(new_exp.base_values, other.base_values)
else:
new_exp.values = binary_op(new_exp.values, other)
if new_exp.data is not None:
new_exp.data = binary_op(new_exp.data, other)
if new_exp.base_values is not None:
new_exp.base_values = binary_op(new_exp.base_values, other)
return new_exp
def __add__(self, other):
return self._apply_binary_operator(other, operator.add, "__add__")
def __radd__(self, other):
return self._apply_binary_operator(other, operator.add, "__add__")
def __sub__(self, other):
return self._apply_binary_operator(other, operator.sub, "__sub__")
def __rsub__(self, other):
return self._apply_binary_operator(other, operator.sub, "__sub__")
def __mul__(self, other):
return self._apply_binary_operator(other, operator.mul, "__mul__")
def __rmul__(self, other):
return self._apply_binary_operator(other, operator.mul, "__mul__")
def __truediv__(self, other):
return self._apply_binary_operator(other, operator.truediv, "__truediv__")
# @property
# def abs(self):
# """ Element-size absolute value operator.
# """
# new_self = copy.copy(self)
# new_self.values = np.abs(new_self.values)
# new_self.op_history.append({
# "name": "abs",
# "prev_shape": self.shape
# })
# return new_self
def _numpy_func(self, fname, **kwargs):
""" Apply a numpy-style function to this Explanation.
"""
new_self = copy.copy(self)
axis = kwargs.get("axis", None)
# collapse the slicer to right shape
if axis == 0:
new_self = new_self[0]
elif axis == 1:
new_self = new_self[1]
elif axis == 2:
new_self = new_self[2]
if axis in [0,1,2]:
new_self.op_history = new_self.op_history[:-1] # pop off the slicing operation we just used
if self.feature_names is not None and not is_1d(self.feature_names) and axis == 0:
new_values = self._flatten_feature_names()
new_self.feature_names = np.array(list(new_values.keys()))
new_self.values = np.array([getattr(np, fname)(v,0) for v in new_values.values()])
new_self.clustering = None
else:
new_self.values = getattr(np, fname)(np.array(self.values), **kwargs)
if new_self.data is not None:
try:
new_self.data = getattr(np, fname)(np.array(self.data), **kwargs)
except:
new_self.data = None
if new_self.base_values is not None and issubclass(type(axis), int) and len(self.base_values.shape) > axis:
new_self.base_values = getattr(np, fname)(self.base_values, **kwargs)
elif issubclass(type(axis), int):
new_self.base_values = None
if axis == 0 and self.clustering is not None and len(self.clustering.shape) == 3:
if self.clustering.std(0).sum() < 1e-8:
new_self.clustering = self.clustering[0]
else:
new_self.clustering = None
new_self.op_history.append({
"name": fname,
"kwargs": kwargs,
"prev_shape": self.shape,
"collapsed_instances": axis == 0
})
return new_self
def mean(self, axis):
""" Numpy-style mean function.
"""
return self._numpy_func("mean", axis=axis)
def max(self, axis):
""" Numpy-style mean function.
"""
return self._numpy_func("max", axis=axis)
def min(self, axis):
""" Numpy-style mean function.
"""
return self._numpy_func("min", axis=axis)
def sum(self, axis=None, grouping=None):
""" Numpy-style mean function.
"""
if grouping is None:
return self._numpy_func("sum", axis=axis)
elif axis == 1 or len(self.shape) == 1:
return group_features(self, grouping)
else:
raise Exception("Only axis = 1 is supported for grouping right now...")
def hstack(self, other):
""" Stack two explanations column-wise.
"""
assert self.shape[0] == other.shape[0], "Can't hstack explanations with different numbers of rows!"
assert np.max(np.abs(self.base_values - other.base_values)) < 1e-6, "Can't hstack explanations with different base values!"
new_exp = Explanation(
np.hstack([self.values, other.values]),
self.base_values,
self.data,
self.display_data,
self.instance_names,
self.feature_names,
self.output_names,
self.output_indexes,
self.lower_bounds,
self.upper_bounds,
self.error_std,
self.main_effects,
self.hierarchical_values,
self.clustering
)
return self._numpy_func("min", axis=axis)
# def reshape(self, *args):
# return self._numpy_func("reshape", newshape=args)
@property
def abs(self):
return self._numpy_func("abs")
@property
def identity(self):
return self
@property
def argsort(self):
return self._numpy_func("argsort")
@property
def flip(self):
return self._numpy_func("flip")
def hclust(self, metric="sqeuclidean", axis=0):
""" Computes an optimal leaf ordering sort order using hclustering.
hclust(metric="sqeuclidean")
Parameters
----------
metric : string
A metric supported by scipy clustering.
axis : int
The axis to cluster along.
"""
values = self.values
if len(values.shape) != 2:
raise Exception("The hclust order only supports 2D arrays right now!")
if axis == 1:
values = values.T
# compute a hierarchical clustering and return the optimal leaf ordering
D = sp.spatial.distance.pdist(values, metric)
cluster_matrix = sp.cluster.hierarchy.complete(D)
inds = sp.cluster.hierarchy.leaves_list(sp.cluster.hierarchy.optimal_leaf_ordering(cluster_matrix, D))
return inds
def sample(self, max_samples, replace=False, random_state=0):
""" Randomly samples the instances (rows) of the Explanation object.
Parameters
----------
max_samples : int
            The number of rows to sample. Note that if replace=False then
fewer than max_samples will be drawn if explanation.shape[0] < max_samples.
replace : bool
Sample with or without replacement.
"""
prev_seed = np.random.seed(random_state)
inds = np.random.choice(self.shape[0], min(max_samples, self.shape[0]), replace=replace)
np.random.seed(prev_seed)
return self[list(inds)]
def _flatten_feature_names(self):
new_values = {}
for i in range(len(self.values)):
for s,v in zip(self.feature_names[i], self.values[i]):
if s not in new_values:
new_values[s] = []
new_values[s].append(v)
return new_values
def _use_data_as_feature_names(self):
new_values = {}
for i in range(len(self.values)):
for s,v in zip(self.data[i], self.values[i]):
if s not in new_values:
new_values[s] = []
new_values[s].append(v)
return new_values
def percentile(self, q, axis=None):
new_self = copy.deepcopy(self)
if self.feature_names is not None and not is_1d(self.feature_names) and axis == 0:
new_values = self._flatten_feature_names()
new_self.feature_names = np.array(list(new_values.keys()))
new_self.values = np.array([np.percentile(v, q) for v in new_values.values()])
new_self.clustering = None
else:
new_self.values = np.percentile(new_self.values, q, axis)
new_self.data = np.percentile(new_self.data, q, axis)
#new_self.data = None
new_self.op_history.append({
"name": "percentile",
"args": (axis,),
"prev_shape": self.shape,
"collapsed_instances": axis == 0
})
return new_self
def group_features(shap_values, feature_map):
# TODOsomeday: support and deal with clusterings
reverse_map = {}
for name in feature_map:
reverse_map[feature_map[name]] = reverse_map.get(feature_map[name], []) + [name]
curr_names = shap_values.feature_names
sv_new = copy.deepcopy(shap_values)
found = {}
i = 0
rank1 = len(shap_values.shape) == 1
for name in curr_names:
new_name = feature_map.get(name, name)
if new_name in found:
continue
found[new_name] = True
new_name = feature_map.get(name, name)
cols_to_sum = reverse_map.get(new_name, [new_name])
old_inds = [curr_names.index(v) for v in cols_to_sum]
if rank1:
sv_new.values[i] = shap_values.values[old_inds].sum()
sv_new.data[i] = shap_values.data[old_inds].sum()
else:
sv_new.values[:,i] = shap_values.values[:,old_inds].sum(1)
sv_new.data[:,i] = shap_values.data[:,old_inds].sum(1)
sv_new.feature_names[i] = new_name
i += 1
return Explanation(
sv_new.values[:i] if rank1 else sv_new.values[:,:i],
base_values = sv_new.base_values,
data = sv_new.data[:i] if rank1 else sv_new.data[:,:i],
display_data = None if sv_new.display_data is None else (sv_new.display_data[:,:i] if rank1 else sv_new.display_data[:,:i]),
instance_names = None,
feature_names = None if sv_new.feature_names is None else sv_new.feature_names[:i],
output_names = None,
output_indexes = None,
lower_bounds = None,
upper_bounds = None,
error_std = None,
main_effects = None,
hierarchical_values = None,
clustering = None
)
def compute_output_dims(values, base_values, data, output_names):
""" Uses the passed data to infer which dimensions correspond to the model's output.
"""
values_shape = _compute_shape(values)
# input shape matches the data shape
if data is not None:
data_shape = _compute_shape(data)
# if we are not given any data we assume it would be the same shape as the given values
else:
data_shape = values_shape
# output shape is known from the base values or output names
if output_names is not None:
output_shape = _compute_shape(output_names)
# if our output_names are per sample then we need to drop the sample dimension here
if values_shape[-len(output_shape):] != output_shape and \
values_shape[-len(output_shape)+1:] == output_shape[1:] and values_shape[0] == output_shape[0]:
output_shape = output_shape[1:]
elif base_values is not None:
output_shape = _compute_shape(base_values)[1:]
else:
output_shape = tuple()
interaction_order = len(values_shape) - len(data_shape) - len(output_shape)
values_dims = list(range(len(values_shape)))
output_dims = range(len(data_shape) + interaction_order, len(values_shape))
return tuple(output_dims)
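# Illustrative example (not part of shap): with per-sample base values the
# trailing axis of `values` is identified as the output dimension.
def _output_dims_example():
    values = np.zeros((3, 4, 2))        # 3 samples, 4 features, 2 model outputs
    base_values = np.zeros((3, 2))
    data = np.zeros((3, 4))
    return compute_output_dims(values, base_values, data, None)  # -> (2,)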
def is_1d(val):
return not (isinstance(val[0], list) or isinstance(val[0], np.ndarray))
class Op():
pass
class Percentile(Op):
def __init__(self, percentile):
self.percentile = percentile
def add_repr(self, s, verbose=False):
return "percentile("+s+", "+str(self.percentile)+")"
def _first_item(x):
for item in x:
return item
return None
def _compute_shape(x):
if not hasattr(x, "__len__") or isinstance(x, str):
return tuple()
elif not sp.sparse.issparse(x) and len(x) > 0 and isinstance(_first_item(x), str):
return (None,)
else:
if isinstance(x, dict):
return (len(x),) + _compute_shape(x[next(iter(x))])
# 2D arrays we just take their shape as-is
if len(getattr(x, "shape", tuple())) > 1:
return x.shape
# 1D arrays we need to look inside
if len(x) == 0:
return (0,)
elif len(x) == 1:
return (1,) + _compute_shape(_first_item(x))
else:
first_shape = _compute_shape(_first_item(x))
if first_shape == tuple():
return (len(x),)
else: # we have an array of arrays...
                matches = np.ones(len(first_shape), dtype=bool)
for i in range(1, len(x)):
shape = _compute_shape(x[i])
assert len(shape) == len(first_shape), "Arrays in Explanation objects must have consistent inner dimensions!"
for j in range(0, len(shape)):
matches[j] &= shape[j] == first_shape[j]
return (len(x),) + tuple(first_shape[j] if match else None for j, match in enumerate(matches))
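# Illustrative example (not part of shap): ragged inner dimensions are
# reported as None rather than raising.
def _compute_shape_example():
    ragged = [np.zeros(3), np.zeros(5)]
    return _compute_shape(ragged)  # -> (2, None)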
class Cohorts():
def __init__(self, **kwargs):
self.cohorts = kwargs
for k in self.cohorts:
assert isinstance(self.cohorts[k], Explanation), "All the arguments to a Cohorts set must be Explanation objects!"
def __getitem__(self, item):
new_cohorts = Cohorts()
for k in self.cohorts:
new_cohorts.cohorts[k] = self.cohorts[k].__getitem__(item)
return new_cohorts
def __getattr__(self, name):
new_cohorts = Cohorts()
for k in self.cohorts:
new_cohorts.cohorts[k] = getattr(self.cohorts[k], name)
return new_cohorts
def __call__(self, *args, **kwargs):
new_cohorts = Cohorts()
for k in self.cohorts:
new_cohorts.cohorts[k] = self.cohorts[k].__call__(*args, **kwargs)
return new_cohorts
def __repr__(self):
return f"<shap._explanation.Cohorts object with {len(self.cohorts)} cohorts of sizes: {[v.shape for v in self.cohorts.values()]}>"
def _auto_cohorts(shap_values, max_cohorts):
""" This uses a DecisionTreeRegressor to build a group of cohorts with similar SHAP values.
"""
    # fit a decision tree that well separates the SHAP values
m = sklearn.tree.DecisionTreeRegressor(max_leaf_nodes=max_cohorts)
m.fit(shap_values.data, shap_values.values)
# group instances by their decision paths
paths = m.decision_path(shap_values.data).toarray()
unique_paths = np.unique(m.decision_path(shap_values.data).todense(), axis=0)
path_names = []
# mark each instance with a path name
for i in range(shap_values.shape[0]):
name = ""
for j in range(len(paths[i])):
if paths[i,j] > 0:
feature = m.tree_.feature[j]
threshold = m.tree_.threshold[j]
val = shap_values.data[i,feature]
if feature >= 0:
name += str(shap_values.feature_names[feature])
if val < threshold:
name += " < "
else:
name += " >= "
name += str(threshold) + " & "
path_names.append(name[:-3]) # the -3 strips off the last unneeded ' & '
path_names = np.array(path_names)
# split the instances into cohorts by their path names
cohorts = {}
for name in np.unique(path_names):
cohorts[name] = shap_values[path_names == name]
return Cohorts(**cohorts)
def list_wrap(x):
""" A helper to patch things since slicer doesn't handle arrays of arrays (it does handle lists of arrays)
"""
if isinstance(x, np.ndarray) and len(x.shape) == 1 and isinstance(x[0], np.ndarray):
return [v for v in x]
else:
return x
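# Illustrative example (not part of shap): an object array of arrays becomes a
# plain list so the slicer can handle it.
def _list_wrap_example():
    arr_of_arrs = np.empty(2, dtype=object)
    arr_of_arrs[0] = np.zeros(3)
    arr_of_arrs[1] = np.zeros(5)
    return list_wrap(arr_of_arrs)  # -> [array of length 3, array of length 5] as a plain list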
| 35.672547
| 138
| 0.58025
|
2e71337756eb090a23ed5d3d1ce932e86218a85e
| 14,028
|
py
|
Python
|
manager/test/python/task_pb2.py
|
UCLA-VAST/Blaze
|
666e87d9e6cf4c4d83cd7551f6bcfc4556cd8965
|
[
"BSD-2-Clause"
] | 31
|
2015-08-21T19:58:00.000Z
|
2021-03-10T10:45:37.000Z
|
manager/test/python/task_pb2.py
|
UCLA-VAST/Blaze
|
666e87d9e6cf4c4d83cd7551f6bcfc4556cd8965
|
[
"BSD-2-Clause"
] | 1
|
2017-03-23T03:58:23.000Z
|
2017-03-23T04:24:33.000Z
|
manager/test/python/task_pb2.py
|
UCLA-VAST/Blaze
|
666e87d9e6cf4c4d83cd7551f6bcfc4556cd8965
|
[
"BSD-2-Clause"
] | 16
|
2016-09-07T08:35:10.000Z
|
2020-01-04T01:44:48.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: task.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = _descriptor.FileDescriptor(
name='task.proto',
package='blaze',
serialized_pb='\n\ntask.proto\x12\x05\x62laze\"\xe8\x01\n\x07\x44\x61taMsg\x12\x14\n\x0cpartition_id\x18\x01 \x01(\x03\x12\x0e\n\x06\x63\x61\x63hed\x18\x02 \x01(\x08\x12\x0f\n\x07sampled\x18\x03 \x01(\x08\x12\x16\n\x0e\x65lement_length\x18\x04 \x01(\x05\x12\x14\n\x0c\x65lement_size\x18\x05 \x01(\x05\x12\x14\n\x0cnum_elements\x18\x06 \x01(\x05\x12\x14\n\x0cscalar_value\x18\x07 \x01(\x03\x12\x11\n\tfile_path\x18\x08 \x01(\t\x12\x11\n\tfile_size\x18\t \x01(\x03\x12\x13\n\x0b\x66ile_offset\x18\n \x01(\x03\x12\x11\n\tmask_path\x18\x0b \x01(\t\"\x8e\x01\n\x07TaskMsg\x12\x1c\n\x04type\x18\x01 \x02(\x0e\x32\x0e.blaze.MsgType\x12\x0e\n\x06\x61pp_id\x18\x02 \x01(\t\x12\x0e\n\x06\x61\x63\x63_id\x18\x03 \x01(\t\x12\x1c\n\x04\x64\x61ta\x18\x04 \x03(\x0b\x32\x0e.blaze.DataMsg\x12\x1a\n\x03\x61\x63\x63\x18\x05 \x01(\x0b\x32\r.blaze.AccMsg\x12\x0b\n\x03msg\x18\x06 \x01(\t\"\x8f\x01\n\x06\x41\x63\x63Msg\x12\x0e\n\x06\x61\x63\x63_id\x18\x01 \x02(\t\x12\x13\n\x0bplatform_id\x18\x02 \x02(\t\x12\x11\n\ttask_impl\x18\x03 \x01(\x0c\x12%\n\x05param\x18\x04 \x03(\x0b\x32\x16.blaze.AccMsg.KeyValue\x1a&\n\x08KeyValue\x12\x0b\n\x03key\x18\x01 \x02(\t\x12\r\n\x05value\x18\x02 \x02(\x0c*\xa1\x01\n\x07MsgType\x12\x0e\n\nACCREQUEST\x10\x00\x12\x0c\n\x08\x41\x43\x43GRANT\x10\x01\x12\r\n\tACCREJECT\x10\x02\x12\r\n\tACCFINISH\x10\x03\x12\x0b\n\x07\x41\x43\x43\x44\x41TA\x10\x04\x12\x0e\n\nACCFAILURE\x10\x05\x12\x10\n\x0c\x41\x43\x43\x42ROADCAST\x10\x06\x12\x0b\n\x07\x41\x43\x43TERM\x10\x07\x12\x0f\n\x0b\x41\x43\x43REGISTER\x10\x08\x12\r\n\tACCDELETE\x10\tB$\n\x16org.apache.spark.blazeB\nAccMessage')
_MSGTYPE = _descriptor.EnumDescriptor(
name='MsgType',
full_name='blaze.MsgType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ACCREQUEST', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCGRANT', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCREJECT', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCFINISH', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCDATA', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCFAILURE', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCBROADCAST', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCTERM', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCREGISTER', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCDELETE', index=9, number=9,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=548,
serialized_end=709,
)
MsgType = enum_type_wrapper.EnumTypeWrapper(_MSGTYPE)
ACCREQUEST = 0
ACCGRANT = 1
ACCREJECT = 2
ACCFINISH = 3
ACCDATA = 4
ACCFAILURE = 5
ACCBROADCAST = 6
ACCTERM = 7
ACCREGISTER = 8
ACCDELETE = 9
_DATAMSG = _descriptor.Descriptor(
name='DataMsg',
full_name='blaze.DataMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='partition_id', full_name='blaze.DataMsg.partition_id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cached', full_name='blaze.DataMsg.cached', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sampled', full_name='blaze.DataMsg.sampled', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='element_length', full_name='blaze.DataMsg.element_length', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='element_size', full_name='blaze.DataMsg.element_size', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_elements', full_name='blaze.DataMsg.num_elements', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scalar_value', full_name='blaze.DataMsg.scalar_value', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='file_path', full_name='blaze.DataMsg.file_path', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='file_size', full_name='blaze.DataMsg.file_size', index=8,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='file_offset', full_name='blaze.DataMsg.file_offset', index=9,
number=10, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_path', full_name='blaze.DataMsg.mask_path', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=22,
serialized_end=254,
)
_TASKMSG = _descriptor.Descriptor(
name='TaskMsg',
full_name='blaze.TaskMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='blaze.TaskMsg.type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='app_id', full_name='blaze.TaskMsg.app_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='acc_id', full_name='blaze.TaskMsg.acc_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data', full_name='blaze.TaskMsg.data', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='acc', full_name='blaze.TaskMsg.acc', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='msg', full_name='blaze.TaskMsg.msg', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=257,
serialized_end=399,
)
_ACCMSG_KEYVALUE = _descriptor.Descriptor(
name='KeyValue',
full_name='blaze.AccMsg.KeyValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='blaze.AccMsg.KeyValue.key', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='blaze.AccMsg.KeyValue.value', index=1,
number=2, type=12, cpp_type=9, label=2,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=507,
serialized_end=545,
)
_ACCMSG = _descriptor.Descriptor(
name='AccMsg',
full_name='blaze.AccMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='acc_id', full_name='blaze.AccMsg.acc_id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='platform_id', full_name='blaze.AccMsg.platform_id', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='task_impl', full_name='blaze.AccMsg.task_impl', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='param', full_name='blaze.AccMsg.param', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ACCMSG_KEYVALUE, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=402,
serialized_end=545,
)
_TASKMSG.fields_by_name['type'].enum_type = _MSGTYPE
_TASKMSG.fields_by_name['data'].message_type = _DATAMSG
_TASKMSG.fields_by_name['acc'].message_type = _ACCMSG
_ACCMSG_KEYVALUE.containing_type = _ACCMSG;
_ACCMSG.fields_by_name['param'].message_type = _ACCMSG_KEYVALUE
DESCRIPTOR.message_types_by_name['DataMsg'] = _DATAMSG
DESCRIPTOR.message_types_by_name['TaskMsg'] = _TASKMSG
DESCRIPTOR.message_types_by_name['AccMsg'] = _ACCMSG
class DataMsg(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DATAMSG
# @@protoc_insertion_point(class_scope:blaze.DataMsg)
class TaskMsg(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TASKMSG
# @@protoc_insertion_point(class_scope:blaze.TaskMsg)
class AccMsg(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
class KeyValue(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ACCMSG_KEYVALUE
# @@protoc_insertion_point(class_scope:blaze.AccMsg.KeyValue)
DESCRIPTOR = _ACCMSG
# @@protoc_insertion_point(class_scope:blaze.AccMsg)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), '\n\026org.apache.spark.blazeB\nAccMessage')
# @@protoc_insertion_point(module_scope)
| 38.327869
| 1,592
| 0.718135
|
4f254dca1a36b88a94eb238b1103604691fa6e59
| 2,941
|
py
|
Python
|
tests/data.py
|
bennyrock20/rocket-python
|
ef1cebea3fcd18512f1b813842a06faa30cce207
|
[
"MIT"
] | null | null | null |
tests/data.py
|
bennyrock20/rocket-python
|
ef1cebea3fcd18512f1b813842a06faa30cce207
|
[
"MIT"
] | null | null | null |
tests/data.py
|
bennyrock20/rocket-python
|
ef1cebea3fcd18512f1b813842a06faa30cce207
|
[
"MIT"
] | null | null | null |
PUBLIC_ROOM_TEST = {'channels': [
{'name': 'Test Room',
'_id': '123456'}
]}
GET_ROOM_INFO_TEST = {
"channel": {
"_id": "ByehQjC44FwMeiLbX",
"ts": "2016-11-30T21:23:04.737Z",
"t": "c",
"name": "testing",
"usernames": [
"testing",
"testing1",
"testing2"
],
"msgs": 1,
"default": 'true',
"_updatedAt": "2016-12-09T12:50:51.575Z",
"lm": "2016-12-09T12:50:51.555Z"
},
"success": 'true'
}
GET_ME_TEST = {
"_id": "aobEdbYhXfu5hkeqG",
"name": "Example User",
"emails": [
{
"address": "example@example.com",
"verified": 'true'
}
],
"status": "offline",
"statusConnection": "offline",
"username": "example",
"utcOffset": 0,
"active": 'true',
"success": 'true'
}
GET_USERS_TEST = {
"users": [
{
"_id": "nSYqWzZ4GsKTX4dyK",
"createdAt": "2016-12-07T15:47:46.861Z",
"services": {
"password": {
"bcrypt": "..."
},
"email": {
"verificationTokens": [
{
"token": "...",
"address": "example@example.com",
"when": "2016-12-07T15:47:46.930Z"
}
]
},
"resume": {
"loginTokens": [
{
"when": "2016-12-07T15:47:47.334Z",
"hashedToken": "..."
}
]
}
},
"emails": [
{
"address": "example@example.com",
"verified": 'true'
}
],
"type": "user",
"status": "offline",
"active": 'true',
"roles": [
"user"
],
"name": "Example User",
"lastLogin": "2016-12-08T00:22:15.167Z",
"statusConnection": "offline",
"utcOffset": 0,
"username": "example"
}
],
"count": 3,
"offset": 2,
"total": 10,
"success": 'true'
}
GET_USER_INFO_TEST = {
"user": {
"_id": "nSYqWzZ4GsKTX4dyK",
"createdAt": "2016-12-07T15:47:46.861Z",
"services": {
"password": {
"bcrypt": "..."
},
"email": {
"verificationTokens": [
{
"token": "...",
"address": "example@example.com",
"when": "2016-12-07T15:47:46.930Z"
}
]
},
"resume": {
"loginTokens": [
{
"when": "2016-12-07T15:47:47.334Z",
"hashedToken": "..."
}
]
}
},
"emails": [
{
"address": "example@example.com",
"verified": 'true'
}
],
"type": "user",
"status": "offline",
"active": 'true',
"roles": [
"user"
],
"name": "Example User",
"lastLogin": "2016-12-08T00:22:15.167Z",
"statusConnection": "offline",
"utcOffset": 0,
"username": "example"
},
"success": 'true'
}
UPLOAD_FILE_TEST = {
"success": "true"
}
SET_ROOM_TOPIC_TEST = {
"topic": "test topic",
"success": 'true'
}
| 19.738255
| 49
| 0.440326
|
9ad346e6e4fa8d34300bcc3d5cc349b40dc43a53
| 10,025
|
py
|
Python
|
google-cloud-phishing_protection/synth.py
|
mtsmfm/google-cloud-ruby
|
5181abcae688e4fe4a55031eb844cb982df95154
|
[
"Apache-2.0"
] | 1
|
2020-11-14T21:49:23.000Z
|
2020-11-14T21:49:23.000Z
|
google-cloud-phishing_protection/synth.py
|
WebElZeN/google-cloud-ruby
|
c091a97f668010965809c450c54dbcdfe850fad5
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-phishing_protection/synth.py
|
WebElZeN/google-cloud-ruby
|
c091a97f668010965809c450c54dbcdfe850fad5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
import os
import re
from subprocess import call
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
v1beta1_library = gapic.ruby_library(
'phishingprotection', 'v1beta1', artman_output_name='google-cloud-ruby/google-cloud-phishingprotection',
config_path='artman_phishingprotection_v1beta1.yaml'
)
s.copy(v1beta1_library / 'lib')
s.copy(v1beta1_library / 'test')
s.copy(v1beta1_library / 'README.md')
s.copy(v1beta1_library / 'LICENSE')
s.copy(v1beta1_library / '.gitignore')
s.copy(v1beta1_library / '.yardopts')
s.copy(v1beta1_library / 'google-cloud-phishing_protection.gemspec', merge=ruby.merge_gemspec)
# Copy common templates
templates = gcp.CommonTemplates().ruby_library()
s.copy(templates)
# Update gemspec to reflect Ruby 2.4
ruby.update_gemspec('google-cloud-phishing_protection.gemspec')
# Update README to reflect Ruby 2.4
s.replace(
'README.md',
'Ruby 2.3',
'Ruby 2.4'
)
# Support for service_address
s.replace(
[
'lib/google/cloud/phishing_protection.rb',
'lib/google/cloud/phishing_protection/v*.rb',
'lib/google/cloud/phishing_protection/v*/*_client.rb'
],
'\n(\\s+)#(\\s+)@param exception_transformer',
'\n\\1#\\2@param service_address [String]\n' +
'\\1#\\2 Override for the service hostname, or `nil` to leave as the default.\n' +
'\\1#\\2@param service_port [Integer]\n' +
'\\1#\\2 Override for the service port, or `nil` to leave as the default.\n' +
'\\1#\\2@param exception_transformer'
)
s.replace(
[
'lib/google/cloud/phishing_protection/v*.rb',
'lib/google/cloud/phishing_protection/v*/*_client.rb'
],
'\n(\\s+)metadata: nil,\n\\s+exception_transformer: nil,\n',
'\n\\1metadata: nil,\n\\1service_address: nil,\n\\1service_port: nil,\n\\1exception_transformer: nil,\n'
)
s.replace(
[
'lib/google/cloud/phishing_protection/v*.rb',
'lib/google/cloud/phishing_protection/v*/*_client.rb'
],
',\n(\\s+)lib_name: lib_name,\n\\s+lib_version: lib_version',
',\n\\1lib_name: lib_name,\n\\1service_address: service_address,\n\\1service_port: service_port,\n\\1lib_version: lib_version'
)
s.replace(
'lib/google/cloud/phishing_protection/v*/*_client.rb',
'service_path = self\\.class::SERVICE_ADDRESS',
'service_path = service_address || self.class::SERVICE_ADDRESS'
)
s.replace(
'lib/google/cloud/phishing_protection/v*/*_client.rb',
'port = self\\.class::DEFAULT_SERVICE_PORT',
'port = service_port || self.class::DEFAULT_SERVICE_PORT'
)
# Ensure simplecov is required
s.replace(
'test/**/*.rb',
'\n\nrequire "minitest/autorun"\n',
'\n\nrequire "simplecov"\nrequire "minitest/autorun"\n'
)
# Remove legacy release level from documentation
s.replace(
[
'README.md',
'lib/google/cloud/**/*.rb'
],
'\\s+\\(\\[\\w+\\]\\(https://github\\.com/(googleapis|GoogleCloudPlatform)/google-cloud-ruby#versioning\\)\\)',
''
)
# https://github.com/googleapis/gapic-generator/issues/2242
def escape_braces(match):
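    # Added descriptive note: escape bare {word} or {a,b} placeholders in the
    # generated doc comments with a double backslash so YARD does not treat
    # them as markup; text inside backticks, or braces already preceded by
    # '#', '$' or a backslash, is left untouched (see the issue linked above).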
expr = re.compile('^([^`]*(`[^`]*`[^`]*)*)([^`#\\$\\\\])\\{([\\w,]+)\\}')
content = match.group(0)
while True:
content, count = expr.subn('\\1\\3\\\\\\\\{\\4}', content)
if count == 0:
return content
s.replace(
'lib/google/cloud/**/*.rb',
'\n(\\s+)#[^\n]*[^\n#\\$\\\\]\\{[\\w,]+\\}',
escape_braces)
# https://github.com/googleapis/gapic-generator/issues/2243
s.replace(
'lib/google/cloud/phishing_protection/*/*_client.rb',
'(\n\\s+class \\w+Client\n)(\\s+)(attr_reader :\\w+_stub)',
'\\1\\2# @private\n\\2\\3')
# https://github.com/googleapis/gapic-generator/issues/2279
s.replace(
'lib/**/*.rb',
'\\A(((#[^\n]*)?\n)*# (Copyright \\d+|Generated by the protocol buffer compiler)[^\n]+\n(#[^\n]*\n)*\n)([^\n])',
'\\1\n\\6')
# https://github.com/googleapis/gapic-generator/issues/2323
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://github\\.com/GoogleCloudPlatform/google-cloud-ruby',
'https://github.com/googleapis/google-cloud-ruby'
)
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://googlecloudplatform\\.github\\.io/google-cloud-ruby',
'https://googleapis.github.io/google-cloud-ruby'
)
# https://github.com/googleapis/gapic-generator/issues/2180
s.replace(
'google-cloud-phishing_protection.gemspec',
'gem.add_dependency "google-gax", "~> 1\\.[\\d\\.]+"',
"\n".join([
'gem.add_dependency "google-gax", "~> 1.8"',
' gem.add_dependency "googleapis-common-protos", ">= 1.3.9", "< 2.0"',
' gem.add_dependency "googleapis-common-protos-types", ">= 1.0.4", "< 2.0"',
' gem.add_dependency "grpc-google-iam-v1", "~> 0.6.9"'
])
)
os.rename(
'lib/google/cloud/phishing_protection/v1beta1/phishing_protection_service_v1_beta1_client_config.json',
'lib/google/cloud/phishing_protection/v1beta1/phishing_protection_client_config.json'
)
os.rename(
'lib/google/cloud/phishing_protection/v1beta1/phishing_protection_service_v1_beta1_client.rb',
'lib/google/cloud/phishing_protection/v1beta1/phishing_protection_client.rb'
)
os.rename(
'test/google/cloud/phishing_protection/v1beta1/phishing_protection_service_v1_beta1_client_test.rb',
'test/google/cloud/phishing_protection/v1beta1/phishing_protection_client_test.rb'
)
s.replace(
'lib/**/*.rb',
'_service_v1_beta1',
''
)
s.replace(
'lib/**/*.rb',
'ServiceV1Beta1',
''
)
s.replace(
'lib/**/*.rb',
'"google.cloud.phishingprotection.v1beta1.PhishingProtection"',
'"google.cloud.phishingprotection.v1beta1.PhishingProtectionServiceV1Beta1"'
)
s.replace(
'lib/**/credentials.rb',
'PHISHINGPROTECTION_',
'PHISHING_PROTECTION_'
)
s.replace(
'test/**/*.rb',
'_service_v1_beta1',
''
)
s.replace(
'test/**/*.rb',
'ServiceV1Beta1',
''
)
# Require the helpers file
s.replace(
f'lib/google/cloud/phishing_protection/v1beta1.rb',
f'require "google/cloud/phishing_protection/v1beta1/phishing_protection_client"',
'\n'.join([
f'require "google/cloud/phishing_protection/v1beta1/phishing_protection_client"',
f'require "google/cloud/phishing_protection/v1beta1/helpers"'
])
)
s.replace(
'google-cloud-phishing_protection.gemspec',
'"README.md", "LICENSE"',
'"README.md", "AUTHENTICATION.md", "LICENSE"'
)
s.replace(
'.yardopts',
'README.md\n',
'README.md\nAUTHENTICATION.md\nLICENSE\n'
)
# https://github.com/googleapis/google-cloud-ruby/issues/3058
s.replace(
'google-cloud-phishing_protection.gemspec',
'\nGem::Specification.new do',
'require File.expand_path("../lib/google/cloud/phishing_protection/version", __FILE__)\n\nGem::Specification.new do'
)
s.replace(
'google-cloud-phishing_protection.gemspec',
'(gem.version\s+=\s+).\d+.\d+.\d.*$',
'\\1Google::Cloud::PhishingProtection::VERSION'
)
s.replace(
'lib/google/cloud/phishing_protection/v1beta1/*_client.rb',
'(require \".*credentials\"\n)\n',
'\\1require "google/cloud/phishing_protection/version"\n\n'
)
s.replace(
'lib/google/cloud/phishing_protection/v1beta1/*_client.rb',
'Gem.loaded_specs\[.*\]\.version\.version',
'Google::Cloud::PhishingProtection::VERSION'
)
# Fix links for devsite migration
for file in ['lib/**/*.rb', '*.md']:
s.replace(
file,
'https://googleapis.github.io/google-cloud-ruby/#/docs/google-cloud-logging/latest/google/cloud/logging/logger',
'https://googleapis.dev/ruby/google-cloud-logging/latest'
)
s.replace(
'*.md',
'https://googleapis.github.io/google-cloud-ruby/#/docs/.*/authentication',
'https://googleapis.dev/ruby/google-cloud-phishing_protection/latest/file.AUTHENTICATION.html'
)
s.replace(
'lib/**/*.rb',
'https://googleapis.github.io/google-cloud-ruby/#/docs/.*/authentication',
'https://googleapis.dev/ruby/google-cloud-phishing_protection/latest/file.AUTHENTICATION.html'
)
s.replace(
'README.md',
'github.io/google-cloud-ruby/#/docs/google-cloud-phishing_protection/latest/.*$',
'dev/ruby/google-cloud-phishing_protection/latest'
)
s.replace(
['README.md', 'lib/**/*.rb'],
'https://cloud.google.com/phishingprotection',
'https://cloud.google.com/phishing-protection'
)
# https://github.com/googleapis/gapic-generator/issues/2525
s.replace(
'lib/google/cloud/phishing_protection/v*/**/*.rb',
'Google::Cloud::Phishingprotection',
'Google::Cloud::PhishingProtection')
s.replace(
'lib/google/cloud/phishing_protection/v*/doc/google/cloud/phishingprotection/**/*.rb',
'\n module Phishingprotection\n',
'\n module PhishingProtection\n'
)
# https://github.com/protocolbuffers/protobuf/issues/5584
s.replace(
'lib/google/cloud/phishingprotection/v*/*_pb.rb',
'\nmodule Google::Cloud::PhishingProtection::V(\\w+)\n',
'\nmodule Google\n module Cloud\n module PhishingProtection\n end\n Phishingprotection = PhishingProtection unless const_defined? :Phishingprotection\n end\nend\nmodule Google::Cloud::PhishingProtection::V\\1\n',
)
# Generate the helper methods
call('bundle update && bundle exec rake generate_partials', shell=True)
| 32.761438
| 227
| 0.677107
|
c42a61dcc12257f9bfbe08fd3682bfb3386cce90
| 7,383
|
py
|
Python
|
fairscale/nn/model_parallel/initialize.py
|
vfdev-5/fairscale
|
b75a5e266d0d7953186a59feff8d808af4e0bf82
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2021-03-31T10:31:26.000Z
|
2021-03-31T10:31:26.000Z
|
fairscale/nn/model_parallel/initialize.py
|
vfdev-5/fairscale
|
b75a5e266d0d7953186a59feff8d808af4e0bf82
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
fairscale/nn/model_parallel/initialize.py
|
vfdev-5/fairscale
|
b75a5e266d0d7953186a59feff8d808af4e0bf82
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model and data parallel groups."""
from typing import List, Optional
import torch
from .utils import ensure_divisibility
# Model parallel group that the current rank belongs to.
_MODEL_PARALLEL_GROUP = None
# Data parallel group that the current rank belongs to.
_DATA_PARALLEL_GROUP = None
# Pipeline parallel group that the current rank belongs to.
_PIPELINE_PARALLEL_GROUP = None
_PIPELINE_PARALLEL_RANKS = None
def initialize_model_parallel(
model_parallel_size_: int,
pipeline_length: int = 1,
*,
model_parallel_backend: Optional[str] = None,
pipeline_backend: Optional[str] = None,
ddp_backend: Optional[str] = None
) -> None:
"""
Initialize model data parallel groups.
Arguments:
model_parallel_size: number of GPUs used to parallelize model.
Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
use 2 GPUs to parallelize the model. The present function will
    create 4 model parallel groups and 2 data parallel groups as:
4 model parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7]
2 data parallel groups:
[g0, g2, g4, g6], [g1, g3, g5, g7]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
world_size = torch.distributed.get_world_size()
model_parallel_size = int(min(model_parallel_size_, world_size))
ensure_divisibility(world_size, model_parallel_size)
ensure_divisibility(world_size, model_parallel_size * pipeline_length)
rank = torch.distributed.get_rank()
data_parallel_size = int(world_size / (model_parallel_size * pipeline_length))
if torch.distributed.get_rank() == 0:
print("> initializing model parallel with size {}".format(model_parallel_size_))
print("> initializing ddp with size {}".format(data_parallel_size))
print("> initializing pipeline with size {}".format(pipeline_length))
groups = torch.LongTensor(range(world_size)).reshape(data_parallel_size, pipeline_length, model_parallel_size)
found = torch.where(groups == rank)
assert all(len(x) == 1 for x in found)
found = [x[0] for x in found]
# Build the data parallel groups.
global _DATA_PARALLEL_GROUP
assert _DATA_PARALLEL_GROUP is None, "data parallel group is already initialized"
for j in range(pipeline_length):
for k in range(model_parallel_size):
group = torch.distributed.new_group(groups[:, j, k].tolist(), backend=ddp_backend)
if j == found[1] and k == found[2]:
_DATA_PARALLEL_GROUP = group
# Build the model parallel groups.
global _MODEL_PARALLEL_GROUP
assert _MODEL_PARALLEL_GROUP is None, "model parallel group is already initialized"
for i in range(data_parallel_size):
for j in range(pipeline_length):
group = torch.distributed.new_group(groups[i, j, :].tolist(), backend=model_parallel_backend)
if i == found[0] and j == found[1]:
_MODEL_PARALLEL_GROUP = group
global _PIPELINE_PARALLEL_GROUP
assert _PIPELINE_PARALLEL_GROUP is None, "model parallel group is already initialized"
global _PIPELINE_PARALLEL_RANKS
assert _PIPELINE_PARALLEL_RANKS is None, "model parallel group is already initialized"
for i in range(data_parallel_size):
for k in range(model_parallel_size):
ranks = groups[i, :, k].tolist()
group = torch.distributed.new_group(ranks, backend=pipeline_backend)
if i == found[0] and k == found[2]:
_PIPELINE_PARALLEL_GROUP = group
_PIPELINE_PARALLEL_RANKS = ranks
def model_parallel_is_initialized() -> bool:
"""Check if model and data parallel groups are initialized."""
if _MODEL_PARALLEL_GROUP is None or _DATA_PARALLEL_GROUP is None or _PIPELINE_PARALLEL_GROUP is None:
return False
return True
def get_model_parallel_group() -> torch.distributed.ProcessGroup:
"""Get the model parallel group the caller rank belongs to."""
assert _MODEL_PARALLEL_GROUP is not None, "model parallel group is not initialized"
return _MODEL_PARALLEL_GROUP
def get_data_parallel_group() -> torch.distributed.ProcessGroup:
"""Get the data parallel group the caller rank belongs to."""
assert _DATA_PARALLEL_GROUP is not None, "data parallel group is not initialized"
return _DATA_PARALLEL_GROUP
def get_pipeline_parallel_group() -> torch.distributed.ProcessGroup:
"""Get the pipeline parallel group the caller rank belongs to."""
assert _PIPELINE_PARALLEL_GROUP is not None, "pipeline parallel group is not initialized"
return _PIPELINE_PARALLEL_GROUP
def get_pipeline_parallel_ranks() -> List[int]:
"""Get the pipeline parallel group the caller rank belongs to."""
assert _PIPELINE_PARALLEL_RANKS is not None, "pipeline parallel group is not initialized"
return _PIPELINE_PARALLEL_RANKS
def get_model_parallel_world_size() -> int:
"""Return world size for the model parallel group."""
return torch.distributed.get_world_size(group=get_model_parallel_group())
def get_model_parallel_rank() -> int:
"""Return my rank for the model parallel group."""
return torch.distributed.get_rank(group=get_model_parallel_group())
def get_model_parallel_src_rank() -> int:
"""Calculate the global rank corresponding to a local rank zero
in the model parallel group."""
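    # Example (illustrative): with a model parallel world size of 2, global
    # ranks 4 and 5 share a group, so both map to source rank 4.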
global_rank = torch.distributed.get_rank()
local_world_size = get_model_parallel_world_size()
return (global_rank // local_world_size) * local_world_size
def get_data_parallel_world_size() -> int:
"""Return world size for the data parallel group."""
return torch.distributed.get_world_size(group=get_data_parallel_group())
def get_data_parallel_rank() -> int:
"""Return my rank for the data parallel group."""
return torch.distributed.get_rank(group=get_data_parallel_group())
def destroy_model_parallel() -> None:
"""Set the groups to none."""
global _MODEL_PARALLEL_GROUP
_MODEL_PARALLEL_GROUP = None
global _DATA_PARALLEL_GROUP
_DATA_PARALLEL_GROUP = None
global _PIPELINE_PARALLEL_GROUP
_PIPELINE_PARALLEL_GROUP = None
global _PIPELINE_PARALLEL_RANKS
_PIPELINE_PARALLEL_RANKS = None
| 39.063492
| 114
| 0.72843
|
4bb9da9e4851217fef3fd9c0bfd2998b2cfdb97c
| 24,377
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_system_admin_user_dashboard.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_system_admin_user_dashboard.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_system_admin_user_dashboard.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_system_admin_user_dashboard
short_description: Custom dashboard widgets.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
    - Normally, running one module can fail when a non-zero rc is returned. You can also override
      the conditions to fail or succeed with parameters rc_failed and rc_succeeded.
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
        description: only set to True when the module schema differs from the FortiManager API structure; the module then continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
        description: the rc codes list with which the conditions to succeed will be overridden
type: list
required: false
rc_failed:
        description: the rc codes list with which the conditions to fail will be overridden
type: list
required: false
user:
description: the parameter (user) in requested url
type: str
required: true
system_admin_user_dashboard:
description: the top level parameters set
required: false
type: dict
suboptions:
column:
type: int
default: 0
description: 'Widgets column ID.'
diskio-content-type:
type: str
default: 'util'
description:
- 'Disk I/O Monitor widgets chart type.'
- 'util - bandwidth utilization.'
- 'iops - the number of I/O requests.'
- 'blks - the amount of data of I/O requests.'
choices:
- 'util'
- 'iops'
- 'blks'
diskio-period:
type: str
default: '1hour'
description:
- 'Disk I/O Monitor widgets data period.'
- '1hour - 1 hour.'
- '8hour - 8 hour.'
- '24hour - 24 hour.'
choices:
- '1hour'
- '8hour'
- '24hour'
log-rate-period:
type: str
description:
- 'Log receive monitor widgets data period.'
- '2min - 2 minutes.'
- '1hour - 1 hour.'
- '6hours - 6 hours.'
choices:
- '2min'
- '1hour'
- '6hours'
log-rate-topn:
type: str
default: '5'
description:
- 'Log receive monitor widgets number of top items to display.'
- '1 - Top 1.'
- '2 - Top 2.'
- '3 - Top 3.'
- '4 - Top 4.'
- '5 - Top 5.'
choices:
- '1'
- '2'
- '3'
- '4'
- '5'
log-rate-type:
type: str
default: 'device'
description:
- 'Log receive monitor widgets statistics breakdown options.'
- 'log - Show log rates for each log type.'
- 'device - Show log rates for each device.'
choices:
- 'log'
- 'device'
moduleid:
type: int
default: 0
description: 'Widget ID.'
name:
type: str
description: 'Widget name.'
num-entries:
type: int
default: 10
description: 'Number of entries.'
refresh-interval:
type: int
default: 300
description: 'Widgets refresh interval.'
res-cpu-display:
type: str
default: 'average'
description:
- 'Widgets CPU display type.'
- 'average - Average usage of CPU.'
- 'each - Each usage of CPU.'
choices:
- 'average'
- 'each'
res-period:
type: str
description:
- 'Widgets data period.'
- '10min - Last 10 minutes.'
- 'hour - Last hour.'
- 'day - Last day.'
choices:
- '10min'
- 'hour'
- 'day'
res-view-type:
type: str
description:
- 'Widgets data view type.'
- 'real-time - Real-time view.'
- 'history - History view.'
choices:
- 'real-time'
- 'history'
status:
type: str
default: 'open'
description:
- 'Widgets opened/closed state.'
- 'close - Widget closed.'
- 'open - Widget opened.'
choices:
- 'close'
- 'open'
tabid:
type: int
default: 0
description: 'ID of tab where widget is displayed.'
time-period:
type: str
default: '1hour'
description:
- 'Log Database Monitor widgets data period.'
- '1hour - 1 hour.'
- '8hour - 8 hour.'
- '24hour - 24 hour.'
choices:
- '1hour'
- '8hour'
- '24hour'
widget-type:
type: str
description:
- 'Widget type.'
- 'top-lograte - Log Receive Monitor.'
- 'sysres - System resources.'
- 'sysinfo - System Information.'
- 'licinfo - License Information.'
- 'jsconsole - CLI Console.'
- 'sysop - Unit Operation.'
- 'alert - Alert Message Console.'
- 'statistics - Statistics.'
- 'rpteng - Report Engine.'
- 'raid - Disk Monitor.'
- 'logrecv - Logs/Data Received.'
- 'devsummary - Device Summary.'
- 'logdb-perf - Log Database Performance Monitor.'
- 'logdb-lag - Log Database Lag Time.'
- 'disk-io - Disk I/O.'
- 'log-rcvd-fwd - Log receive and forwarding Monitor.'
choices:
- 'top-lograte'
- 'sysres'
- 'sysinfo'
- 'licinfo'
- 'jsconsole'
- 'sysop'
- 'alert'
- 'statistics'
- 'rpteng'
- 'raid'
- 'logrecv'
- 'devsummary'
- 'logdb-perf'
- 'logdb-lag'
- 'disk-io'
- 'log-rcvd-fwd'
'''
EXAMPLES = '''
- name: gathering fortimanager facts
hosts: fortimanager00
gather_facts: no
connection: httpapi
collections:
- fortinet.fortimanager
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: retrieve all the dashboard widgets
fmgr_fact:
facts:
selector: 'system_admin_user_dashboard'
params:
user: 'ansible-test'
dashboard: ''
- hosts: fortimanager00
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Custom dashboard widgets.
fmgr_system_admin_user_dashboard:
bypass_validation: False
user: ansible-test
state: present
system_admin_user_dashboard:
column: 1
diskio-content-type: util #<value in [util, iops, blks]>
diskio-period: 1hour #<value in [1hour, 8hour, 24hour]>
log-rate-period: 1hour #<value in [2min , 1hour, 6hours]>
log-rate-topn: 5 #<value in [1, 2, 3, ...]>
log-rate-type: device #<value in [log, device]>
moduleid: 10
name: ansible-test-dashboard
num-entries: 10
refresh-interval: 0
res-cpu-display: 'each' #<value in [average , each]>
res-period: 10min #<value in [10min , hour, day]>
res-view-type: history #<value in [real-time , history]>
status: open #<value in [close, open]>
tabid: 1
time-period: 1hour #<value in [1hour, 8hour, 24hour]>
widget-type: sysres #<value in [top-lograte, sysres, sysinfo, ...]>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/cli/global/system/admin/user/{user}/dashboard'
]
perobject_jrpc_urls = [
'/cli/global/system/admin/user/{user}/dashboard/{dashboard}'
]
url_params = ['user']
module_primary_key = 'moduleid'
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'user': {
'required': True,
'type': 'str'
},
'system_admin_user_dashboard': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'options': {
'column': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'diskio-content-type': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'util',
'iops',
'blks'
],
'type': 'str'
},
'diskio-period': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'1hour',
'8hour',
'24hour'
],
'type': 'str'
},
'log-rate-period': {
'required': False,
'choices': [
'2min',
'1hour',
'6hours'
],
'type': 'str'
},
'log-rate-topn': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'1',
'2',
'3',
'4',
'5'
],
'type': 'str'
},
'log-rate-type': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'log',
'device'
],
'type': 'str'
},
'moduleid': {
'required': True,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'name': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'num-entries': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'refresh-interval': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'res-cpu-display': {
'required': False,
'choices': [
'average',
'each'
],
'type': 'str'
},
'res-period': {
'required': False,
'choices': [
'10min',
'hour',
'day'
],
'type': 'str'
},
'res-view-type': {
'required': False,
'choices': [
'real-time',
'history'
],
'type': 'str'
},
'status': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'close',
'open'
],
'type': 'str'
},
'tabid': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'time-period': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'1hour',
'8hour',
'24hour'
],
'type': 'str'
},
'widget-type': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'top-lograte',
'sysres',
'sysinfo',
'licinfo',
'jsconsole',
'sysop',
'alert',
'statistics',
'rpteng',
'raid',
'logrecv',
'devsummary',
'logdb-perf',
'logdb-lag',
'disk-io',
'log-rcvd-fwd'
],
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'system_admin_user_dashboard'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| 33.809986
| 153
| 0.386963
|
2eff54e16f9c4e80c20483646bd08697f92439ee
| 1,409
|
py
|
Python
|
Floyd/2610.py
|
kjh9267/BOJ_Python
|
b4d2ae09c252cc9280df93ccecbd07880947827e
|
[
"Apache-2.0"
] | null | null | null |
Floyd/2610.py
|
kjh9267/BOJ_Python
|
b4d2ae09c252cc9280df93ccecbd07880947827e
|
[
"Apache-2.0"
] | null | null | null |
Floyd/2610.py
|
kjh9267/BOJ_Python
|
b4d2ae09c252cc9280df93ccecbd07880947827e
|
[
"Apache-2.0"
] | null | null | null |
import sys
from collections import deque
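# Descriptive note (added for clarity): this solution groups members into
# connected components with BFS, runs Floyd-Warshall for all-pairs shortest
# paths, and then picks each team's representative as the member whose
# maximum distance to the rest of the team is smallest.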
def bfs():
queue = deque()
visit = [0 for _ in range(n)]
for u in range(n):
        if visit[u] == 0:
queue.append(u)
team = [u]
while queue:
x = queue.popleft()
for i, j in enumerate(graph[x]):
                    if i == u:
                        continue
                    if j == 1 and visit[i] == 0:
queue.append(i)
visit[i] = 1
team.append(i)
teams.append(team)
n = int(sys.stdin.readline())
m = int(sys.stdin.readline())
inf = float('inf')
graph = [[inf for __ in range(n)] for _ in range(n)]
teams = []
for _ in range(m):
a, b = map(int,sys.stdin.readline().split())
graph[a-1][b-1] = 1
graph[b-1][a-1] = 1
bfs()
for v in range(n):
for i in range(n):
for j in range(n):
if i == j:
continue
if graph[i][j] > graph[i][v] + graph[v][j]:
graph[i][j] = graph[i][v] + graph[v][j]
for i in range(n):
for j in range(n):
if graph[i][j] == inf:
graph[i][j] = 0
res = [[max(graph[j]) for j in i if graph[j]] for i in teams]
output = []
print(len(teams))
for i in range(len(teams)):
x = min(res[i])
output.append(teams[i][res[i].index(x)] + 1)
for i in sorted(output):
print(i)
| 23.483333
| 61
| 0.466288
|
59ce93c845a797427c1c062f74e2ee733548112a
| 470
|
py
|
Python
|
problems/rotate-image.py
|
sailikhithk/tech-interview-prep
|
e833764cf98915d56118bddfa0e01871c58de75e
|
[
"Apache-2.0"
] | null | null | null |
problems/rotate-image.py
|
sailikhithk/tech-interview-prep
|
e833764cf98915d56118bddfa0e01871c58de75e
|
[
"Apache-2.0"
] | null | null | null |
problems/rotate-image.py
|
sailikhithk/tech-interview-prep
|
e833764cf98915d56118bddfa0e01871c58de75e
|
[
"Apache-2.0"
] | null | null | null |
class Solution(object):
def rotate(self, matrix):
N = len(matrix)
        # Transpose in place (swap elements across the main diagonal).
        for i in range(N):
            for j in range(N):
                if j <= i:
                    continue
                matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
        # Reflect each row horizontally to finish the 90-degree clockwise rotation.
        for i in range(N):
            for j in range(N // 2):
                matrix[i][j], matrix[i][N - 1 - j] = matrix[i][N - 1 - j], matrix[i][j]
return matrix
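# Minimal usage sketch (not part of the original file): rotating a 3x3 matrix
# 90 degrees clockwise in place; the expected output shown is for illustration.
if __name__ == "__main__":
    grid = [[1, 2, 3],
            [4, 5, 6],
            [7, 8, 9]]
    print(Solution().rotate(grid))  # [[7, 4, 1], [8, 5, 2], [9, 6, 3]]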
| 29.375
| 79
| 0.444681
|
dc37a1ca4a7bfe34b812b0162ff15dc6e1540872
| 3,884
|
py
|
Python
|
virtual_env/libs/mysql-connector/python2/examples/transaction.py
|
bopopescu/fantastico
|
7c95f244f0cf0239ac5408146612dd72f88d35ea
|
[
"MIT"
] | 2
|
2016-12-18T02:42:25.000Z
|
2018-01-30T16:32:29.000Z
|
virtual_env/libs/mysql-connector/python2/examples/transaction.py
|
bopopescu/fantastico
|
7c95f244f0cf0239ac5408146612dd72f88d35ea
|
[
"MIT"
] | 1
|
2020-07-28T07:16:35.000Z
|
2020-07-28T07:16:35.000Z
|
virtual_env/libs/mysql-connector/python2/examples/transaction.py
|
bopopescu/fantastico
|
7c95f244f0cf0239ac5408146612dd72f88d35ea
|
[
"MIT"
] | 1
|
2020-07-24T05:55:28.000Z
|
2020-07-24T05:55:28.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys, os
import mysql.connector
"""
Example using MySQL Connector/Python showing:
* dropping and creating a table
* using warnings
* doing a transaction, rolling it back and committing one.
"""
def main(config):
output = []
db = mysql.connector.Connect(**config)
cursor = db.cursor()
# Drop table if exists, and create it new
stmt_drop = "DROP TABLE IF EXISTS names"
cursor.execute(stmt_drop)
stmt_create = """
CREATE TABLE names (
id TINYINT UNSIGNED NOT NULL AUTO_INCREMENT,
name VARCHAR(30) DEFAULT '' NOT NULL,
cnt TINYINT UNSIGNED DEFAULT 0,
PRIMARY KEY (id)
) ENGINE=InnoDB"""
cursor.execute(stmt_create)
warnings = cursor.fetchwarnings()
if warnings:
ids = [ i for l,i,m in warnings]
output.append("Oh oh.. we got warnings..")
if 1266L in ids:
output.append("""
Table was created as MYISAM, no transaction support.
Bailing out, no use to continue. Make sure InnoDB is available!
""")
db.close()
return
# Insert 3 records
output.append("Inserting data")
names = ( ('Geert',), ('Jan',), ('Michel',) )
stmt_insert = "INSERT INTO names (name) VALUES (%s)"
cursor.executemany(stmt_insert, names)
# Roll back!!!!
output.append("Rolling back transaction")
db.rollback()
# There should be no data!
stmt_select = "SELECT id, name FROM names ORDER BY id"
cursor.execute(stmt_select)
rows = None
try:
rows = cursor.fetchall()
except (mysql.connector.errors.InterfaceError), e:
raise
if rows == []:
output.append("No data, all is fine.")
else:
output.append("Something is wrong, we have data although we rolled back!")
output.append([repr(r) for r in rows])
raise
# Do the insert again.
cursor.executemany(stmt_insert, names)
# Data should be already there
cursor.execute(stmt_select)
output.append("Data before commit:")
for row in cursor.fetchall():
output.append("%d | %s" % (row[0], row[1]))
# Do a commit
db.commit()
cursor.execute(stmt_select)
output.append("Data after commit:")
for row in cursor.fetchall():
output.append("%d | %s" % (row[0], row[1]))
# Cleaning up, dropping the table again
cursor.execute(stmt_drop)
db.close()
return output
if __name__ == '__main__':
#
# Configure MySQL login and database to use in config.py
#
from config import Config
config = Config.dbinfo().copy()
out = main(config)
print '\n'.join(out)
| 30.34375
| 82
| 0.650618
|
b8a60bad97cc2df66e1f58aeaa3e38a8ccfc603a
| 1,323
|
py
|
Python
|
imagededup/handlers/search/brute_force.py
|
agarwalutkarsh554/imagededup
|
8cf82f2f59b051d61f90b6366a1f831d97a564be
|
[
"Apache-2.0"
] | 4,100
|
2019-09-30T14:34:05.000Z
|
2022-03-31T15:45:20.000Z
|
imagededup/handlers/search/brute_force.py
|
agarwalutkarsh554/imagededup
|
8cf82f2f59b051d61f90b6366a1f831d97a564be
|
[
"Apache-2.0"
] | 112
|
2019-10-01T14:08:35.000Z
|
2022-03-25T01:59:19.000Z
|
imagededup/handlers/search/brute_force.py
|
agarwalutkarsh554/imagededup
|
8cf82f2f59b051d61f90b6366a1f831d97a564be
|
[
"Apache-2.0"
] | 380
|
2019-10-04T13:02:28.000Z
|
2022-03-31T09:41:11.000Z
|
from typing import Callable, Dict, List, Tuple
class BruteForce:
"""
Class to perform search using a Brute force.
"""
def __init__(self, hash_dict: Dict, distance_function: Callable) -> None:
"""
Initialize a dictionary for mapping file names and corresponding hashes and a distance function to be used for
getting distance between two hash strings.
Args:
hash_dict: Dictionary mapping file names to corresponding hash strings {filename: hash}
distance_function: A function for calculating distance between the hashes.
"""
self.distance_function = distance_function
self.hash_dict = hash_dict # database
    def search(self, query: str, tol: int = 10) -> List[Tuple[str, int]]:
"""
Function for searching using brute force.
Args:
query: hash string for which brute force needs to work.
            tol: distance up to which a duplicate is considered valid.
        Returns:
            List of tuples of the form [(valid_retrieval_filename1, distance), (valid_retrieval_filename2, distance)]
"""
return [
(item, self.distance_function(query, self.hash_dict[item]))
for item in self.hash_dict
if self.distance_function(query, self.hash_dict[item]) <= tol
]
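# Minimal usage sketch (not part of the original module): the distance function
# and hash values below are assumptions for illustration, not imagededup APIs.
if __name__ == "__main__":
    def hamming_distance(a: str, b: str) -> int:
        # Count positions at which two equal-length hash strings differ.
        return sum(c1 != c2 for c1, c2 in zip(a, b))

    database = {"cat.jpg": "ffd8e1a0", "dog.jpg": "ffd8e1a1", "car.jpg": "00d8e1ff"}
    finder = BruteForce(database, hamming_distance)
    # Returns [(filename, distance), ...] for entries within the tolerance.
    print(finder.search("ffd8e1a0", tol=1))  # [('cat.jpg', 0), ('dog.jpg', 1)]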
| 35.756757
| 118
| 0.641723
|
065198c83e57a04b235d33595a23d8e4706502f6
| 35,492
|
py
|
Python
|
pysnmp/ALCATEL-IND1-SLB-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/ALCATEL-IND1-SLB-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/ALCATEL-IND1-SLB-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module ALCATEL-IND1-SLB-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ALCATEL-IND1-SLB-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:03:47 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
softentIND1Slb, = mibBuilder.importSymbols("ALCATEL-IND1-BASE", "softentIND1Slb")
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
Counter32, Unsigned32, TimeTicks, ObjectIdentity, Bits, MibIdentifier, NotificationType, Counter64, Gauge32, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, IpAddress, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Unsigned32", "TimeTicks", "ObjectIdentity", "Bits", "MibIdentifier", "NotificationType", "Counter64", "Gauge32", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "IpAddress", "iso")
DisplayString, TextualConvention, MacAddress, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "MacAddress", "RowStatus")
alcatelIND1SLBMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1))
alcatelIND1SLBMIB.setRevisions(('2010-05-13 00:00',))
if mibBuilder.loadTexts: alcatelIND1SLBMIB.setLastUpdated('201005130000Z')
if mibBuilder.loadTexts: alcatelIND1SLBMIB.setOrganization('Alcatel-Lucent')
alcatelIND1SLBMIBNotifications = ObjectIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 0))
if mibBuilder.loadTexts: alcatelIND1SLBMIBNotifications.setStatus('current')
alcatelIND1SLBMIBObjects = ObjectIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1))
if mibBuilder.loadTexts: alcatelIND1SLBMIBObjects.setStatus('current')
alcatelIND1SLBMIBConformance = ObjectIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 2))
if mibBuilder.loadTexts: alcatelIND1SLBMIBConformance.setStatus('current')
alcatelIND1SLBMIBGroups = ObjectIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 2, 1))
if mibBuilder.loadTexts: alcatelIND1SLBMIBGroups.setStatus('current')
alcatelIND1SLBMIBCompliances = ObjectIdentity((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 2, 2))
if mibBuilder.loadTexts: alcatelIND1SLBMIBCompliances.setStatus('current')
class SlbAdminState(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("disable", 1), ("enable", 2))
class SlbOperState(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("outOfService", 1), ("inService", 2))
class SlbRedirectAlgorithm(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("roundRobin", 1), ("serverFailover", 2))
class SlbHashSelector(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))
namedValues = NamedValues(("useEcmp", 1), ("dip", 2), ("sip", 3), ("sipAndDip", 4), ("sipDipL4sportL4dport", 5), ("dipSipL4dportL4sport", 6), ("sipL4sport", 7), ("dipL4dport", 8))
class SlbServerOperState(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("inService", 1), ("linkDown", 2), ("noAnswer", 3), ("disabled", 4), ("retrying", 5), ("discovery", 6))
slbFeature = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 1))
slbAdminStatus = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 1, 1), SlbAdminState().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slbAdminStatus.setStatus('current')
slbOperStatus = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 1, 2), SlbOperState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbOperStatus.setStatus('current')
slbClustersCount = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbClustersCount.setStatus('current')
slbResetStatistics = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("notSignificant", 0), ("resetSlbStats", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: slbResetStatistics.setStatus('current')
slbClusters = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2))
slbClusterTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1), )
if mibBuilder.loadTexts: slbClusterTable.setStatus('current')
slbClusterTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1), ).setIndexNames((0, "ALCATEL-IND1-SLB-MIB", "slbClusterName"))
if mibBuilder.loadTexts: slbClusterTableEntry.setStatus('current')
slbClusterName = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 23)))
if mibBuilder.loadTexts: slbClusterName.setStatus('current')
slbClusterAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 2), SlbAdminState().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbClusterAdminStatus.setStatus('current')
slbClusterOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 3), SlbOperState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbClusterOperStatus.setStatus('current')
slbClusterVIP = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 4), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbClusterVIP.setStatus('current')
slbClusterPingPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 3600)).clone(60)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbClusterPingPeriod.setStatus('current')
slbClusterPingTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1000, 3600000)).clone(3000)).setUnits('milliseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbClusterPingTimeout.setStatus('current')
slbClusterPingRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255)).clone(3)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbClusterPingRetries.setStatus('current')
slbClusterRedirectAlgorithm = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 8), SlbRedirectAlgorithm().clone('roundRobin')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbClusterRedirectAlgorithm.setStatus('current')
slbClusterIdleTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400)).clone(1200)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbClusterIdleTimer.setStatus('current')
slbClusterNumberOfServers = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbClusterNumberOfServers.setStatus('current')
slbClusterNewFlows = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbClusterNewFlows.setStatus('current')
slbClusterRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 12), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbClusterRowStatus.setStatus('current')
slbClusterProbeName = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 13), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 23))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbClusterProbeName.setStatus('current')
slbClusterPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbClusterPackets.setStatus('current')
slbClusterCondition = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 15), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 23))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbClusterCondition.setStatus('current')
slbClusterType = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("l3", 1), ("l2", 2))).clone('l3')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbClusterType.setStatus('current')
slbClusterHashType = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 2, 1, 1, 17), SlbHashSelector().clone('useEcmp')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbClusterHashType.setStatus('current')
slbServers = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3))
slbServerTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1), )
if mibBuilder.loadTexts: slbServerTable.setStatus('current')
slbServerTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1, 1), ).setIndexNames((0, "ALCATEL-IND1-SLB-MIB", "slbServerClusterName"), (0, "ALCATEL-IND1-SLB-MIB", "slbServerIpAddress"))
if mibBuilder.loadTexts: slbServerTableEntry.setStatus('current')
slbServerClusterName = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 23)))
if mibBuilder.loadTexts: slbServerClusterName.setStatus('current')
slbServerIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1, 1, 2), IpAddress())
if mibBuilder.loadTexts: slbServerIpAddress.setStatus('current')
slbServerAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1, 1, 3), SlbAdminState().clone('enable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbServerAdminStatus.setStatus('current')
slbServerOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1, 1, 4), SlbServerOperState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbServerOperStatus.setStatus('current')
slbServerAdminWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 100)).clone(10)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbServerAdminWeight.setStatus('current')
slbServerUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbServerUpTime.setStatus('current')
slbServerLastRTT = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1, 1, 7), Integer32()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: slbServerLastRTT.setStatus('current')
slbServerPingFails = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbServerPingFails.setStatus('current')
slbServerPortDown = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbServerPortDown.setStatus('current')
slbServerFlows = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbServerFlows.setStatus('current')
slbServerRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1, 1, 11), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbServerRowStatus.setStatus('current')
slbServerProbeName = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1, 1, 12), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 23))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbServerProbeName.setStatus('current')
slbServerProbeStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 3, 1, 1, 13), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbServerProbeStatus.setStatus('current')
slbProbes = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4))
slbProbeTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1), )
if mibBuilder.loadTexts: slbProbeTable.setStatus('current')
slbProbeTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1), ).setIndexNames((0, "ALCATEL-IND1-SLB-MIB", "slbProbeName"))
if mibBuilder.loadTexts: slbProbeTableEntry.setStatus('current')
slbProbeName = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 23)))
if mibBuilder.loadTexts: slbProbeName.setStatus('current')
slbProbeMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("ping", 1), ("http", 2), ("https", 3), ("udp", 4), ("tcp", 5), ("ftp", 6), ("smtp", 7), ("pop", 8), ("pops", 9), ("imap", 10), ("imaps", 11), ("nntp", 12))).clone('ping')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbProbeMethod.setStatus('current')
slbProbePeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 3600)).clone(60)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbProbePeriod.setStatus('current')
slbProbeTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1000, 3600000)).clone(3000)).setUnits('milliseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbProbeTimeout.setStatus('current')
slbProbeRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255)).clone(3)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbProbeRetries.setStatus('current')
slbProbePort = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1, 6), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbProbePort.setStatus('current')
slbProbeExpect = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1, 7), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbProbeExpect.setStatus('current')
slbProbeSSL = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbProbeSSL.setStatus('current')
slbProbeSend = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbProbeSend.setStatus('current')
slbProbeHttpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1, 10), Integer32().clone(200)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbProbeHttpStatus.setStatus('current')
slbProbeHttpUrl = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1, 11), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbProbeHttpUrl.setStatus('current')
slbProbeHttpUsername = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1, 12), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbProbeHttpUsername.setStatus('current')
slbProbeHttpPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1, 13), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbProbeHttpPassword.setStatus('current')
slbProbeRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 4, 1, 1, 14), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: slbProbeRowStatus.setStatus('current')
slbStats = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5))
slbStatsTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 1), )
if mibBuilder.loadTexts: slbStatsTable.setStatus('current')
slbStatsTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 1, 1), ).setIndexNames((0, "ALCATEL-IND1-SLB-MIB", "slbStatsClusterName"), (0, "ALCATEL-IND1-SLB-MIB", "slbStatsIndex"))
if mibBuilder.loadTexts: slbStatsTableEntry.setStatus('current')
slbStatsClusterName = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 1, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 23)))
if mibBuilder.loadTexts: slbStatsClusterName.setStatus('current')
slbStatsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2000)))
if mibBuilder.loadTexts: slbStatsIndex.setStatus('current')
slbStatsCounter = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 1, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsCounter.setStatus('current')
slbStatsQual = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 6))
slbStatsQualTable = MibTable((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2), )
if mibBuilder.loadTexts: slbStatsQualTable.setStatus('current')
slbStatsQualTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1), ).setIndexNames((0, "ALCATEL-IND1-SLB-MIB", "slbStatsClusterName"), (0, "ALCATEL-IND1-SLB-MIB", "slbStatsIndex"), (0, "ALCATEL-IND1-SLB-MIB", "slbStatsQualType"))
if mibBuilder.loadTexts: slbStatsQualTableEntry.setStatus('current')
slbStatsQualType = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22))).clone(namedValues=NamedValues(("dstIp", 1), ("srcIp", 2), ("srcPort", 3), ("srcPortGroup", 4), ("srcVlan", 5), ("ipProtocol", 6), ("dstIpPort", 7), ("srcIpPort", 8), ("dstIpTcpPort", 9), ("srcIpTcpPort", 10), ("dstIpUdpPort", 11), ("srcIpUdpPort", 12), ("srcMac", 13), ("dstMac", 14), ("d8021p", 15), ("ethertype", 16), ("icmpType", 17), ("icmpCode", 18), ("tcpFlags", 19), ("tos", 20), ("dstPort", 21), ("dstPortGroup", 22))))
if mibBuilder.loadTexts: slbStatsQualType.setStatus('current')
slbStatsQualDataIp = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualDataIp.setStatus('current')
slbStatsQualDataIpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualDataIpMask.setStatus('current')
slbStatsQualDataSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualDataSlot.setStatus('current')
slbStatsQualDataStartPort = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualDataStartPort.setStatus('current')
slbStatsQualDataEndPort = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualDataEndPort.setStatus('current')
slbStatsQualDataIpProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualDataIpProtocol.setStatus('current')
slbStatsQualDataVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualDataVlan.setStatus('current')
slbStatsQualDataL4Port = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualDataL4Port.setStatus('current')
slbStatsQualDataMac = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 10), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualDataMac.setStatus('current')
slbStatsQualDataMacMask = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 11), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualDataMacMask.setStatus('current')
slbStatsQualDataEthertype = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualDataEthertype.setStatus('current')
slbStatsQualDataIcmpData = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualDataIcmpData.setStatus('current')
slbStatsQualDataTcpFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 14), OctetString().subtype(subtypeSpec=ValueSizeConstraint(3, 3)).setFixedLength(3)).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualDataTcpFlags.setStatus('current')
slbStatsQualDataTos = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 15), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualDataTos.setStatus('current')
slbStatsQualData8021p = MibTableColumn((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 5, 2, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbStatsQualData8021p.setStatus('current')
slbTrapsObj = MibIdentifier((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 7))
slbTrapException = NotificationType((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 0, 1)).setObjects(("ALCATEL-IND1-SLB-MIB", "slbTrapInfoException"))
if mibBuilder.loadTexts: slbTrapException.setStatus('current')
slbTrapConfigChanged = NotificationType((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 0, 2)).setObjects(("ALCATEL-IND1-SLB-MIB", "slbTrapInfoEntityGroup"), ("ALCATEL-IND1-SLB-MIB", "slbTrapInfoClusterName"), ("ALCATEL-IND1-SLB-MIB", "slbTrapInfoServerIpAddr"))
if mibBuilder.loadTexts: slbTrapConfigChanged.setStatus('current')
slbTrapOperStatus = NotificationType((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 0, 3)).setObjects(("ALCATEL-IND1-SLB-MIB", "slbTrapInfoEntityGroup"), ("ALCATEL-IND1-SLB-MIB", "slbTrapInfoOperStatus"), ("ALCATEL-IND1-SLB-MIB", "slbTrapInfoClusterName"), ("ALCATEL-IND1-SLB-MIB", "slbTrapInfoServerIpAddr"))
if mibBuilder.loadTexts: slbTrapOperStatus.setStatus('current')
slbTrapInfoClusterName = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 7, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 23))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbTrapInfoClusterName.setStatus('current')
slbTrapInfoOperStatus = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 7, 2), SlbOperState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbTrapInfoOperStatus.setStatus('current')
slbTrapInfoServerIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 7, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbTrapInfoServerIpAddr.setStatus('current')
slbTrapInfoEntityGroup = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 7, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("slb", 1), ("cluster", 2), ("server", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbTrapInfoEntityGroup.setStatus('current')
slbTrapInfoException = MibScalar((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 1, 7, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: slbTrapInfoException.setStatus('current')
alcatelIND1SLBMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 2, 2, 1)).setObjects(("ALCATEL-IND1-SLB-MIB", "slbFeatureGroup"), ("ALCATEL-IND1-SLB-MIB", "slbClustersGroup"), ("ALCATEL-IND1-SLB-MIB", "slbServersGroup"), ("ALCATEL-IND1-SLB-MIB", "slbProbesGroup"), ("ALCATEL-IND1-SLB-MIB", "slbTrapsGroup"), ("ALCATEL-IND1-SLB-MIB", "slbTrapsObjGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
alcatelIND1SLBMIBCompliance = alcatelIND1SLBMIBCompliance.setStatus('current')
slbFeatureGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 2, 1, 1)).setObjects(("ALCATEL-IND1-SLB-MIB", "slbAdminStatus"), ("ALCATEL-IND1-SLB-MIB", "slbOperStatus"), ("ALCATEL-IND1-SLB-MIB", "slbClustersCount"), ("ALCATEL-IND1-SLB-MIB", "slbResetStatistics"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
slbFeatureGroup = slbFeatureGroup.setStatus('current')
slbClustersGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 2, 1, 2)).setObjects(("ALCATEL-IND1-SLB-MIB", "slbClusterAdminStatus"), ("ALCATEL-IND1-SLB-MIB", "slbClusterOperStatus"), ("ALCATEL-IND1-SLB-MIB", "slbClusterVIP"), ("ALCATEL-IND1-SLB-MIB", "slbClusterPingPeriod"), ("ALCATEL-IND1-SLB-MIB", "slbClusterPingTimeout"), ("ALCATEL-IND1-SLB-MIB", "slbClusterPingRetries"), ("ALCATEL-IND1-SLB-MIB", "slbClusterRedirectAlgorithm"), ("ALCATEL-IND1-SLB-MIB", "slbClusterIdleTimer"), ("ALCATEL-IND1-SLB-MIB", "slbClusterNumberOfServers"), ("ALCATEL-IND1-SLB-MIB", "slbClusterNewFlows"), ("ALCATEL-IND1-SLB-MIB", "slbClusterRowStatus"), ("ALCATEL-IND1-SLB-MIB", "slbClusterProbeName"), ("ALCATEL-IND1-SLB-MIB", "slbClusterPackets"), ("ALCATEL-IND1-SLB-MIB", "slbClusterCondition"), ("ALCATEL-IND1-SLB-MIB", "slbClusterType"), ("ALCATEL-IND1-SLB-MIB", "slbClusterHashType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
slbClustersGroup = slbClustersGroup.setStatus('current')
slbServersGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 2, 1, 3)).setObjects(("ALCATEL-IND1-SLB-MIB", "slbServerAdminStatus"), ("ALCATEL-IND1-SLB-MIB", "slbServerOperStatus"), ("ALCATEL-IND1-SLB-MIB", "slbServerAdminWeight"), ("ALCATEL-IND1-SLB-MIB", "slbServerUpTime"), ("ALCATEL-IND1-SLB-MIB", "slbServerLastRTT"), ("ALCATEL-IND1-SLB-MIB", "slbServerPingFails"), ("ALCATEL-IND1-SLB-MIB", "slbServerFlows"), ("ALCATEL-IND1-SLB-MIB", "slbServerRowStatus"), ("ALCATEL-IND1-SLB-MIB", "slbServerProbeName"), ("ALCATEL-IND1-SLB-MIB", "slbServerProbeStatus"), ("ALCATEL-IND1-SLB-MIB", "slbServerPortDown"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
slbServersGroup = slbServersGroup.setStatus('current')
slbTrapsGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 2, 1, 4)).setObjects(("ALCATEL-IND1-SLB-MIB", "slbTrapException"), ("ALCATEL-IND1-SLB-MIB", "slbTrapConfigChanged"), ("ALCATEL-IND1-SLB-MIB", "slbTrapOperStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
slbTrapsGroup = slbTrapsGroup.setStatus('current')
slbProbesGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 2, 1, 5)).setObjects(("ALCATEL-IND1-SLB-MIB", "slbProbeMethod"), ("ALCATEL-IND1-SLB-MIB", "slbProbePeriod"), ("ALCATEL-IND1-SLB-MIB", "slbProbeTimeout"), ("ALCATEL-IND1-SLB-MIB", "slbProbeRetries"), ("ALCATEL-IND1-SLB-MIB", "slbProbePort"), ("ALCATEL-IND1-SLB-MIB", "slbProbeExpect"), ("ALCATEL-IND1-SLB-MIB", "slbProbeSend"), ("ALCATEL-IND1-SLB-MIB", "slbProbeSSL"), ("ALCATEL-IND1-SLB-MIB", "slbProbeHttpStatus"), ("ALCATEL-IND1-SLB-MIB", "slbProbeHttpUrl"), ("ALCATEL-IND1-SLB-MIB", "slbProbeHttpUsername"), ("ALCATEL-IND1-SLB-MIB", "slbProbeHttpPassword"), ("ALCATEL-IND1-SLB-MIB", "slbProbeRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
slbProbesGroup = slbProbesGroup.setStatus('current')
slbStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 2, 1, 6)).setObjects(("ALCATEL-IND1-SLB-MIB", "slbStatsCounter"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualDataIp"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualDataIpMask"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualDataSlot"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualDataStartPort"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualDataEndPort"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualDataVlan"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualDataL4Port"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualDataMac"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualDataEthertype"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualDataIcmpData"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualDataTcpFlags"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualDataTos"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualData8021p"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualDataIpProtocol"), ("ALCATEL-IND1-SLB-MIB", "slbStatsQualDataMacMask"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
slbStatsGroup = slbStatsGroup.setStatus('current')
slbTrapsObjGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6486, 801, 1, 2, 1, 20, 1, 2, 1, 7)).setObjects(("ALCATEL-IND1-SLB-MIB", "slbTrapInfoClusterName"), ("ALCATEL-IND1-SLB-MIB", "slbTrapInfoEntityGroup"), ("ALCATEL-IND1-SLB-MIB", "slbTrapInfoException"), ("ALCATEL-IND1-SLB-MIB", "slbTrapInfoOperStatus"), ("ALCATEL-IND1-SLB-MIB", "slbTrapInfoServerIpAddr"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
slbTrapsObjGroup = slbTrapsObjGroup.setStatus('current')
mibBuilder.exportSymbols("ALCATEL-IND1-SLB-MIB", slbClustersGroup=slbClustersGroup, slbTrapInfoOperStatus=slbTrapInfoOperStatus, slbTrapsGroup=slbTrapsGroup, slbProbes=slbProbes, slbTrapInfoEntityGroup=slbTrapInfoEntityGroup, slbServerUpTime=slbServerUpTime, slbTrapConfigChanged=slbTrapConfigChanged, slbStatsQualDataSlot=slbStatsQualDataSlot, slbClusterPingTimeout=slbClusterPingTimeout, slbProbeHttpPassword=slbProbeHttpPassword, slbStatsQual=slbStatsQual, slbServerOperStatus=slbServerOperStatus, slbAdminStatus=slbAdminStatus, slbProbeRetries=slbProbeRetries, slbProbePeriod=slbProbePeriod, alcatelIND1SLBMIBCompliance=alcatelIND1SLBMIBCompliance, slbClusterVIP=slbClusterVIP, slbTrapInfoServerIpAddr=slbTrapInfoServerIpAddr, slbServersGroup=slbServersGroup, alcatelIND1SLBMIBGroups=alcatelIND1SLBMIBGroups, alcatelIND1SLBMIBConformance=alcatelIND1SLBMIBConformance, slbTrapOperStatus=slbTrapOperStatus, slbStatsClusterName=slbStatsClusterName, slbClusterProbeName=slbClusterProbeName, slbClusterOperStatus=slbClusterOperStatus, slbStatsQualDataIpProtocol=slbStatsQualDataIpProtocol, slbServerIpAddress=slbServerIpAddress, slbClusterNumberOfServers=slbClusterNumberOfServers, alcatelIND1SLBMIBCompliances=alcatelIND1SLBMIBCompliances, alcatelIND1SLBMIB=alcatelIND1SLBMIB, PYSNMP_MODULE_ID=alcatelIND1SLBMIB, slbStatsQualData8021p=slbStatsQualData8021p, slbClusterPingPeriod=slbClusterPingPeriod, slbStats=slbStats, slbProbeName=slbProbeName, slbStatsTable=slbStatsTable, slbStatsTableEntry=slbStatsTableEntry, slbClusterRowStatus=slbClusterRowStatus, slbResetStatistics=slbResetStatistics, slbTrapsObjGroup=slbTrapsObjGroup, slbServerLastRTT=slbServerLastRTT, slbClusterNewFlows=slbClusterNewFlows, slbStatsQualDataTcpFlags=slbStatsQualDataTcpFlags, slbTrapInfoClusterName=slbTrapInfoClusterName, slbTrapInfoException=slbTrapInfoException, slbServers=slbServers, slbClusterName=slbClusterName, slbStatsGroup=slbStatsGroup, slbStatsQualDataMacMask=slbStatsQualDataMacMask, slbClusterPingRetries=slbClusterPingRetries, slbServerTableEntry=slbServerTableEntry, SlbRedirectAlgorithm=SlbRedirectAlgorithm, slbClusterAdminStatus=slbClusterAdminStatus, slbProbeHttpStatus=slbProbeHttpStatus, slbStatsQualDataIcmpData=slbStatsQualDataIcmpData, slbProbeMethod=slbProbeMethod, slbStatsQualDataIpMask=slbStatsQualDataIpMask, slbStatsQualDataL4Port=slbStatsQualDataL4Port, slbStatsQualTableEntry=slbStatsQualTableEntry, slbServerTable=slbServerTable, slbProbePort=slbProbePort, slbStatsQualDataVlan=slbStatsQualDataVlan, SlbAdminState=SlbAdminState, slbServerAdminStatus=slbServerAdminStatus, slbFeatureGroup=slbFeatureGroup, SlbOperState=SlbOperState, slbServerClusterName=slbServerClusterName, slbServerFlows=slbServerFlows, slbStatsQualTable=slbStatsQualTable, slbClusterPackets=slbClusterPackets, slbProbeSend=slbProbeSend, SlbHashSelector=SlbHashSelector, slbClusterTableEntry=slbClusterTableEntry, slbClusterType=slbClusterType, slbServerPingFails=slbServerPingFails, slbServerProbeName=slbServerProbeName, slbProbeExpect=slbProbeExpect, slbStatsQualDataEthertype=slbStatsQualDataEthertype, slbProbeRowStatus=slbProbeRowStatus, slbProbesGroup=slbProbesGroup, slbClusterRedirectAlgorithm=slbClusterRedirectAlgorithm, slbServerAdminWeight=slbServerAdminWeight, slbStatsQualDataEndPort=slbStatsQualDataEndPort, slbClusterIdleTimer=slbClusterIdleTimer, slbProbeTimeout=slbProbeTimeout, slbTrapException=slbTrapException, slbServerPortDown=slbServerPortDown, slbStatsQualType=slbStatsQualType, slbStatsQualDataIp=slbStatsQualDataIp, 
slbClusterTable=slbClusterTable, slbStatsIndex=slbStatsIndex, SlbServerOperState=SlbServerOperState, slbServerProbeStatus=slbServerProbeStatus, slbOperStatus=slbOperStatus, slbProbeTable=slbProbeTable, slbServerRowStatus=slbServerRowStatus, slbProbeHttpUsername=slbProbeHttpUsername, slbTrapsObj=slbTrapsObj, slbClustersCount=slbClustersCount, slbFeature=slbFeature, slbClusterHashType=slbClusterHashType, alcatelIND1SLBMIBObjects=alcatelIND1SLBMIBObjects, slbProbeSSL=slbProbeSSL, alcatelIND1SLBMIBNotifications=alcatelIND1SLBMIBNotifications, slbStatsCounter=slbStatsCounter, slbStatsQualDataStartPort=slbStatsQualDataStartPort, slbClusterCondition=slbClusterCondition, slbProbeHttpUrl=slbProbeHttpUrl, slbClusters=slbClusters, slbStatsQualDataTos=slbStatsQualDataTos, slbStatsQualDataMac=slbStatsQualDataMac, slbProbeTableEntry=slbProbeTableEntry)
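# --- Illustrative usage sketch (commented out; assumptions flagged inline) ---
# A minimal example of how the table columns exported above could be walked
# with pysnmp's high-level API. The host name and community string are
# hypothetical, and it assumes this compiled module is on pysnmp's MIB path.
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, nextCmd)
#   for errorIndication, errorStatus, errorIndex, varBinds in nextCmd(
#           SnmpEngine(),
#           CommunityData('public'),                          # hypothetical community
#           UdpTransportTarget(('switch.example.com', 161)),  # hypothetical device
#           ContextData(),
#           ObjectType(ObjectIdentity('ALCATEL-IND1-SLB-MIB', 'slbServerOperStatus')),
#           lexicographicMode=False):
#       if errorIndication or errorStatus:
#           break
#       for name, value in varBinds:
#           print('%s = %s' % (name.prettyPrint(), value.prettyPrint()))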
| 137.565891
| 4,377
| 0.737237
|
ac06f44817a622b9d3881436601bd171a9198e6a
| 10,172
|
py
|
Python
|
Test/Python/Extract_Internal_Surfaces_From_Triangle_Geometry.py
|
JDuffeyBQ/DREAM3DReview
|
098ddc60d1c53764e09e21e08d4636233071be31
|
[
"BSD-3-Clause"
] | null | null | null |
Test/Python/Extract_Internal_Surfaces_From_Triangle_Geometry.py
|
JDuffeyBQ/DREAM3DReview
|
098ddc60d1c53764e09e21e08d4636233071be31
|
[
"BSD-3-Clause"
] | 18
|
2017-09-01T23:13:02.000Z
|
2021-09-02T12:58:57.000Z
|
Test/Python/Extract_Internal_Surfaces_From_Triangle_Geometry.py
|
JDuffeyBQ/DREAM3DReview
|
098ddc60d1c53764e09e21e08d4636233071be31
|
[
"BSD-3-Clause"
] | 9
|
2017-09-01T23:15:17.000Z
|
2021-09-21T13:24:19.000Z
|
# Pipeline : (01) SmallIN100 Quick Mesh (from EBSD Surface Meshing)
# Tests the Extract Internal Surfaces From Triangle Geometry filter
#
import os
import simpl
import simplpy
import simpl_helpers as sh
import simpl_test_dirs as sd
import samplingpy as sampling
import surfacemeshingpy
import dream3dreviewpy
def start_test():
# Create Data Container Array
dca = simpl.DataContainerArray()
# Read DREAM3D File
dcap = simpl.DataContainerArrayProxy()
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'Confidence Index').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'CriticalField').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'EulerAngles').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'FeatureIds').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'FeatureReferenceMisorientations').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'Fit').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'GBManhattanDistances').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'IPFColor').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'Image Quality').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'KernelAverageMisorientations').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'Mask').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'ParentIds').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'Phases').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'QPManhattanDistances').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'Quats').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'SEM Signal').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('EBSD Scan Data').getDataArrayProxy(
'TJManhattanDistances').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Phase Data').getDataArrayProxy(
'CrystalStructures').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Phase Data').getDataArrayProxy(
'LatticeConstants').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Phase Data').getDataArrayProxy(
'MaterialName').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy('Active').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'AspectRatios').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'AvgEuler').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'AvgEulerAngles').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'AvgQuats').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'AxisEulerAngles').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'AxisLengths').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'Centroids').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'CriticalFields').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'EquivalentDiameters').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'FeatureAvgMisorientations').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'MisorientationList').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'NeighborList').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'NeighborhoodList').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'Neighborhoods').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'NumElements').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'NumNeighbors').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'NumNeighbors2').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'Omega3s').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'ParentIds').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy('Poles').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy('Phases').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'Schmids').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'Shape Volumes').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'SharedSurfaceAreaList').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'Size Volumes').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'SlipSystems').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'Sphericity').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('Grain Data').getDataArrayProxy(
'SurfaceAreaVolumeRatio').Flag = 2
dcap.getDataContainerProxy('Small IN100').getAttributeMatrixProxy('NewGrain Data').getDataArrayProxy(
'Active').Flag = 2
err = simplpy.data_container_reader(dca,
sd.GetBuildDirectory() +
'/Data/Output/Statistics/SmallIN100_CrystalStats.dream3d',
False, dcap)
assert err == 0, f'DataContainerReader ErrorCondition {err}'
# Crop Geometry (Image)
err = sampling.crop_image_geometry(dca, '',
simpl.DataArrayPath('Small IN100', 'EBSD Scan Data', ''),
simpl.DataArrayPath('Small IN100', 'Grain Data', ''),
41, 41, 0, 140, 140, 99, True, False, True,
simpl.DataArrayPath('Small IN100', 'EBSD Scan Data', 'FeatureIds'))
assert err == 0, f'CropImageGeometry ErrorCondition {err}'
# Quick Surface Mesh
err = surfacemeshingpy.quick_surface_mesh(dca,
[],
'TriangleDataContainer',
'VertexData',
'FaceData',
simpl.DataArrayPath('Small IN100', 'EBSD Scan Data', 'FeatureIds'),
'FaceLabels',
'NodeType',
'FaceFeatureData')
assert err == 0, f'QuickSurfaceMesh ErrorCondition {err}'
# Extract Internal Surfaces From Triangle Geometry
err = dream3dreviewpy.extract_internal_surfaces_from_triangle_geometry(dca,
'TriangleDataContainer',
simpl.DataArrayPath(
'TriangleDataContainer',
'VertexData',
'NodeType'),
'InternalTrianglesDataContainer')
assert err == 0, f'ExtractInternalSurfacesFromTriangleGeometry ErrorCondition {err}'
# Write to DREAM3D file
err = sh.WriteDREAM3DFile(sd.GetBuildDirectory() +
'/Data/Output/DREAM3DReview/' +
'SmallIN100_WithInternalSurfaces.dream3d',
dca)
assert err == 0, f'WriteDREAM3DFile ErrorCondition: {err}'
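# --- Illustrative refactoring sketch (not called by this test) ---
# The long run of '.Flag = 2' assignments in start_test() marks every array the
# reader should load. The same selection could be driven from a table using the
# proxy API already used above; this helper is only a sketch of that idea.
def flag_arrays(dcap, container, selections, flag=2):
    """Set the read flag on each (attribute matrix -> [array, ...]) selection."""
    dc_proxy = dcap.getDataContainerProxy(container)
    for matrix_name, array_names in selections.items():
        am_proxy = dc_proxy.getAttributeMatrixProxy(matrix_name)
        for array_name in array_names:
            am_proxy.getDataArrayProxy(array_name).Flag = flag
# Example call mirroring a subset of the assignments in start_test():
#   flag_arrays(dcap, 'Small IN100', {
#       'EBSD Scan Data': ['Confidence Index', 'FeatureIds', 'Phases'],
#       'Phase Data': ['CrystalStructures', 'LatticeConstants'],
#   })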
if __name__ == '__main__':
print('Starting Test %s ' % os.path.basename(__file__))
start_test()
print('Ending Test %s ' % os.path.basename(__file__))
| 61.277108
| 120
| 0.658179
|
96ff9da123b45ed58c06b9f3b7c809f6ed7e62cc
| 1,098
|
py
|
Python
|
basketball_reference_web_scraper/errors.py
|
sheagk/basketball_reference_web_scraper
|
820ff9760f91bffc3efd770d91547f3213f42ec8
|
[
"MIT"
] | null | null | null |
basketball_reference_web_scraper/errors.py
|
sheagk/basketball_reference_web_scraper
|
820ff9760f91bffc3efd770d91547f3213f42ec8
|
[
"MIT"
] | null | null | null |
basketball_reference_web_scraper/errors.py
|
sheagk/basketball_reference_web_scraper
|
820ff9760f91bffc3efd770d91547f3213f42ec8
|
[
"MIT"
] | null | null | null |
class InvalidDate(Exception):
def __init__(self, day, month, year):
message = "Date with year set to {year}, month set to {month}, and day set to {day} is invalid"\
.format(
year=year,
month=month,
day=day,
)
super().__init__(message)
class InvalidSeason(Exception):
def __init__(self, season_end_year):
message = "Season end year of {season_end_year} is invalid".format(season_end_year=season_end_year)
super().__init__(message)
class InvalidPlayer(Exception):
def __init__(self, player_id):
message = "Player ID of {player_id} is invalid".format(player_id=player_id)
super().__init__(message)
class InvalidSeries(Exception):
def __init__(self, series):
message = "No series between {winning_team} and {losing_team} exists at {series_address}".format(
winning_team=series['winning_team'].value,
losing_team=series['losing_team'].value,
series_address=series['stats_link_ending'])
super().__init__(message)
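if __name__ == '__main__':
    # Quick, self-contained demonstration of the message each class builds; the
    # literal arguments below are arbitrary examples, not values from the scraper.
    try:
        raise InvalidDate(day=32, month=1, year=2020)
    except InvalidDate as error:
        print(error)
    try:
        raise InvalidSeason(season_end_year='20-21')
    except InvalidSeason as error:
        print(error)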
| 39.214286
| 107
| 0.642987
|
9fb580182433f1fcd40ab26a6971b7399c52a782
| 615
|
py
|
Python
|
Code/generics/turtle_commands.py
|
Den1k22/python-lessons
|
cc898284e4d9b233dc023fbdae6ac41cf184ab02
|
[
"MIT"
] | null | null | null |
Code/generics/turtle_commands.py
|
Den1k22/python-lessons
|
cc898284e4d9b233dc023fbdae6ac41cf184ab02
|
[
"MIT"
] | null | null | null |
Code/generics/turtle_commands.py
|
Den1k22/python-lessons
|
cc898284e4d9b233dc023fbdae6ac41cf184ab02
|
[
"MIT"
] | null | null | null |
import turtle
turtle.showturtle()
turtle.speed(1) # speed can be from 0 - 10
turtle.forward(100)
turtle.dot()
turtle.right(90)
turtle.forward(100)
turtle.dot()
turtle.left(120)
turtle.forward(100)
print(turtle.heading())
turtle.dot()
turtle.setheading(180)
turtle.forward(200)
turtle.dot()
turtle.setheading(0)
turtle.circle(100)
# turtle.pensize(5)
# turtle.pencolor('red')
# turtle.bgcolor('gray')
# turtle.goto(0, 100)
turtle.goto(0, 0)
turtle.goto(200, 200)
turtle.goto(-200, 200)
turtle.goto(-200, -200)
turtle.goto(200, -200)
turtle.goto(200, 200)
# turtle.pos()
# turtle.xcor()
# turtle.ycor()
| 14.302326
| 43
| 0.708943
|
08062f45f5edc2d952b3fa9aed60e05dedee01b1
| 2,008
|
py
|
Python
|
trainSCI.py
|
Ranjitha-Ramesh/HSLN-Joint-Sentence-Classification
|
1b8549010f1df11350ded130dd96c0f8f1de6c90
|
[
"MIT"
] | null | null | null |
trainSCI.py
|
Ranjitha-Ramesh/HSLN-Joint-Sentence-Classification
|
1b8549010f1df11350ded130dd96c0f8f1de6c90
|
[
"MIT"
] | null | null | null |
trainSCI.py
|
Ranjitha-Ramesh/HSLN-Joint-Sentence-Classification
|
1b8549010f1df11350ded130dd96c0f8f1de6c90
|
[
"MIT"
] | 1
|
2020-03-19T01:59:09.000Z
|
2020-03-19T01:59:09.000Z
|
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0,3"
from model.data_utils import Dataset
from model.models import HANNModel
from model.config import Config
import argparse
#import os
parser = argparse.ArgumentParser()
def main():
# create instance of config
config = Config(parser)
#print("initiating the HANN model")
#print("value of config restore is {}".format(config.restore))
# build model
model = HANNModel(config)
#print("before building the model")
model.build()
#if config.restore:
# model.restore_session("results/test/model.weights/") # optional, restore weights
#print("after building the model")
config.filename_wordvec ='/home/rxr5423/BERT/BERTGit/bert/vocab_words_SciBERT_fine.txt'
print("The word Embeddings used are: {}".format(config.filename_wordvec))
print("overridding the config values: restore {}, accuracy {}".format(config.restore, config.train_accuracy))
config.train_accuracy = True
config.restore = False
print("overriden values: restore {}, accuracy {}".format(config.restore, config.train_accuracy))
# create datasets
dev = Dataset(config.filename_dev, config.processing_word,
config.processing_tag, config.max_iter)
train = Dataset(config.filename_train, config.processing_word,
config.processing_tag, config.max_iter)
test = Dataset(config.filename_test, config.processing_word,
config.processing_tag, config.max_iter)
# train model
model.train(train, dev)
# evaluate model
model.restore_session(config.dir_model)
metrics = model.evaluate(test)
with open(os.path.join(config.dir_output, 'test_results.txt'), 'a') as file:
file.write('{}\n'.format(metrics['classification-report']))
file.write('{}\n'.format(metrics['confusion-matrix']))
file.write('{}\n\n'.format(metrics['weighted-f1']))
if __name__ == "__main__":
main()
| 37.886792
| 113
| 0.698207
|
2b781764747ff996585362f1c6da76b0d7a1cb3e
| 1,173
|
py
|
Python
|
backend/src/urls.py
|
andihaki/welldone
|
938c98c0fb6a5f5593f339c9d9318df2afd2d064
|
[
"BSD-2-Clause"
] | null | null | null |
backend/src/urls.py
|
andihaki/welldone
|
938c98c0fb6a5f5593f339c9d9318df2afd2d064
|
[
"BSD-2-Clause"
] | 12
|
2019-07-03T21:14:58.000Z
|
2022-02-17T20:10:00.000Z
|
backend/src/urls.py
|
andihaki/welldone
|
938c98c0fb6a5f5593f339c9d9318df2afd2d064
|
[
"BSD-2-Clause"
] | null | null | null |
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# for graphql
from django.views.decorators.csrf import csrf_exempt
from graphene_django.views import GraphQLView
urlpatterns = [
path('admin/', admin.site.urls),
path('api-auth/', include('rest_framework.urls')),
path('rest-auth/', include('rest_auth.urls')),
path('rest-auth/registration/', include('rest_auth.registration.urls')),
path('api/', include('articles.api.urls')),
path('graphql/', csrf_exempt(GraphQLView.as_view(graphiql=True))),
]
| 39.1
| 77
| 0.716113
|
0db45318bfaa1e40a85d01a5afe62aa22bbe5beb
| 9,825
|
py
|
Python
|
tests/components/remote/test_device_trigger.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 3
|
2020-11-27T06:26:27.000Z
|
2020-12-09T14:55:16.000Z
|
tests/components/remote/test_device_trigger.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 25
|
2021-11-24T06:24:10.000Z
|
2022-03-31T06:23:06.000Z
|
tests/components/remote/test_device_trigger.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 3
|
2022-01-02T18:49:54.000Z
|
2022-01-25T02:03:54.000Z
|
"""The test for remote device automation."""
from datetime import timedelta
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.remote import DOMAIN
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
async_fire_time_changed,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a remote."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "changed_states",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_off",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_on",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
assert triggers == expected_triggers
async def test_get_trigger_capabilities(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a remote trigger."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device_entry.id
)
for trigger in triggers:
capabilities = await async_get_device_automation_capabilities(
hass, DeviceAutomationType.TRIGGER, trigger
)
assert capabilities == expected_capabilities
async def test_if_fires_on_state_change(hass, calls, enable_custom_integrations):
"""Test for turn_on and turn_off triggers firing."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_on",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_on {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_off",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "changed_states",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_on_or_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.states.async_set(ent1.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 2
assert {calls[0].data["some"], calls[1].data["some"]} == {
f"turn_off device - {ent1.entity_id} - on - off - None",
f"turn_on_or_off device - {ent1.entity_id} - on - off - None",
}
hass.states.async_set(ent1.entity_id, STATE_ON)
await hass.async_block_till_done()
assert len(calls) == 4
assert {calls[2].data["some"], calls[3].data["some"]} == {
f"turn_on device - {ent1.entity_id} - off - on - None",
f"turn_on_or_off device - {ent1.entity_id} - off - on - None",
}
async def test_if_fires_on_state_change_with_for(
hass, calls, enable_custom_integrations
):
"""Test for triggers firing with delay."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turned_off",
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.states.async_set(ent1.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
await hass.async_block_till_done()
assert calls[0].data["some"] == "turn_off device - {} - on - off - 0:00:05".format(
ent1.entity_id
)
| 35.597826
| 87
| 0.485293
|
52dfa2ca38f3d51ef2738df8af490bb9c7289afb
| 3,000
|
py
|
Python
|
pollme/settings.py
|
farridav/Django-Poll-App
|
c86db98c2ac3a1f18c49ea8dad1911e6134d2b62
|
[
"MIT"
] | null | null | null |
pollme/settings.py
|
farridav/Django-Poll-App
|
c86db98c2ac3a1f18c49ea8dad1911e6134d2b62
|
[
"MIT"
] | null | null | null |
pollme/settings.py
|
farridav/Django-Poll-App
|
c86db98c2ac3a1f18c49ea8dad1911e6134d2b62
|
[
"MIT"
] | null | null | null |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'x*za6xf&_80ofdpae!yzq61g9ffikkx9$*iygbl$j7rr4wlf8t'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'jazzmin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pollme.polls.apps.PollsConfig',
'pollme.accounts.apps.AccountsConfig',
'debug_toolbar',
'django_extensions',
]
MIDDLEWARE = [
"debug_toolbar.middleware.DebugToolbarMiddleware",
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pollme.urls'
JAZZMIN_SETTINGS = {
'skin': 'blue',
'site_title': 'Polls Admin',
'site_header': 'Polls',
'site_logo': 'img/logo.svg',
'welcome_sign': 'Welcome to polls',
'copyright': 'Acme Ltd',
'navigation_expanded': True,
'search_model': 'auth.User',
'user_avatar': None,
'hide_apps': [],
'hide_models': [],
'order_with_respect_to': ['accounts', 'polls'],
'custom_links': {
'polls': [
{'name': 'Custom link', 'url': '/', 'icon': 'fa-user', 'permissions': []}
]
},
'icons': {
'auth': 'fa-people',
'auth.user': 'fa-user',
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'pollme/common/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pollme.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',},
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',},
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',},
]
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
DEBUG_TOOLBAR_CONFIG = {"SHOW_TOOLBAR_CALLBACK": lambda _: True}
| 28.301887
| 90
| 0.657333
|
d65905bff06dbcbec0a6cb0491cfc220195406a9
| 2,896
|
py
|
Python
|
scripts/keras_utils.py
|
Mukilan1600/lpr
|
e01503ecf3150e4e3604f968e50348b974eac27d
|
[
"MIT"
] | 2
|
2019-12-21T06:18:44.000Z
|
2020-09-23T18:40:41.000Z
|
scripts/keras_utils.py
|
Mukilan1600/lpr
|
e01503ecf3150e4e3604f968e50348b974eac27d
|
[
"MIT"
] | null | null | null |
scripts/keras_utils.py
|
Mukilan1600/lpr
|
e01503ecf3150e4e3604f968e50348b974eac27d
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import time
from os.path import splitext
from src.label import Label
from src.utils import getWH, nms
from src.projection_utils import getRectPts, find_T_matrix
class DLabel (Label):
def __init__(self,cl,pts,prob):
self.pts = pts
tl = np.amin(pts,1)
br = np.amax(pts,1)
Label.__init__(self,cl,tl,br,prob)
def save_model(model,path,verbose=0):
path = splitext(path)[0]
model_json = model.to_json()
with open('%s.json' % path,'w') as json_file:
json_file.write(model_json)
model.save_weights('%s.h5' % path)
if verbose: print('Saved to %s' % path)
def load_model(path,custom_objects={},verbose=0):
from keras.models import model_from_json
path = splitext(path)[0]
with open('%s.json' % path,'r') as json_file:
model_json = json_file.read()
model = model_from_json(model_json, custom_objects=custom_objects)
model.load_weights('%s.h5' % path)
if verbose: print('Loaded from %s' % path)
return model
def reconstruct(Iorig,I,Y,out_size,threshold=.9):
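	# Decode the WPOD-net style output tensor Y: channel 0 holds per-cell
	# detection probabilities and channels 2..7 a 2x3 affine transform. Cells
	# above `threshold` become quadrilateral DLabels, overlaps are pruned with
	# nms(), and each surviving plate is rectified to `out_size` via a homography.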
net_stride = 2**4
side = ((208. + 40.)/2.)/net_stride # 7.75
Probs = Y[...,0]
Affines = Y[...,2:]
rx,ry = Y.shape[:2]
ywh = Y.shape[1::-1]
iwh = np.array(I.shape[1::-1],dtype=float).reshape((2,1))
xx,yy = np.where(Probs>threshold)
WH = getWH(I.shape)
MN = WH/net_stride
vxx = vyy = 0.5 #alpha
base = lambda vx,vy: np.matrix([[-vx,-vy,1.],[vx,-vy,1.],[vx,vy,1.],[-vx,vy,1.]]).T
labels = []
for i in range(len(xx)):
y,x = xx[i],yy[i]
affine = Affines[y,x]
prob = Probs[y,x]
mn = np.array([float(x) + .5,float(y) + .5])
A = np.reshape(affine,(2,3))
A[0,0] = max(A[0,0],0.)
A[1,1] = max(A[1,1],0.)
pts = np.array(A*base(vxx,vyy)) #*alpha
pts_MN_center_mn = pts*side
pts_MN = pts_MN_center_mn + mn.reshape((2,1))
pts_prop = pts_MN/MN.reshape((2,1))
labels.append(DLabel(0,pts_prop,prob))
final_labels = nms(labels,.1)
TLps = []
if len(final_labels):
final_labels.sort(key=lambda x: x.prob(), reverse=True)
for i,label in enumerate(final_labels):
t_ptsh = getRectPts(0,0,out_size[0],out_size[1])
ptsh = np.concatenate((label.pts*getWH(Iorig.shape).reshape((2,1)),np.ones((1,4))))
H = find_T_matrix(ptsh,t_ptsh)
Ilp = cv2.warpPerspective(Iorig,H,out_size,borderValue=.0)
TLps.append(Ilp)
return final_labels,TLps
def detect_lp(model,I,max_dim,net_step,out_size,threshold):
min_dim_img = min(I.shape[:2])
factor = float(max_dim)/min_dim_img
w,h = (np.array(I.shape[1::-1],dtype=float)*factor).astype(int).tolist()
w += (w%net_step!=0)*(net_step - w%net_step)
h += (h%net_step!=0)*(net_step - h%net_step)
Iresized = cv2.resize(I,(w,h))
T = Iresized.copy()
T = T.reshape((1,T.shape[0],T.shape[1],T.shape[2]))
start = time.time()
Yr = model.predict(T)
Yr = np.squeeze(Yr)
elapsed = time.time() - start
L,TLps = reconstruct(I,Iresized,Yr,out_size,threshold)
return L,TLps,elapsed
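if __name__ == '__main__':
	# Minimal usage sketch. The weight and image paths are hypothetical, and the
	# 288 max dimension, (240, 80) output size and 0.5 threshold are plausible
	# defaults rather than values mandated by this module.
	wpod_net = load_model('data/lp-detector/wpod-net_update1', verbose=1)
	image = cv2.imread('samples/car.jpg').astype('float32') / 255.0
	labels, plates, elapsed = detect_lp(wpod_net, image, 288, 2**4, (240, 80), .5)
	print('%d plate(s) found in %.2fs' % (len(plates), elapsed))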
| 24.336134
| 87
| 0.661948
|
3b0289ff4ac7726831da195cfd05a803c3fca95d
| 5,139
|
py
|
Python
|
raspberry/Car-obd/fuction tests/test_protocol_can.py
|
dsd-m2m/vehicle-tracking
|
7542c5d13bf3e5a3a2fdbf9de8e7dd80a4820a51
|
[
"MIT"
] | 2
|
2018-10-10T12:11:00.000Z
|
2018-11-18T12:14:36.000Z
|
raspberry/Car-obd/fuction tests/test_protocol_can.py
|
dsd-m2m/vehicle-tracking
|
7542c5d13bf3e5a3a2fdbf9de8e7dd80a4820a51
|
[
"MIT"
] | 6
|
2018-10-16T21:19:30.000Z
|
2018-12-10T15:39:49.000Z
|
raspberry/Car-obd/fuction tests/test_protocol_can.py
|
dsd-m2m/vehicle-tracking
|
7542c5d13bf3e5a3a2fdbf9de8e7dd80a4820a51
|
[
"MIT"
] | 4
|
2018-10-28T18:43:32.000Z
|
2018-12-09T18:41:03.000Z
|
import random
from obd.protocols import *
from obd.protocols.protocol import Message
CAN_11_PROTOCOLS = [
ISO_15765_4_11bit_500k,
ISO_15765_4_11bit_250k,
]
CAN_29_PROTOCOLS = [
ISO_15765_4_29bit_500k,
ISO_15765_4_29bit_250k,
SAE_J1939
]
def check_message(m, num_frames, tx_id, data):
""" generic test for correct message values """
assert len(m.frames) == num_frames
assert m.tx_id == tx_id
assert m.data == bytearray(data)
def test_single_frame():
for protocol in CAN_11_PROTOCOLS:
p = protocol([])
r = p(["7E8 06 41 00 00 01 02 03"])
assert len(r) == 1
check_message(r[0], 1, 0x0, [0x41, 0x00, 0x00, 0x01, 0x02, 0x03])
# minimum valid length
r = p(["7E8 01 41"])
assert len(r) == 1
check_message(r[0], 1, 0x0, [0x41])
# maximum valid length
r = p(["7E8 07 41 00 00 01 02 03 04"])
assert len(r) == 1
check_message(r[0], 1, 0x0, [0x41, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04])
        # too short
r = p(["7E8 01"])
assert len(r) == 0
        # too long
r = p(["7E8 08 41 00 00 01 02 03 04 05"])
assert len(r) == 0
# drop frames with zero data
r = p(["7E8 00"])
assert len(r) == 0
# drop odd-sized frames (post padding)
r = p(["7E8 08 41 00 00 01 02 03 04 0"])
assert len(r) == 0
def test_hex_straining():
"""
If non-hex values are sent, they should be marked as ECU.UNKNOWN
"""
for protocol in CAN_11_PROTOCOLS:
p = protocol([])
# single non-hex message
r = p(["12.8 Volts"])
assert len(r) == 1
assert r[0].ecu == ECU.UNKNOWN
assert len(r[0].frames) == 1
        # multiple non-hex messages
r = p(["12.8 Volts", "NO DATA"])
assert len(r) == 2
for m in r:
assert m.ecu == ECU.UNKNOWN
assert len(m.frames) == 1
# mixed hex and non-hex
r = p(["NO DATA", "7E8 06 41 00 00 01 02 03"])
assert len(r) == 2
# first message should be the valid, parsable hex message
        # NOTE: the parser happens to process the valid ones first
check_message(r[0], 1, 0x0, [0x41, 0x00, 0x00, 0x01, 0x02, 0x03])
# second message: invalid, non-parsable non-hex
assert r[1].ecu == ECU.UNKNOWN
assert len(r[1].frames) == 1
assert len(r[1].data) == 0 # no data
def test_multi_ecu():
for protocol in CAN_11_PROTOCOLS:
p = protocol([])
test_case = [
"7E8 06 41 00 00 01 02 03",
"7EB 06 41 00 00 01 02 03",
"7EA 06 41 00 00 01 02 03",
]
correct_data = [0x41, 0x00, 0x00, 0x01, 0x02, 0x03]
        # separate ECUs, single frames each
r = p(test_case)
assert len(r) == 3
# messages are returned in ECU order
check_message(r[0], 1, 0x0, correct_data)
check_message(r[1], 1, 0x2, correct_data)
check_message(r[2], 1, 0x3, correct_data)
def test_multi_line():
"""
Tests that valid multiline messages are recombined into single
messages.
"""
for protocol in CAN_11_PROTOCOLS:
p = protocol([])
test_case = [
"7E8 10 20 49 04 00 01 02 03",
"7E8 21 04 05 06 07 08 09 0A",
"7E8 22 0B 0C 0D 0E 0F 10 11",
"7E8 23 12 13 14 15 16 17 18"
]
correct_data = [0x49, 0x04] + list(range(25))
# in-order
r = p(test_case)
assert len(r) == 1
check_message(r[0], len(test_case), 0x0, correct_data)
# test a few out-of-order cases
for n in range(4):
random.shuffle(test_case) # mix up the frame strings
r = p(test_case)
assert len(r) == 1
check_message(r[0], len(test_case), 0x0, correct_data)
def test_multi_line_missing_frames():
"""
Missing frames in a multi-frame message should drop the message.
    Tests the contiguity check and the data-length byte.
"""
for protocol in CAN_11_PROTOCOLS:
p = protocol([])
test_case = [
"7E8 10 20 49 04 00 01 02 03",
"7E8 21 04 05 06 07 08 09 0A",
"7E8 22 0B 0C 0D 0E 0F 10 11",
"7E8 23 12 13 14 15 16 17 18"
]
for n in range(len(test_case) - 1):
sub_test = list(test_case)
del sub_test[n]
r = p(sub_test)
assert len(r) == 0
def test_multi_line_mode_03():
"""
Tests the special handling of mode 3 commands.
Namely, Mode 03 commands have a DTC count byte that is accounted for
in the protocol layer.
"""
for protocol in CAN_11_PROTOCOLS:
p = protocol([])
test_case = [
"7E8 10 20 43 04 00 01 02 03",
"7E8 21 04 05 06 07 08 09 0A",
]
correct_data = [0x43, 0x04] + list(range(8))
r = p(test_case)
assert len(r) == 1
check_message(r[0], len(test_case), 0, correct_data)
def test_can_29():
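    # Placeholder: responses with 29-bit (extended) CAN identifiers from the
    # CAN_29_PROTOCOLS list above are not exercised by this module yet.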
pass
| 24.946602
| 79
| 0.542518
|
d7c41708f168d585e9dc5663e20b93a50bda4176
| 8,806
|
py
|
Python
|
python/lib/python2.7/site-packages/hgext/largefiles/uisetup.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | 1
|
2019-12-19T01:53:13.000Z
|
2019-12-19T01:53:13.000Z
|
python/lib/python2.7/site-packages/hgext/largefiles/uisetup.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
python/lib/python2.7/site-packages/hgext/largefiles/uisetup.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''setup for largefiles extension: uisetup'''
from __future__ import absolute_import
from mercurial.i18n import _
from mercurial.hgweb import (
hgweb_mod,
webcommands,
)
from mercurial import (
archival,
cmdutil,
commands,
copies,
exchange,
extensions,
filemerge,
hg,
httppeer,
merge,
scmutil,
sshpeer,
subrepo,
wireproto,
)
from . import (
overrides,
proto,
)
def uisetup(ui):
# Disable auto-status for some commands which assume that all
# files in the result are under Mercurial's control
entry = extensions.wrapcommand(commands.table, 'add',
overrides.overrideadd)
addopt = [('', 'large', None, _('add as largefile')),
('', 'normal', None, _('add as normal file')),
('', 'lfsize', '', _('add all files above this size '
'(in megabytes) as largefiles '
'(default: 10)'))]
entry[1].extend(addopt)
# The scmutil function is called both by the (trivial) addremove command,
# and in the process of handling commit -A (issue3542)
entry = extensions.wrapfunction(scmutil, 'addremove',
overrides.scmutiladdremove)
extensions.wrapfunction(cmdutil, 'add', overrides.cmdutiladd)
extensions.wrapfunction(cmdutil, 'remove', overrides.cmdutilremove)
extensions.wrapfunction(cmdutil, 'forget', overrides.cmdutilforget)
extensions.wrapfunction(copies, 'pathcopies', overrides.copiespathcopies)
# Subrepos call status function
entry = extensions.wrapcommand(commands.table, 'status',
overrides.overridestatus)
entry = extensions.wrapfunction(subrepo.hgsubrepo, 'status',
overrides.overridestatusfn)
entry = extensions.wrapcommand(commands.table, 'log',
overrides.overridelog)
entry = extensions.wrapcommand(commands.table, 'rollback',
overrides.overriderollback)
entry = extensions.wrapcommand(commands.table, 'verify',
overrides.overrideverify)
verifyopt = [('', 'large', None,
_('verify that all largefiles in the current revision exist')),
('', 'lfa', None,
_('verify largefiles in all revisions, not just current')),
('', 'lfc', None,
_('verify local largefile contents, not just existence'))]
entry[1].extend(verifyopt)
entry = extensions.wrapcommand(commands.table, 'debugstate',
overrides.overridedebugstate)
debugstateopt = [('', 'large', None, _('display largefiles dirstate'))]
entry[1].extend(debugstateopt)
outgoing = lambda orgfunc, *arg, **kwargs: orgfunc(*arg, **kwargs)
entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
entry[1].extend(outgoingopt)
cmdutil.outgoinghooks.add('largefiles', overrides.outgoinghook)
entry = extensions.wrapcommand(commands.table, 'summary',
overrides.overridesummary)
summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
entry[1].extend(summaryopt)
cmdutil.summaryremotehooks.add('largefiles', overrides.summaryremotehook)
entry = extensions.wrapcommand(commands.table, 'pull',
overrides.overridepull)
pullopt = [('', 'all-largefiles', None,
_('download all pulled versions of largefiles (DEPRECATED)')),
('', 'lfrev', [],
_('download largefiles for these revisions'), _('REV'))]
entry[1].extend(pullopt)
entry = extensions.wrapcommand(commands.table, 'push',
overrides.overridepush)
pushopt = [('', 'lfrev', [],
_('upload largefiles for these revisions'), _('REV'))]
entry[1].extend(pushopt)
entry = extensions.wrapfunction(exchange, 'pushoperation',
overrides.exchangepushoperation)
entry = extensions.wrapcommand(commands.table, 'clone',
overrides.overrideclone)
cloneopt = [('', 'all-largefiles', None,
_('download all versions of all largefiles'))]
entry[1].extend(cloneopt)
entry = extensions.wrapfunction(hg, 'clone', overrides.hgclone)
entry = extensions.wrapcommand(commands.table, 'cat',
overrides.overridecat)
entry = extensions.wrapfunction(merge, '_checkunknownfile',
overrides.overridecheckunknownfile)
entry = extensions.wrapfunction(merge, 'calculateupdates',
overrides.overridecalculateupdates)
entry = extensions.wrapfunction(merge, 'recordupdates',
overrides.mergerecordupdates)
entry = extensions.wrapfunction(merge, 'update',
overrides.mergeupdate)
entry = extensions.wrapfunction(filemerge, '_filemerge',
overrides.overridefilemerge)
entry = extensions.wrapfunction(cmdutil, 'copy',
overrides.overridecopy)
# Summary calls dirty on the subrepos
entry = extensions.wrapfunction(subrepo.hgsubrepo, 'dirty',
overrides.overridedirty)
entry = extensions.wrapfunction(cmdutil, 'revert',
overrides.overriderevert)
extensions.wrapcommand(commands.table, 'archive',
overrides.overridearchivecmd)
extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
extensions.wrapfunction(subrepo.hgsubrepo, 'archive',
overrides.hgsubrepoarchive)
extensions.wrapfunction(webcommands, 'archive',
overrides.hgwebarchive)
extensions.wrapfunction(cmdutil, 'bailifchanged',
overrides.overridebailifchanged)
extensions.wrapfunction(cmdutil, 'postcommitstatus',
overrides.postcommitstatus)
extensions.wrapfunction(scmutil, 'marktouched',
overrides.scmutilmarktouched)
# create the new wireproto commands ...
wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
# ... and wrap some existing ones
wireproto.commands['capabilities'] = (proto.capabilities, '')
wireproto.commands['heads'] = (proto.heads, '')
wireproto.commands['lheads'] = (wireproto.heads, '')
# make putlfile behave the same as push and {get,stat}lfile behave
# the same as pull w.r.t. permissions checks
hgweb_mod.perms['putlfile'] = 'push'
hgweb_mod.perms['getlfile'] = 'pull'
hgweb_mod.perms['statlfile'] = 'pull'
extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
# the hello wireproto command uses wireproto.capabilities, so it won't see
# our largefiles capability unless we replace the actual function as well.
proto.capabilitiesorig = wireproto.capabilities
wireproto.capabilities = proto.capabilities
# can't do this in reposetup because it needs to have happened before
# wirerepo.__init__ is called
proto.ssholdcallstream = sshpeer.sshpeer._callstream
proto.httpoldcallstream = httppeer.httppeer._callstream
sshpeer.sshpeer._callstream = proto.sshrepocallstream
httppeer.httppeer._callstream = proto.httprepocallstream
# override some extensions' stuff as well
for name, module in extensions.extensions():
if name == 'purge':
extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
overrides.overridepurge)
if name == 'rebase':
extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
overrides.overriderebase)
extensions.wrapfunction(module, 'rebase',
overrides.overriderebase)
if name == 'transplant':
extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
overrides.overridetransplant)
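# Illustrative note (not part of the original module): uisetup() above is run
# by Mercurial's extension loader, so the wrapping takes effect once the
# bundled extension is enabled in an hgrc, e.g.:
#
#     [extensions]
#     largefiles =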
| 43.166667
| 79
| 0.6206
|
392a5c9b285f3bb2dd59c4c76f5b8808338682c7
| 6,436
|
py
|
Python
|
catUploadScripts/makeCSVfromSNANA.py
|
chrisfrohmaier/tides_cadence_note
|
b7681a9035d2fca56084f4596a161605150343c7
|
[
"MIT"
] | null | null | null |
catUploadScripts/makeCSVfromSNANA.py
|
chrisfrohmaier/tides_cadence_note
|
b7681a9035d2fca56084f4596a161605150343c7
|
[
"MIT"
] | null | null | null |
catUploadScripts/makeCSVfromSNANA.py
|
chrisfrohmaier/tides_cadence_note
|
b7681a9035d2fca56084f4596a161605150343c7
|
[
"MIT"
] | null | null | null |
import argparse
import pandas as pd
from astropy.table import Table
import os
import numpy as np
import sys
cols = ['SUBSURVEY','SNID','IAUC','FAKE','RA','DEC','PIXSIZE','NXPIX','NYPIX','CCDNUM',\
'SNTYPE','NOBS','PTROBS_MIN','PTROBS_MAX','MWEBV','MWEBV_ERR','REDSHIFT_HELIO',\
'REDSHIFT_HELIO_ERR','REDSHIFT_FINAL','REDSHIFT_FINAL_ERR','VPEC','VPEC_ERR',\
'HOSTGAL_NMATCH','HOSTGAL_NMATCH2','HOSTGAL_OBJID','HOSTGAL_PHOTOZ',\
'HOSTGAL_PHOTOZ_ERR','HOSTGAL_SPECZ','HOSTGAL_SPECZ_ERR','HOSTGAL_RA','HOSTGAL_DEC',\
'HOSTGAL_SNSEP','HOSTGAL_DDLR','HOSTGAL_CONFUSION','HOSTGAL_LOGMASS',\
'HOSTGAL_LOGMASS_ERR','HOSTGAL_sSFR','HOSTGAL_sSFR_ERR','HOSTGAL_MAG_u',\
'HOSTGAL_MAG_g','HOSTGAL_MAG_r','HOSTGAL_MAG_i','HOSTGAL_MAG_z','HOSTGAL_MAG_Y',\
'HOSTGAL_MAGERR_u','HOSTGAL_MAGERR_g','HOSTGAL_MAGERR_r','HOSTGAL_MAGERR_i',\
'HOSTGAL_MAGERR_z','HOSTGAL_MAGERR_Y','HOSTGAL_SB_FLUXCAL_u','HOSTGAL_SB_FLUXCAL_g',\
'HOSTGAL_SB_FLUXCAL_r','HOSTGAL_SB_FLUXCAL_i','HOSTGAL_SB_FLUXCAL_z',\
'HOSTGAL_SB_FLUXCAL_Y','PEAKMJD','SEARCH_TYPE','SIM_MODEL_NAME','SIM_MODEL_INDEX',\
'SIM_TYPE_INDEX','SIM_TYPE_NAME','SIM_TEMPLATE_INDEX','SIM_LIBID','SIM_NGEN_LIBID',\
'SIM_NOBS_UNDEFINED','SIM_SEARCHEFF_MASK','SIM_REDSHIFT_HELIO','SIM_REDSHIFT_CMB',\
'SIM_REDSHIFT_HOST','SIM_REDSHIFT_FLAG','SIM_VPEC','SIM_DLMU','SIM_LENSDMU','SIM_RA',\
'SIM_DEC','SIM_MWEBV','SIM_PEAKMJD','SIM_MAGSMEAR_COH','SIM_AV','SIM_RV','SIM_PEAKMAG_u',\
'SIM_PEAKMAG_g','SIM_PEAKMAG_r','SIM_PEAKMAG_i','SIM_PEAKMAG_z','SIM_PEAKMAG_Y',\
'SIM_EXPOSURE_u','SIM_EXPOSURE_g','SIM_EXPOSURE_r','SIM_EXPOSURE_i','SIM_EXPOSURE_z',\
'SIM_EXPOSURE_Y','SIM_GALFRAC_u','SIM_GALFRAC_g','SIM_GALFRAC_r','SIM_GALFRAC_i',\
'SIM_GALFRAC_z','SIM_GALFRAC_Y','SIM_SUBSAMPLE_INDEX']
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path', help="Full path to the HEADfile")
# Execute the parse_args() method
args = parser.parse_args()
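# Typical invocation (illustrative; the only argument is the path to a SNANA
# HEAD file, from which the matching PHOT file is derived just below):
#     python makeCSVfromSNANA.py -p /path/to/LSSTDDF_NONIaMODEL0-0012_HEAD.FITS.gz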
input_head = args.path
#input_head = '/Users/cfrohmaier/Documents/TiDES/lsstFellow/Jan2021/TiDES_LSST/MV_LSSTDDF_CART/LSSTDDF_NONIaMODEL0-0012_HEAD.FITS.gz'
base, headin = os.path.split(input_head)
input_phot = base+'/'+headin.split('HEAD')[0]+'PHOT'+headin.split('HEAD')[1]
#print(input_phot)
try:
fin1 = Table.read(input_head, format='fits')
except:
print('Failed to load file: ', input_head)
sys.exit()
fin1.convert_bytestring_to_unicode()
#phots = fits.open(phot_in[k])
try:
phot1 = Table.read(input_phot, format='fits')
except:
print('Failed to load file: ', input_phot)
sys.exit()
phot1.convert_bytestring_to_unicode()
df_head = fin1.to_pandas() #pd.DataFrame(fin[1].data)
df_phots = phot1.to_pandas()#pd.DataFrame(phots[1].data)
### Update 1st April because Rick changed SNANA
if 'CCDNUM' not in df_head.columns:
df_head['CCDNUM'] = 0
#!!! Ra, DEC scatter put that in now
def radecScatter():
'''We need this function because SNANA only creates 50,000
unique LIBIDs, so a modest number of our transients end up with
repeated RA, Dec values. Scattering the RA, Dec positions by up to
0.338 degrees is enough to break these duplicates. The true offset is
not a simple circular scatter, but we approximate it as one.
'''
a = np.random.random() * 2 * np.pi  # random angle (radians, 0 -> 2*pi)
r = np.random.random() * 0.338  # random radius for the coordinate scatter
## Convert back to cartesian
xRa = r * np.cos(a)
yDec = r * np.sin(a)
## The returned coordinates will be the amount I shift the SN and Galaxy Ra, DEC
return [xRa, yDec]
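# Worked example of the scatter math (illustrative values): for a = pi/4 and
# r = 0.2 the offsets are xRa = 0.2*cos(pi/4) ~= 0.141 deg and
# yDec = 0.2*sin(pi/4) ~= 0.141 deg, i.e. each call returns a small random
# displacement inside a circle of radius 0.338 degrees.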
##Firstly, we dump the fin files
df_head['SUBSURVEY'] = df_head['SUBSURVEY'].str.strip()
df_head['SNID']=df_head['SNID'].astype(int)
df_head.replace('NULL ', "",inplace=True)
df_head.replace(' ', "",inplace=True)
if headin.split('_')[0] == 'LSSTWFD':
#print('WFD Field')
scatt = np.array([radecScatter() for x in range(len(df_head))])
df_head['RA'] = df_head['RA'] + scatt[:,0]
df_head['DEC'] = df_head['DEC'] + scatt[:,1]
df_head['HOSTGAL_RA'] = df_head['HOSTGAL_RA'] + scatt[:,0]
df_head['HOSTGAL_DEC'] = df_head['HOSTGAL_DEC'] + scatt[:,1]
df_head[cols].to_csv(base+'/'+headin.split('HEAD')[0]+'HEAD.csv', sep=',', header=True, index=False)
df_phots['SNID'] = 0
df_phots.replace(' ', "",inplace=True)
df_phots['FIELD'] = df_phots['FIELD'].str.strip()
df_phots.replace('NULL', "",inplace=True)
# print(df_phots)
photSNIDs = np.zeros(len(df_phots))
# for i in range(0,len(df_head)):
# idx = np.zeros(len(df_phots))
# print(df_head['PTROBS_MIN'].iloc[i])
# idx[int(df_head['PTROBS_MIN'].iloc[i])-1:int(df_head['PTROBS_MAX'].iloc[1])-1] = 1
# df_phots['SNID'][idx] = int(df_head['SNID'].iloc[i])
for index, lc in df_head.iterrows():
#print([lc['PTROBS_MIN'],lc['PTROBS_MAX']])
photSNIDs[int(lc['PTROBS_MIN'])-1:int(lc['PTROBS_MAX'])] = int(lc['SNID'])
# df_phots['SNID'][idx] = int(lc['SNID'])
# # lc_slice=df_phots[lc['PTROBS_MIN']:lc['PTROBS_MAX']].copy()
# # #!!! Filter strip! Do that like df_head['SUBSURVEY'] = df_head['SUBSURVEY'].str.strip()
# # #!!! Ra, DEC scatter put that in now
# # snid_col = pd.DataFrame([snid for x in range(len(lc_slice))], columns=['SNID'])
# # phot_dump = pd.DataFrame(np.column_stack((snid_col,lc_slice)), columns=list(snid_col.columns)+list(lc_slice.columns))
# # #print(phot_dump)
# # output_phot = io.StringIO()
# # phot_dump.to_csv(output_phot, sep='\t', header=False, index=False)
# # output_phot.seek(0)
# # contents = output_phot.getvalue()
# # cur.copy_from(output_phot, 's10_4most_ddf_sn_phot', null="") # null values become ''
# # conn.commit()
# df_phots['FLT'] = df_phots['FLT'].str.strip()
df_phots['SNID'] = photSNIDs.astype(int)
df_phots = df_phots[df_phots['SNID']>0]
df_phots = df_phots[df_phots['FLUXCAL']>0]
# Update 1st April: Rick renamed the columns without telling anyone.
if 'FLT' not in df_phots.columns:
df_phots.rename(columns={'BAND':'FLT'}, inplace=True)
photCols = ['SNID', 'MJD', 'FLT', 'FIELD', 'PHOTFLAG', 'PHOTPROB', 'FLUXCAL', 'FLUXCALERR', 'PSF_SIG1', 'SKY_SIG', 'ZEROPT', 'SIM_MAGOBS']
df_phots['FLT'] = df_phots['FLT'].str.strip()
df_phots[photCols].to_csv(base+'/'+headin.split('HEAD')[0]+'PHOT.csv', sep=',', header=True, index=False)
| 46.302158
| 138
| 0.686451
|
8eae08cb41bf5f4e64e8d68458e7d8ec07005451
| 937
|
py
|
Python
|
emoticons.py
|
nik-panekin/freelance_assistant_bot
|
6d157ef8a706ce92742dce721ea40eb507121cdb
|
[
"MIT"
] | null | null | null |
emoticons.py
|
nik-panekin/freelance_assistant_bot
|
6d157ef8a706ce92742dce721ea40eb507121cdb
|
[
"MIT"
] | null | null | null |
emoticons.py
|
nik-panekin/freelance_assistant_bot
|
6d157ef8a706ce92742dce721ea40eb507121cdb
|
[
"MIT"
] | null | null | null |
"""Здесь хранятся заранее подготовленные эмодзи.
"""
from emoji import emojize
EMO_POINT_RIGHT = emojize(':point_right:', use_aliases=True)
EMO_INFORMATION = emojize(':information:', use_aliases=True)
EMO_WARNING = emojize(':warning:', use_aliases=True)
EMO_CROSS_MARK = emojize(':cross_mark:', use_aliases=True)
EMO_CHECK_MARK = emojize(':white_check_mark:', use_aliases=True)
EMO_BELL = emojize(':bell:', use_aliases=True)
EMO_NO_BELL = emojize(':no_bell:', use_aliases=True)
EMO_EMAIL = emojize(':email:', use_aliases=True)
EMO_NO_ENTRY = emojize(':no_entry:', use_aliases=True)
EMO_REWIND = emojize(':rewind:', use_aliases=True)
EMO_KEY = emojize(':key:', use_aliases=True)
EMO_CLIPBOARD = emojize(':clipboard:', use_aliases=True)
EMO_UKRAINE = emojize(':Ukraine:', use_aliases=True)
EMO_RUSSIA = emojize(':Russia:', use_aliases=True)
EMO_MEMO = emojize(':memo:', use_aliases=True)
EMO_MONEY = emojize(':moneybag:', use_aliases=True)
| 44.619048
| 64
| 0.760939
|
b897cb5212199cd615a5b912c81436a7b2cd0664
| 915
|
py
|
Python
|
Auto-Staker/staking.py
|
LeeChunHao2000/ftx-srm-auto-staker
|
51ffd1e16ea50dee30a0eae1eb12d749af791eab
|
[
"MIT"
] | 3
|
2020-09-29T01:27:41.000Z
|
2021-01-15T11:02:40.000Z
|
Auto-Staker/staking.py
|
a0981456759/ftx-srm-auto-staker
|
51ffd1e16ea50dee30a0eae1eb12d749af791eab
|
[
"MIT"
] | null | null | null |
Auto-Staker/staking.py
|
a0981456759/ftx-srm-auto-staker
|
51ffd1e16ea50dee30a0eae1eb12d749af791eab
|
[
"MIT"
] | 3
|
2020-09-29T04:19:33.000Z
|
2021-07-31T20:41:17.000Z
|
import sys, logging
from FTX.client import Client
logging.basicConfig(
handlers = [logging.StreamHandler(sys.stdout), logging.FileHandler('log.txt')],
level = logging.INFO,
format = '[%(asctime)s %(levelname)-8s] %(message)s',
datefmt = '%Y%m%d %H:%M:%S',
)
# SRM Coins
SRMS = ['SRM', 'SRM_LOCKED', 'MSRM', 'MSRM_LOCKED']
def stake(client, coin):
"""
:param client: the FTX Client instance
:param coin: the SRM-type coin to stake
:return: None; any free balance of the coin is staked
"""
balance = client.get_private_wallet_single_balance(coin)
if balance is None:
return
free = balance['free']
if free > 0:
client.set_private_srm_stake(coin, free)
logging.info(f"Coin: {coin} Stake: {free}")
def check(client):
"""
:param client: the FTX Client instance
:return: None; stakes the free balance of every coin in SRMS
"""
for coin in SRMS:
stake(client, coin)
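# Minimal driver sketch (the key/secret placeholders and the Client
# constructor signature are assumptions about the FTX.client wrapper
# imported above):
#     if __name__ == '__main__':
#         client = Client('<API_KEY>', '<API_SECRET>')
#         check(client)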
| 22.875
| 83
| 0.62623
|
e9d7c022e1278366f3ca5a8b6dd191614ed9e77e
| 39,589
|
py
|
Python
|
vispy/geometry/triangulation.py
|
chongxi/vispy
|
3683ea1f58e43b4aa1b32a3e69656bead8a31e99
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/geometry/triangulation.py
|
chongxi/vispy
|
3683ea1f58e43b4aa1b32a3e69656bead8a31e99
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/geometry/triangulation.py
|
chongxi/vispy
|
3683ea1f58e43b4aa1b32a3e69656bead8a31e99
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf8 -*-
from __future__ import division, print_function
import sys
from itertools import permutations
import numpy as np
from ..ext.ordereddict import OrderedDict
try:
# Try to use the C++ triangle library, which is faster than the
# pure Python version.
# The latest stable release only works with Python 2. The GitHub version
# works on Python 3, but it has not been released yet.
import triangle
assert sys.version_info.major == 2
_TRIANGLE_AVAILABLE = True
except (ImportError, AssertionError):
_TRIANGLE_AVAILABLE = False
class Triangulation(object):
"""Constrained delaunay triangulation
Implementation based on [1]_.
Parameters
----------
pts : array
Nx2 array of points.
edges : array
Nx2 array of edges (dtype=int).
Notes
-----
* Delaunay legalization is not yet implemented. This still produces a proper
triangulation, but adding legalization would produce fewer thin
triangles.
* The pts and edges arrays may be modified.
References
----------
.. [1] Domiter, V. and Žalik, B. Sweep‐line algorithm for constrained
Delaunay triangulation
"""
def __init__(self, pts, edges):
self.pts = pts[:, :2].astype(np.float32)
self.edges = edges
if self.pts.ndim != 2 or self.pts.shape[1] != 2:
raise TypeError('pts argument must be ndarray of shape (N, 2).')
if self.edges.ndim != 2 or self.edges.shape[1] != 2:
raise TypeError('edges argument must be ndarray of shape (N, 2).')
# described in initialize()
self._front = None
self.tris = OrderedDict()
self._edges_lookup = {}
def _normalize(self):
# Clean up data (not discussed in original publication)
# (i) Split intersecting edges. Every edge that intersects another
# edge or point is split. This extends self.pts and self.edges.
self._split_intersecting_edges()
# (ii) Merge identical points. If any two points are found to be equal,
# the second is removed and the edge table is updated accordingly.
self._merge_duplicate_points()
# (iii) Remove duplicate edges
# TODO
def _initialize(self):
self._normalize()
## Initialization (sec. 3.3)
# sort points by y, then x
flat_shape = self.pts.shape[0] * self.pts.shape[1]
pts = self.pts.reshape(flat_shape).view([('x', np.float32),
('y', np.float32)])
order = np.argsort(pts, order=('y', 'x'))
pts = pts[order]
# update edges to match new point order
invorder = np.argsort(order)
self.edges = invorder[self.edges]
self.pts = pts.view(np.float32).reshape(len(pts), 2)
# make artificial points P-1 and P-2
xmax = self.pts[:, 0].max()
xmin = self.pts[:, 0].min()
ymax = self.pts[:, 1].max()
ymin = self.pts[:, 1].min()
xa = (xmax-xmin) * 0.3
ya = (ymax-ymin) * 0.3
p1 = (xmin - xa, ymin - ya)
p2 = (xmax + xa, ymin - ya)
# prepend artificial points to point list
newpts = np.empty((self.pts.shape[0]+2, 2), dtype=float)
newpts[0] = p1
newpts[1] = p2
newpts[2:] = self.pts
self.pts = newpts
self.edges += 2
# find topmost point in each edge
self._tops = self.edges.max(axis=1)
self._bottoms = self.edges.min(axis=1)
# initialize sweep front
# values in this list are indexes into self.pts
self._front = [0, 2, 1]
# empty triangle list.
# This will contain [(a, b, c), ...] where a,b,c are indexes into
# self.pts
self.tris = OrderedDict()
# For each triangle, maps (a, b): c
# This is used to look up the third point in a triangle, given any
# edge. Since each edge has two triangles, they are independently
# stored as (a, b): c and (b, a): d
self._edges_lookup = {}
def triangulate(self):
"""Do the triangulation
"""
self._initialize()
pts = self.pts
front = self._front
## Begin sweep (sec. 3.4)
for i in range(3, pts.shape[0]):
pi = pts[i]
#debug("========== New point %d: %s ==========" % (i, pi))
# First, triangulate from front to new point
# This applies to both "point events" (3.4.1)
# and "edge events" (3.4.2).
# get index along front that intersects pts[i]
l = 0
while pts[front[l+1], 0] <= pi[0]:
l += 1
pl = pts[front[l]]
# "(i) middle case"
if pi[0] > pl[0]:
#debug(" mid case")
# Add a single triangle connecting pi,pl,pr
self._add_tri(front[l], front[l+1], i)
front.insert(l+1, i)
# "(ii) left case"
else:
#debug(" left case")
# Add triangles connecting pi,pl,ps and pi,pl,pr
self._add_tri(front[l], front[l+1], i)
self._add_tri(front[l-1], front[l], i)
front[l] = i
#debug(front)
# Continue adding triangles to smooth out front
# (heuristics shown in figs. 9, 10)
#debug("Smoothing front...")
for direction in -1, 1:
while True:
# Find point connected to pi
ind0 = front.index(i)
ind1 = ind0 + direction
ind2 = ind1 + direction
if ind2 < 0 or ind2 >= len(front):
break
# measure angle made with front
p1 = pts[front[ind1]]
p2 = pts[front[ind2]]
err = np.geterr()
np.seterr(invalid='ignore')
try:
angle = np.arccos(self._cosine(pi, p1, p2))
finally:
np.seterr(**err)
# if angle is < pi/2, make new triangle
#debug("Smooth angle:", pi, p1, p2, angle)
if angle > np.pi/2. or np.isnan(angle):
break
assert (i != front[ind1] and
front[ind1] != front[ind2] and
front[ind2] != i)
self._add_tri(i, front[ind1], front[ind2],
source='smooth1')
front.pop(ind1)
#debug("Finished smoothing front.")
# "edge event" (sec. 3.4.2)
# remove any triangles cut by completed edges and re-fill
# the holes.
if i in self._tops:
for j in self._bottoms[self._tops == i]:
# Make sure edge (j, i) is present in mesh
# because edge event may have created a new front list
self._edge_event(i, j)
front = self._front
self._finalize()
self.tris = np.array(list(self.tris.keys()), dtype=int)
#debug("Finished with %d tris:" % self.tris.shape[0])
#debug(str(self.tris))
def _finalize(self):
## Finalize (sec. 3.5)
# (i) Add bordering triangles to fill hull
#debug("== Fill hull")
front = list(OrderedDict.fromkeys(self._front))
l = len(front) - 2
k = 1
while k < l-1:
# if edges lie in counterclockwise direction, then signed area
# is positive
if self._iscounterclockwise(front[k], front[k+1], front[k+2]):
self._add_tri(front[k], front[k+1], front[k+2], legal=False,
source='fill_hull')
front.pop(k+1)
l -= 1
continue
k += 1
# (ii) Remove all triangles not inside the hull
# (not described in article)
#debug("== Remove triangles outside hull")
tris = [] # triangles to check
tri_state = {} # 0 for outside, 1 for inside
# find a starting triangle
for t in self.tris:
if 0 in t or 1 in t:
tri_state[t] = 0
tris.append(t)
break
while tris:
#debug("iterate:", tris)
next_tris = []
for t in tris:
v = tri_state[t]
for i in (0, 1, 2):
edge = (t[i], t[(i + 1) % 3])
pt = t[(i + 2) % 3]
t2 = self._adjacent_tri(edge, pt)
if t2 is None:
continue
t2a = t2[1:3] + t2[0:1]
t2b = t2[2:3] + t2[0:2]
if t2 in tri_state or t2a in tri_state or t2b in tri_state:
continue
if self._is_constraining_edge(edge):
tri_state[t2] = 1 - v
else:
tri_state[t2] = v
next_tris.append(t2)
tris = next_tris
for t, v in tri_state.items():
if v == 0:
self._remove_tri(*t)
def _edge_event(self, i, j):
"""
Force edge (i, j) to be present in mesh.
This works by removing intersected triangles and filling holes up to
the cutting edge.
"""
front_index = self._front.index(i)
#debug(" == edge event ==")
front = self._front
# First just see whether this edge is already present
# (this is not in the published algorithm)
if (i, j) in self._edges_lookup or (j, i) in self._edges_lookup:
#debug(" already added.")
return
#debug(" Edge (%d,%d) not added yet. Do edge event. (%s - %s)" %
# (i, j, pts[i], pts[j]))
# traverse in two different modes:
# 1. If cutting edge is below front, traverse through triangles. These
# must be removed and the resulting hole re-filled. (fig. 12)
# 2. If cutting edge is above the front, then follow the front until
# crossing under again. (fig. 13)
# We must be able to switch back and forth between these
# modes (fig. 14)
# Collect points that draw the open polygons on either side of the
# cutting edge. Note that our use of 'upper' and 'lower' is not strict;
# in some cases the two may be swapped.
upper_polygon = [i]
lower_polygon = [i]
# Keep track of which section of the front must be replaced
# and with what it should be replaced
front_holes = [] # contains indexes for sections of front to remove
next_tri = None # next triangle to cut (already set if in mode 1)
last_edge = None # or last triangle edge crossed (if in mode 1)
# Which direction to traverse front
front_dir = 1 if self.pts[j][0] > self.pts[i][0] else -1
# Initialize search state
if self._edge_below_front((i, j), front_index):
mode = 1 # follow triangles
tri = self._find_cut_triangle((i, j))
last_edge = self._edge_opposite_point(tri, i)
next_tri = self._adjacent_tri(last_edge, i)
assert next_tri is not None
self._remove_tri(*tri)
# todo: does this work? can we count on last_edge to be clockwise
# around point i?
lower_polygon.append(last_edge[1])
upper_polygon.append(last_edge[0])
else:
mode = 2 # follow front
# Loop until we reach point j
while True:
#debug(" == edge_event loop: mode %d ==" % mode)
#debug(" front_holes:", front_holes, front)
#debug(" front_index:", front_index)
#debug(" next_tri:", next_tri)
#debug(" last_edge:", last_edge)
#debug(" upper_polygon:", upper_polygon)
#debug(" lower_polygon:", lower_polygon)
#debug(" =====")
if mode == 1:
# crossing from one triangle into another
if j in next_tri:
#debug(" -> hit endpoint!")
# reached endpoint!
# update front / polygons
upper_polygon.append(j)
lower_polygon.append(j)
#debug(" Appended to upper_polygon:", upper_polygon)
#debug(" Appended to lower_polygon:", lower_polygon)
self._remove_tri(*next_tri)
break
else:
# next triangle does not contain the end point; we will
# cut one of the two far edges.
tri_edges = self._edges_in_tri_except(next_tri, last_edge)
# select the edge that is cut
last_edge = self._intersected_edge(tri_edges, (i, j))
#debug(" set last_edge to intersected edge:", last_edge)
last_tri = next_tri
next_tri = self._adjacent_tri(last_edge, last_tri)
#debug(" set next_tri:", next_tri)
self._remove_tri(*last_tri)
# Crossing an edge adds one point to one of the polygons
if lower_polygon[-1] == last_edge[0]:
upper_polygon.append(last_edge[1])
#debug(" Appended to upper_polygon:", upper_polygon)
elif lower_polygon[-1] == last_edge[1]:
upper_polygon.append(last_edge[0])
#debug(" Appended to upper_polygon:", upper_polygon)
elif upper_polygon[-1] == last_edge[0]:
lower_polygon.append(last_edge[1])
#debug(" Appended to lower_polygon:", lower_polygon)
elif upper_polygon[-1] == last_edge[1]:
lower_polygon.append(last_edge[0])
#debug(" Appended to lower_polygon:", lower_polygon)
else:
raise RuntimeError("Something went wrong..")
# If we crossed the front, go to mode 2
x = self._edge_in_front(last_edge)
if x >= 0: # crossing over front
#debug(" -> crossed over front, prepare for mode 2")
mode = 2
next_tri = None
#debug(" set next_tri: None")
# where did we cross the front?
# nearest to new point
front_index = x + (1 if front_dir == -1 else 0)
#debug(" set front_index:", front_index)
# Select the correct polygon to be lower_polygon
# (because mode 2 requires this).
# We know that last_edge is in the front, and
# front[front_index] is the point _above_ the front.
# So if this point is currently the last element in
# lower_polygon, then the polys must be swapped.
if lower_polygon[-1] == front[front_index]:
tmp = lower_polygon, upper_polygon
upper_polygon, lower_polygon = tmp
#debug(' Swap upper/lower polygons')
else:
assert upper_polygon[-1] == front[front_index]
else:
assert next_tri is not None
else: # mode == 2
# At each iteration, we require:
# * front_index is the starting index of the edge _preceding_
# the edge that will be handled in this iteration
# * lower_polygon is the polygon to which points should be
# added while traversing the front
front_index += front_dir
#debug(" Increment front_index: %d" % front_index)
next_edge = (front[front_index], front[front_index+front_dir])
#debug(" Set next_edge: %s" % repr(next_edge))
assert front_index >= 0
if front[front_index] == j:
# found endpoint!
#debug(" -> hit endpoint!")
lower_polygon.append(j)
upper_polygon.append(j)
#debug(" Appended to upper_polygon:", upper_polygon)
#debug(" Appended to lower_polygon:", lower_polygon)
break
# Add point to lower_polygon.
# The conditional is because there are cases where the
# point was already added if we just crossed from mode 1.
if lower_polygon[-1] != front[front_index]:
lower_polygon.append(front[front_index])
#debug(" Appended to lower_polygon:", lower_polygon)
front_holes.append(front_index)
#debug(" Append to front_holes:", front_holes)
if self._edges_intersect((i, j), next_edge):
# crossing over front into triangle
#debug(" -> crossed over front, prepare for mode 1")
mode = 1
last_edge = next_edge
#debug(" Set last_edge:", last_edge)
# we are crossing the front, so this edge only has one
# triangle.
next_tri = self._tri_from_edge(last_edge)
#debug(" Set next_tri:", next_tri)
upper_polygon.append(front[front_index+front_dir])
#debug(" Appended to upper_polygon:", upper_polygon)
#else:
#debug(" -> did not cross front..")
#debug("Finished edge_event:")
#debug(" front_holes:", front_holes)
#debug(" upper_polygon:", upper_polygon)
#debug(" lower_polygon:", lower_polygon)
# (iii) triangulate empty areas
#debug("Filling edge_event polygons...")
for polygon in [lower_polygon, upper_polygon]:
dist = self._distances_from_line((i, j), polygon)
#debug("Distances:", dist)
while len(polygon) > 2:
ind = np.argmax(dist)
#debug("Next index: %d" % ind)
self._add_tri(polygon[ind], polygon[ind-1],
polygon[ind+1], legal=False,
source='edge_event')
polygon.pop(ind)
dist.pop(ind)
#debug("Finished filling edge_event polygons.")
# update front by removing points in the holes (places where front
# passes below the cut edge)
front_holes.sort(reverse=True)
for i in front_holes:
front.pop(i)
#debug("Finished updating front after edge_event.")
def _find_cut_triangle(self, edge):
"""
Return the triangle that has edge[0] as one of its vertices and is
bisected by edge.
Return None if no triangle is found.
"""
edges = [] # opposite edge for each triangle attached to edge[0]
for tri in self.tris:
if edge[0] in tri:
edges.append(self._edge_opposite_point(tri, edge[0]))
for oedge in edges:
o1 = self._orientation(edge, oedge[0])
o2 = self._orientation(edge, oedge[1])
#debug(edge, oedge, o1, o2)
#debug(self.pts[np.array(edge)])
#debug(self.pts[np.array(oedge)])
if o1 != o2:
return (edge[0], oedge[0], oedge[1])
return None
def _edge_in_front(self, edge):
""" Return the index where *edge* appears in the current front.
If the edge is not in the front, return -1
"""
e = (list(edge), list(edge)[::-1])
for i in range(len(self._front)-1):
if self._front[i:i+2] in e:
return i
return -1
def _edge_opposite_point(self, tri, i):
""" Given a triangle, return the edge that is opposite point i.
Vertexes are returned in the same orientation as in tri.
"""
ind = tri.index(i)
return (tri[(ind+1) % 3], tri[(ind+2) % 3])
def _adjacent_tri(self, edge, i):
"""
Given a triangle formed by edge and i, return the triangle that shares
edge. *i* may be either a point or the entire triangle.
"""
if not np.isscalar(i):
i = [x for x in i if x not in edge][0]
try:
pt1 = self._edges_lookup[edge]
pt2 = self._edges_lookup[(edge[1], edge[0])]
except KeyError:
return None
if pt1 == i:
return (edge[1], edge[0], pt2)
elif pt2 == i:
return (edge[1], edge[0], pt1)
else:
raise RuntimeError("Edge %s and point %d do not form a triangle "
"in this mesh." % (edge, i))
def _tri_from_edge(self, edge):
"""Return the only tri that contains *edge*. If two tris share this
edge, raise an exception.
"""
edge = tuple(edge)
p1 = self._edges_lookup.get(edge, None)
p2 = self._edges_lookup.get(edge[::-1], None)
if p1 is None:
if p2 is None:
raise RuntimeError("No tris connected to edge %r" % (edge,))
return edge + (p2,)
elif p2 is None:
return edge + (p1,)
else:
raise RuntimeError("Two triangles connected to edge %r" % (edge,))
def _edges_in_tri_except(self, tri, edge):
"""Return the edges in *tri*, excluding *edge*.
"""
edges = [(tri[i], tri[(i+1) % 3]) for i in range(3)]
try:
edges.remove(tuple(edge))
except ValueError:
edges.remove(tuple(edge[::-1]))
return edges
def _edge_below_front(self, edge, front_index):
"""Return True if *edge* is below the current front.
One of the points in *edge* must be _on_ the front, at *front_index*.
"""
f0 = self._front[front_index-1]
f1 = self._front[front_index+1]
return (self._orientation(edge, f0) > 0 and
self._orientation(edge, f1) < 0)
def _is_constraining_edge(self, edge):
mask1 = self.edges == edge[0]
mask2 = self.edges == edge[1]
return (np.any(mask1[:, 0] & mask2[:, 1]) or
np.any(mask2[:, 0] & mask1[:, 1]))
def _intersected_edge(self, edges, cut_edge):
""" Given a list of *edges*, return the first that is intersected by
*cut_edge*.
"""
for edge in edges:
if self._edges_intersect(edge, cut_edge):
return edge
def _find_edge_intersections(self):
"""
Return a dictionary containing, for each edge in self.edges, a list
of the positions at which the edge should be split.
"""
edges = self.pts[self.edges]
cuts = {} # { edge: [(intercept, point), ...], ... }
for i in range(edges.shape[0]-1):
# intersection of edge i onto all others
int1 = self._intersect_edge_arrays(edges[i:i+1], edges[i+1:])
# intersection of all edges onto edge i
int2 = self._intersect_edge_arrays(edges[i+1:], edges[i:i+1])
# select for pairs that intersect
err = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
try:
mask1 = (int1 >= 0) & (int1 <= 1)
mask2 = (int2 >= 0) & (int2 <= 1)
mask3 = mask1 & mask2 # all intersections
finally:
np.seterr(**err)
# compute points of intersection
inds = np.argwhere(mask3)[:, 0]
if len(inds) == 0:
continue
h = int2[inds][:, np.newaxis]
pts = (edges[i, 0][np.newaxis, :] * (1.0 - h) +
edges[i, 1][np.newaxis, :] * h)
# record for all edges the location of cut points
edge_cuts = cuts.setdefault(i, [])
for j, ind in enumerate(inds):
if 0 < int2[ind] < 1:
edge_cuts.append((int2[ind], pts[j]))
if 0 < int1[ind] < 1:
other_cuts = cuts.setdefault(ind+i+1, [])
other_cuts.append((int1[ind], pts[j]))
# sort all cut lists by intercept, remove duplicates
for k, v in cuts.items():
v.sort(key=lambda x: x[0])
for i in range(len(v)-2, -1, -1):
if v[i][0] == v[i+1][0]:
v.pop(i+1)
return cuts
def _split_intersecting_edges(self):
# we can do all intersections at once, but this has excessive memory
# overhead.
#int1 = self._intersection_matrix(edges)
#int2 = int1.T
# measure intersection point between all pairs of edges
all_cuts = self._find_edge_intersections()
# cut edges at each intersection
add_pts = []
add_edges = []
for edge, cuts in all_cuts.items():
if len(cuts) == 0:
continue
#debug("Edge intersections:", edge, self.edges[edge],
# self.pts[self.edges[edge]], cuts)
# add new points
pt_offset = self.pts.shape[0] + len(add_pts)
new_pts = [x[1] for x in cuts]
add_pts.extend(new_pts)
#debug("Add new points:", new_pts)
# list of point indexes for all new edges
pt_indexes = list(range(pt_offset, pt_offset + len(cuts)))
pt_indexes.append(self.edges[edge, 1])
# modify original edge
self.edges[edge, 1] = pt_indexes[0]
# add new edges
new_edges = [[pt_indexes[i-1], pt_indexes[i]]
for i in range(1, len(pt_indexes))]
add_edges.extend(new_edges)
#debug("Adding %d points and %d edges to remove intersections." %
# (len(add_pts), len(add_edges)))
if add_pts:
add_pts = np.array(add_pts, dtype=self.pts.dtype)
self.pts = np.append(self.pts, add_pts, axis=0)
if add_edges:
add_edges = np.array(add_edges, dtype=self.edges.dtype)
self.edges = np.append(self.edges, add_edges, axis=0)
def _merge_duplicate_points(self):
# generate a list of all pairs (i,j) of identical points
dups = []
for i in range(self.pts.shape[0]-1):
test_pt = self.pts[i:i+1]
comp_pts = self.pts[i+1:]
eq = test_pt == comp_pts
eq = eq[:, 0] & eq[:, 1]
for j in np.argwhere(eq)[:, 0]:
dups.append((i, i+1+j))
dups_arr = np.array(dups)
# remove duplicate points
pt_mask = np.ones(self.pts.shape[0], dtype=bool)
for i, inds in enumerate(dups_arr):
# remove j from points
# (note we pull the index from the original dups instead of
# dups_arr because the indexes in pt_mask do not change)
pt_mask[dups[i][1]] = False
i, j = inds
# rewrite edges to use i instead of j
self.edges[self.edges == j] = i
#assert not np.any(self.edges[:,0] == self.edges[:,1])
# decrement all point indexes > j
self.edges[self.edges > j] -= 1
dups_arr[dups_arr > j] -= 1
#assert not np.any(self.edges[:,0] == self.edges[:,1])
self.pts = self.pts[pt_mask]
# remove zero-length edges
mask = self.edges[:, 0] != self.edges[:, 1]
self.edges = self.edges[mask]
def _distance(self, A, B):
# Distance between points A and B
n = len(A)
assert len(B) == n
return np.linalg.norm(np.array(list(A)) - np.array(list(B)))
def _distances_from_line(self, edge, points):
# Distance of a set of points from a given line
#debug("distance from %r to %r" % (points, edge))
e1 = self.pts[edge[0]]
e2 = self.pts[edge[1]]
distances = []
for i in points:
p = self.pts[i]
proj = self._projection(e1, p, e2)
distances.append(((p - proj)**2).sum()**0.5)
assert distances[0] == 0 and distances[-1] == 0
return distances
def _projection(self, a, b, c):
"""Return projection of (a,b) onto (a,c)
Arguments are point locations, not indexes.
"""
ab = b - a
ac = c - a
return a + ((ab*ac).sum() / (ac*ac).sum()) * ac
def _cosine(self, A, B, C):
# Cosine of angle ABC
a = ((C - B)**2).sum()
b = ((C - A)**2).sum()
c = ((B - A)**2).sum()
d = (a + c - b) / ((4 * a * c)**0.5)
return d
#def _barycentric(self, A, B, C, p, q, r):
## Cartesian coordinates of the point whose barycentric coordinates
## with respect to the triangle ABC are [p,q,r]
#n = len(A)
#assert len(B) == len(C) == n
#s = p+q+r
#p, q, r = p/s, q/s, r/s
#return tuple([p*A[i]+q*B[i]+r*C[i] for i in range(n)])
#def _trilinear(self, A, B, C, alpha, beta, gamma):
## Cartesian coordinates of the point whose trilinear coordinates
## with respect to the triangle ABC are [alpha,beta,gamma]
#a = distance(B, C)
#b = distance(A, C)
#c = distance(A, B)
#return barycentric(A, B, C, a*alpha, b*beta, c*gamma)
#def _circuminfo(self, A, B, C):
## Cartesian coordinates of the circumcenter of triangle ABC
#cosA = cosine(C, A, B)
#cosB = cosine(A, B, C)
#cosC = cosine(B, C, A)
#cc = trilinear(A, B, C, cosA, cosB, cosC)
## returns circumcenter and circumradius
#return cc, distance(cc, A)
def _iscounterclockwise(self, a, b, c):
# Check if the points lie in counter-clockwise order or not
A = self.pts[a]
B = self.pts[b]
C = self.pts[c]
return np.cross(B-A, C-B) > 0
def _edges_intersect(self, edge1, edge2):
"""
Return 1 if edges intersect completely (endpoints excluded)
"""
h12 = self._intersect_edge_arrays(self.pts[np.array(edge1)],
self.pts[np.array(edge2)])
h21 = self._intersect_edge_arrays(self.pts[np.array(edge2)],
self.pts[np.array(edge1)])
err = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
try:
out = (0 < h12 < 1) and (0 < h21 < 1)
finally:
np.seterr(**err)
return out
def _intersection_matrix(self, lines):
"""
Return a 2D array of intercepts such that
intercepts[i, j] is the intercept of lines[i] onto lines[j].
*lines* must be an array of point locations with shape (N, 2, 2), where
the axes are (lines, points_per_line, xy_per_point).
The intercept is described in intersect_edge_arrays().
"""
return self._intersect_edge_arrays(lines[:, np.newaxis, ...],
lines[np.newaxis, ...])
def _intersect_edge_arrays(self, lines1, lines2):
"""Return the intercepts of all lines defined in *lines1* as they
intersect all lines in *lines2*.
Arguments are of shape (..., 2, 2), where axes are:
0: number of lines
1: two points per line
2: x,y pair per point
Lines are compared elementwise across the arrays (lines1[i] is compared
against lines2[i]). If one of the arrays has N=1, then that line is
compared against all lines in the other array.
Returns an array of shape (N,) where each value indicates the intercept
relative to the defined line segment. A value of 0 indicates
intersection at the first endpoint, and a value of 1 indicates
intersection at the second endpoint. Values between 0 and 1 lie on the
segment, whereas values outside that range are off the segment.
"""
# vector for each line in lines1
l1 = lines1[..., 1, :] - lines1[..., 0, :]
# vector for each line in lines2
l2 = lines2[..., 1, :] - lines2[..., 0, :]
# vector between first point of each line
diff = lines1[..., 0, :] - lines2[..., 0, :]
p = l1.copy()[..., ::-1] # vectors perpendicular to l1
p[..., 0] *= -1
f = (l2 * p).sum(axis=-1) # l2 dot p
# tempting, but bad idea!
#f = np.where(f==0, 1, f)
err = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
try:
h = (diff * p).sum(axis=-1) / f # diff dot p / f
finally:
np.seterr(**err)
return h
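# Worked example (illustrative): for lines1 = [[(0, 0), (4, 0)]] and
# lines2 = [[(3, -1), (3, 1)]] the segments cross at (3, 0); the returned
# intercept parameterizes the lines2 segment, so the value is 0.5 (the
# crossing sits at the midpoint of the second segment), not 0.75.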
def _orientation(self, edge, point):
""" Returns +1 if edge[0]->point is clockwise from edge[0]->edge[1],
-1 if counterclockwise, and 0 if parallel.
"""
v1 = self.pts[point] - self.pts[edge[0]]
v2 = self.pts[edge[1]] - self.pts[edge[0]]
c = np.cross(v1, v2) # positive if v1 is CW from v2
return 1 if c > 0 else (-1 if c < 0 else 0)
#def _legalize(self, p):
### Legalize recursively - incomplete
#return p # disabled for now
#f00, f11, p = p
#debug("Legalizing points = {}, {}, {}".format(f00, f11, p))
#a = pts[f00]
#b = pts[f11]
#c = pts[p]
#cc, cr = circuminfo(a, b, c)
#for point in pts:
# if np.all(point == a) or np.all(point == b) or np.all(point == c):
# continue
# elif distance(cc, point) < cr:
# #debug("Illegal point")
# #debug(point)
# pass
#return (f00, f11, p)
def _add_tri(self, a, b, c, legal=True, source=None):
# source is just used for #debugging
#debug("Add triangle [%s]:" % source, (a, b, c))
# sanity check
assert a != b and b != c and c != a
# ignore flat tris
pa = self.pts[a]
pb = self.pts[b]
pc = self.pts[c]
if np.all(pa == pb) or np.all(pb == pc) or np.all(pc == pa):
#debug(" Triangle is flat; refusing to add.")
return
# check this tri is unique
for t in permutations((a, b, c)):
if t in self.tris:
raise Exception("Cannot add %s; already have %s" %
((a, b, c), t))
# TODO: should add to edges_lookup after legalization??
if self._iscounterclockwise(a, b, c):
#debug(" ", (a, b), (b, c), (c, a))
assert (a, b) not in self._edges_lookup
assert (b, c) not in self._edges_lookup
assert (c, a) not in self._edges_lookup
self._edges_lookup[(a, b)] = c
self._edges_lookup[(b, c)] = a
self._edges_lookup[(c, a)] = b
else:
#debug(" ", (b, a), (c, b), (a, c))
assert (b, a) not in self._edges_lookup
assert (c, b) not in self._edges_lookup
assert (a, c) not in self._edges_lookup
self._edges_lookup[(b, a)] = c
self._edges_lookup[(c, b)] = a
self._edges_lookup[(a, c)] = b
#if legal:
#tri = self._legalize((a, b, c))
#else:
tri = (a, b, c)
self.tris[tri] = None
def _remove_tri(self, a, b, c):
#debug("Remove triangle:", (a, b, c))
for k in permutations((a, b, c)):
if k in self.tris:
break
del self.tris[k]
(a, b, c) = k
if self._edges_lookup.get((a, b), -1) == c:
#debug(" ", (a,b), (b,c), (c,a))
del self._edges_lookup[(a, b)]
del self._edges_lookup[(b, c)]
del self._edges_lookup[(c, a)]
elif self._edges_lookup.get((b, a), -1) == c:
#debug(" ", (b,a), (c,b), (a,c))
del self._edges_lookup[(b, a)]
del self._edges_lookup[(a, c)]
del self._edges_lookup[(c, b)]
else:
raise RuntimeError("Lost edges_lookup for tri (%d, %d, %d)" %
(a, b, c))
return k
def _triangulate_python(vertices_2d, segments):
segments = segments.reshape(int(len(segments) / 2), 2)
T = Triangulation(vertices_2d, segments)
T.triangulate()
vertices_2d = T.pts
triangles = T.tris.ravel()
return vertices_2d, triangles
def _triangulate_cpp(vertices_2d, segments):
T = triangle.triangulate({'vertices': vertices_2d,
'segments': segments}, "p")
vertices_2d = T["vertices"]
triangles = T["triangles"]
return vertices_2d, triangles
def triangulate(vertices):
"""Triangulate a set of vertices
Parameters
----------
vertices : array-like
The vertices.
Returns
-------
vertices : array-like
The vertices.
triangles : array-like
The triangles.
"""
n = len(vertices)
vertices = np.asarray(vertices)
zmean = vertices[:, 2].mean()
vertices_2d = vertices[:, :2]
segments = np.repeat(np.arange(n + 1), 2)[1:-1]
segments[-2:] = n - 1, 0
if _TRIANGLE_AVAILABLE:
vertices_2d, triangles = _triangulate_cpp(vertices_2d, segments)
else:
vertices_2d, triangles = _triangulate_python(vertices_2d, segments)
vertices = np.empty((len(vertices_2d), 3))
vertices[:, :2] = vertices_2d
vertices[:, 2] = zmean
return vertices, triangles
# Note: using custom #debug instead of logging because
# there are MANY messages and logger might be too expensive.
# After this becomes stable, we might just remove them altogether.
def debug(*args):
print(*args)
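# Minimal usage sketch (not part of the original module; vertex values are
# illustrative). Because this module uses relative imports, run it as a
# module (e.g. ``python -m vispy.geometry.triangulation``) to execute the
# block below.
if __name__ == '__main__':
    square = np.array([[0., 0., 0.],
                       [1., 0., 0.],
                       [1., 1., 0.],
                       [0., 1., 0.]])
    demo_vertices, demo_triangles = triangulate(square)
    debug("vertices:", demo_vertices)
    debug("triangles:", demo_triangles)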
| 38.176471
| 80
| 0.502741
|
8add77a83a1ddb1efef81fad5c4428e336f5e622
| 5,002
|
py
|
Python
|
heltour/tournament/oauth.py
|
nathanj/heltour
|
14c6e926e02e1c035c9037955af5673c5012d16c
|
[
"MIT"
] | null | null | null |
heltour/tournament/oauth.py
|
nathanj/heltour
|
14c6e926e02e1c035c9037955af5673c5012d16c
|
[
"MIT"
] | null | null | null |
heltour/tournament/oauth.py
|
nathanj/heltour
|
14c6e926e02e1c035c9037955af5673c5012d16c
|
[
"MIT"
] | null | null | null |
import base64
import json
import requests
from django.contrib.auth import login
from django.core import signing
from django.http.response import HttpResponse
from django.shortcuts import redirect, reverse
from heltour.tournament.models import *
from heltour.tournament import lichessapi
_SCOPES = [
'email:read',
'challenge:read',
'challenge:write'
]
def redirect_for_authorization(request, league_tag, secret_token):
# Redirect to lichess's OAuth2 consent screen
# We don't care if anyone else initiates a request, so we can use the state variable to store
# the league tag so we can redirect properly
state = {
'league': league_tag,
'token': secret_token
}
auth = f'{settings.LICHESS_OAUTH_AUTHORIZE_URL}' + \
f'?response_type=code' + \
f'&client_id={settings.LICHESS_OAUTH_CLIENTID}' + \
f'&redirect_uri={_get_redirect_uri(request)}' + \
f'&scope={" ".join(_SCOPES)}' + \
f'&state={_encode_state(state)}'
return redirect(auth)
def login_with_code(request, code, encoded_state):
state = _decode_state(encoded_state)
oauth_token = _get_oauth_token(request, code)
username = _get_account_username(oauth_token)
oauth_token.account_username = username
# TODO: This slows down login. Figure out what to do with this.
# oauth_token.account_email = _get_account_email(oauth_token)
player = Player.get_or_create(username)
# Ensure the player's profile is present so we can display ratings, etc.
_ensure_profile_present(player)
# At this point all http requests are successful, so we can start persisting everything
oauth_token.save()
player.oauth_token = oauth_token
player.save()
user = User.objects.filter(username__iexact=username).first()
if not user:
# Create the user with a password no one will ever use; it can always be manually reset if needed
with reversion.create_revision():
reversion.set_comment('Create user from lichess OAuth2 login')
user = User.objects.create_user(username=username, password=create_api_token())
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
# Slack linking?
if state['token']:
token = LoginToken.objects.filter(secret_token=state['token']).first()
if token and not token.is_expired() and token.slack_user_id:
Player.link_slack_account(username, token.slack_user_id)
request.session['slack_linked'] = True
# Success. Now redirect
redir_url = request.session.get('login_redirect')
if redir_url:
request.session['login_redirect'] = None
return redirect(redir_url)
else:
return redirect('by_league:user_dashboard', state['league'])
def _ensure_profile_present(player):
if not player.profile:
user_meta = lichessapi.get_user_meta(player.lichess_username, priority=100)
player.update_profile(user_meta)
def _get_account_username(oauth_token):
response = requests.get(settings.LICHESS_OAUTH_ACCOUNT_URL,
headers=_get_auth_headers(oauth_token.access_token))
if response.status_code != 200:
return HttpResponse(f'Received {response.status_code} from account endpoint', status=401)
return response.json()['username']
def _get_account_email(oauth_token):
response = requests.get(settings.LICHESS_OAUTH_EMAIL_URL,
headers=_get_auth_headers(oauth_token.access_token))
if response.status_code != 200:
return HttpResponse(f'Received {response.status_code} from email endpoint', status=401)
return response.json()['email']
def _get_oauth_token(request, code):
token_response = requests.post(settings.LICHESS_OAUTH_TOKEN_URL, {
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': _get_redirect_uri(request),
'client_id': settings.LICHESS_OAUTH_CLIENTID,
'client_secret': settings.LICHESS_OAUTH_CLIENTSECRET
})
if token_response.status_code != 200:
return HttpResponse(f'Received {token_response.status_code} from token endpoint', status=401)
token_json = token_response.json()
return OauthToken(access_token=token_json['access_token'],
token_type=token_json['token_type'],
expires=timezone.now() + timedelta(
seconds=token_json['expires_in']),
refresh_token=token_json.get('refresh_token'),
scope=token_json.get('scope', ' '.join(_SCOPES)))
def _get_redirect_uri(request):
return request.build_absolute_uri(reverse('lichess_auth'))
def _encode_state(state):
# This state isn't actually security critical, but it's good practice to sign it
return signing.dumps(state)
def _decode_state(state):
return signing.loads(state)
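# Illustrative round trip of the signed state (requires Django settings with
# a SECRET_KEY configured; the values are made up):
#     encoded = _encode_state({'league': 'some-league', 'token': None})
#     _decode_state(encoded)  # -> {'league': 'some-league', 'token': None}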
def _get_auth_headers(access_token):
return {'Authorization': f'Bearer {access_token}'}
| 37.609023
| 105
| 0.69952
|
ae01eec9c4809cbadc3a6cd6ce7ff66a402049aa
| 2,045
|
py
|
Python
|
datetime_drill.py
|
gcguevara/file_transfer_app
|
a2401f6aa9915d22242844c926e79ff7743766bf
|
[
"MIT"
] | null | null | null |
datetime_drill.py
|
gcguevara/file_transfer_app
|
a2401f6aa9915d22242844c926e79ff7743766bf
|
[
"MIT"
] | null | null | null |
datetime_drill.py
|
gcguevara/file_transfer_app
|
a2401f6aa9915d22242844c926e79ff7743766bf
|
[
"MIT"
] | null | null | null |
""" Python Workplace Simulation
Author: Garrett Guevara
Written/Tested in Python Version 3.5.2
Task: Your company is headquartered in Portland, OR.
They've opened two new branches in NYC and London. They
ask that you create a program that tells if the branches
are open or closed based on the current time at HQ.
All branches are open 9:00AM-9:00PM
"""
import datetime
class Branch(object):
""" A branch object for the hypothetical company. Each branch
has the following attributes:
name: A string with the branch name by location, i.e. "Portland"
timezone: An integer that is the correct hour difference from UTC
"""
# declare local opening and closing hour for branch, 9 AM to 9 PM
opening_hour = 9 # 9 AM
closing_hour = opening_hour + 12 # 9 PM
def __init__(self, name, timezone=0):
self.name = name
self.timezone = timezone
def __str__(self):
return "The " + self.name + " branch is " + self.is_open() + "."
def is_open(self):
""" Compares if the current time adjusted for timezone is between
the variables opening_hour and closing_hour. Returns "open" or "closed".
"""
# find the current time in UTC
now = datetime.datetime.utcnow()
# add the timezone offset and wrap around midnight so the hour stays in 0-23
hour_in_timezone = (now.hour + self.timezone) % 24
# if that hour is between 9 AM or 9 PM, return "open", else "closed"
if self.opening_hour <= hour_in_timezone < self.closing_hour:
return "open"
else:
return "closed"
# tell the person the current time based on the server they are using
currtime = datetime.datetime.now()
print("Hello, your current time is " + currtime.strftime('%H:%M:%S') + ".\n")
# declare array of three branches with correct timezone argument
branches = [
Branch('Portland', -8),
Branch('New York', -5),
Branch('London', 0)
]
# loop through list and print a string telling if it's open or closed
for branch in branches:
print(branch)
| 29.637681
| 80
| 0.670905
|
3200944a6d21f3e53c199619aa9848cfe124983b
| 9,395
|
py
|
Python
|
homura/callbacks/reporters.py
|
Fragile-azalea/homura
|
900d1d63affb9c8af3accd9b196b5276cb2e14b6
|
[
"Apache-2.0"
] | 1
|
2020-06-30T01:55:41.000Z
|
2020-06-30T01:55:41.000Z
|
homura/callbacks/reporters.py
|
Fragile-azalea/homura
|
900d1d63affb9c8af3accd9b196b5276cb2e14b6
|
[
"Apache-2.0"
] | null | null | null |
homura/callbacks/reporters.py
|
Fragile-azalea/homura
|
900d1d63affb9c8af3accd9b196b5276cb2e14b6
|
[
"Apache-2.0"
] | null | null | null |
import json
from numbers import Number
from pathlib import Path
from typing import Mapping, Iterable, Optional, List, Union
import torch
import tqdm
from torchvision.utils import save_image as _save_image
from homura import liblog
from homura.utils import is_master, get_args
from homura.utils._vocabulary import *
from .base import Callback, CallbackList
from .metrics import MetricCallback
Callbacks = Union[Callback, Iterable[Callback]]
Vector = Union[torch.Tensor, List[Number], Number]
class _ReporterBase(Callback):
# Actual base class for reporters; users should not inherit from it directly
def __init__(self, *args, **kwargs):
self.callbacks = None
def register_callbacks(self,
callbacks: CallbackList):
self.callbacks = callbacks
def before_all(self,
data: Mapping) -> Mapping:
if self.callbacks is None:
raise RuntimeError("`callbacks` are not registered.")
return self.callbacks.before_all(data)
def before_epoch(self,
data: Mapping) -> Mapping:
return self.callbacks.before_epoch(data)
def before_iteration(self,
data: Mapping) -> Mapping:
return self.callbacks.before_iteration(data)
def after_all(self,
data: Mapping) -> Mapping:
return self.callbacks.after_all(data)
def after_epoch(self,
data: Mapping) -> Mapping:
return self.callbacks.after_epoch(data)
def after_iteration(self,
data: Mapping) -> Mapping:
return self.callbacks.after_iteration(data)
def close(self):
self.callbacks.close()
class Reporter(_ReporterBase):
""" Virtual base class of reporters.
If `global_rank > 0`, i.e., the process is not the master node in distributed training, `_ReporterBase` is used instead.
"""
master_only = True
def __new__(cls,
*args,
**kwargs):
if cls.master_only and not is_master():
return _ReporterBase(*args, **kwargs)
return object.__new__(cls)
def __init__(self, *args, **kwargs):
super(Reporter, self).__init__(*args, **kwargs)
@staticmethod
def _is_scalar(v: Vector) -> bool:
if torch.is_tensor(v):
return v.numel() == 1
return isinstance(v, Number)
@staticmethod
def _is_vector(v: Vector) -> bool:
return (torch.is_tensor(v) and v.dim() == 1) or all([Reporter._is_scalar(e) for e in v])
@staticmethod
def _is_images(t: torch.Tensor) -> bool:
return (t.dim() == 3) or (t.dim() == 4 and t.size(1) in (1, 3))
@staticmethod
def to_serializable(tensor: Vector):
if torch.is_tensor(tensor):
if tensor.numel() == 1:
return tensor.item()
else:
return tensor.tolist()
return tensor
class TQDMReporter(Reporter):
""" Reporter with TQDM
:param iterator: iterator to be wrapped.
>>> t_reporter = TQDMReporter(range(100))
>>> for ep in t_reporter:
>>> pass
"""
# to enable __iter__ and __len__
master_only = False
def __init__(self,
iterator: Iterable,
verb: bool = False):
super(TQDMReporter, self).__init__()
self.writer = tqdm.tqdm(
iterator, dynamic_ncols=True) if is_master() else iterator
self._verb = verb
self._logger = liblog.get_logger(__name__)
self._length = len(iterator)
self._max_accuracy = -1.0
liblog._set_tqdm_handler()
liblog._set_tqdm_print()
def __iter__(self):
for i in self.writer:
yield i
def __len__(self):
return self._length
def add_text(self,
text: str):
if is_master():
self.writer.write(text)
def after_epoch(self,
data: Mapping):
reportable = {}
results = super(TQDMReporter, self).after_epoch(data)
if is_master():
if data[MODE] == TEST:
accuracy_test = self.to_serializable(results['accuracy_test'])
if accuracy_test > self._max_accuracy:
self._max_accuracy = accuracy_test
reportable['best'] = self._max_accuracy
for k, v in results.items():
if self._is_scalar(v):
reportable[k] = self.to_serializable(v)
elif isinstance(v, dict):
reportable.update({k: self.to_serializable(e)
for k, e in v.items()})
self.writer.set_postfix(reportable)
if self._verb:
log = ""
for k, v in reportable.items():
v = f"{v:.4f}" if isinstance(v, Number) else v
log += f"{k}={v}, "
self._logger.info(log[:-2])
class TensorboardReporter(Reporter):
""" Reporter with Tensorboard
:param save_dir: directory where the log is saved
    :param report_freq: frequency of reporting in iterations. If `None`, values are reported once per epoch.
    :param is_global_step_epoch: if True, the epoch count is used as the global step; otherwise the iteration count is used.
"""
def __init__(self,
save_dir: Optional[str or Path],
report_freq: Optional[int] = None,
is_global_step_epoch: bool = True):
super(TensorboardReporter, self).__init__()
from torch.utils import tensorboard
save_dir = Path(save_dir) / BASIC_DIR_NAME
save_dir.mkdir(exist_ok=True, parents=True)
self.writer = tensorboard.SummaryWriter(save_dir)
self._report_freq = report_freq
self._use_epoch = is_global_step_epoch
self.writer.add_text("exec", ' '.join(get_args()))
def after_iteration(self,
data: Mapping):
results = super(TensorboardReporter, self).after_iteration(data)
global_step = data[EPOCH if self._use_epoch else ITERATION]
for k, v in results.items():
if self._report_freq is not None and data[ITERATION] % self._report_freq == 0:
self._report_values(k, v, global_step)
elif torch.is_tensor(v) and self._is_images(v):
self.writer.add_images(k, v, global_step)
def after_epoch(self,
data: Mapping):
results = super(TensorboardReporter, self).after_epoch(data)
global_step = data[EPOCH if self._use_epoch else ITERATION]
if self._report_freq is None:
for k, v in results.items():
self._report_values(k, v, global_step)
def _report_values(self,
k: str,
v: Vector or dict,
global_step: int):
if self._is_scalar(v):
self.writer.add_scalar(k, v, global_step)
elif isinstance(v, dict):
self.writer.add_scalars(k, v, global_step)
elif self._is_vector(v):
self.writer.add_scalars(
k, {str(i): e for i, e in enumerate(v)}, global_step)
class IOReporter(Reporter):
""" Reporter based on IO, i.e., save json files for scalars and image files for images.
"""
def __init__(self,
save_dir: Optional[str or Path]):
super(IOReporter, self).__init__()
save_dir = Path(save_dir) / BASIC_DIR_NAME
save_dir.mkdir(exist_ok=True, parents=True)
self.save_dir = save_dir
def after_iteration(self,
data: Mapping):
# save image
results = super(IOReporter, self).after_iteration(data)
for k, v in results.items():
if torch.is_tensor(v) and self._is_images(v):
self.save_image(self.save_dir, v, k, data[EPOCH])
def close(self):
# save text
history = {"exec": ' '.join(get_args())}
if hasattr(self.callbacks, "callbacks"):
for c in self.callbacks.callbacks:
if isinstance(c, MetricCallback):
history[c.metric_name] = c.history
with (self.save_dir / "results.json").open('w') as f:
json.dump(history, f)
@staticmethod
def save_image(path: Path,
img: torch.Tensor,
name: str,
idx: int) -> None:
(path / "images").mkdir(exist_ok=True, parents=True)
filename = path / "images" / f"{name}-{idx}.png"
_save_image(img, filename)
class CallImage(Callback):
""" Fetch image from `data` by `key`.
    If you want to report the image once per epoch, set `report_freq=None` (the default).
"""
master_only = True
def __init__(self,
key: str,
report_freq: Optional[int] = None):
self.key = key
self.report_freq = report_freq
def after_iteration(self,
data: Mapping):
if data[ITERATION] == 0:
if data.get(self.key) is None:
raise RuntimeError(
f"key for image `{self.key}` is not found in `data`")
if self.report_freq is None:
# epochwise, because `data` of `after_epoch` does not have images
if data[ITERATION] % data[ITER_PER_EPOCH] != data[ITER_PER_EPOCH] - 1:
return
else:
if data[ITERATION] % self.report_freq != 0:
return
return {self.key: data[self.key]}
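# Hedged usage sketch (illustrative only): exercises the TQDMReporter defined
# above. It needs the same homura/torch environment as the rest of this module;
# the loop body is a placeholder for a real training step.
if __name__ == "__main__":
    _reporter = TQDMReporter(range(3), verb=True)
    for _epoch in _reporter:
        # a real trainer would run its iterations here and push `data`
        # mappings through the registered callbacks
        _reporter.add_text(f"epoch {_epoch} finished")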
| 32.964912
| 96
| 0.578286
|
6a981bfb23cc6c2a3b86fbdda2d790b7ef3944d3
| 5,667
|
py
|
Python
|
evaluate_image.py
|
mklasby/mri-variationalnetwork
|
b784fbfcf24d833edb4a41dc70cd863052528f19
|
[
"MIT"
] | 119
|
2017-09-22T01:10:25.000Z
|
2022-03-17T18:44:39.000Z
|
evaluate_image.py
|
mklasby/mri-variationalnetwork
|
b784fbfcf24d833edb4a41dc70cd863052528f19
|
[
"MIT"
] | 11
|
2017-12-26T10:45:18.000Z
|
2021-03-04T17:10:04.000Z
|
evaluate_image.py
|
mklasby/mri-variationalnetwork
|
b784fbfcf24d833edb4a41dc70cd863052528f19
|
[
"MIT"
] | 55
|
2018-06-18T05:37:52.000Z
|
2022-03-14T22:41:27.000Z
|
import os
import sys
import argparse
import glob
import traceback
import time
import vn
import tensorflow as tf
import numpy as np
from mridata import VnMriReconstructionData
import mriutils
import icg
parser = argparse.ArgumentParser(description='reconstruct a given image data using a model')
parser.add_argument('image_config', type=str, help='config file for reconstruct')
parser.add_argument('model_name', type=str, help='name of the model in the log dir')
parser.add_argument('--o', dest='output_name', type=str, default='result', help='output name')
parser.add_argument('--epoch', type=int, default=None, help='epoch to evaluate')
parser.add_argument('--training_config', type=str, default='./configs/training.yaml', help='training config file')
if __name__ == '__main__':
# parse the input arguments
args = parser.parse_args()
# image and model
data_config = icg.utils.loadYaml(args.image_config, ['data_config'])
model_name = args.model_name
output_name = args.output_name
epoch = args.epoch
checkpoint_config = icg.utils.loadYaml(args.training_config, ['checkpoint_config'])
all_models = glob.glob(checkpoint_config['log_dir'] + '/*')
all_models = sorted([d.split('/')[-1] for d in all_models if os.path.isdir(d)])
    if model_name not in all_models:
print('model not found in "{}"'.format(checkpoint_config['log_dir']))
sys.exit(-1)
ckpt_dir = checkpoint_config['log_dir'] + '/' + model_name + '/checkpoints/'
eval_output_dir = checkpoint_config['log_dir'] + '/' + model_name + '/test/'
with tf.compat.v1.Session() as sess:
try:
# load from checkpoint if required
epoch = vn.utils.loadCheckpoint(sess, ckpt_dir, epoch=epoch)
except Exception as e:
            traceback.print_exc()
# extract operators and variables from the graph
u_op = tf.compat.v1.get_collection('u_op')[0]
u_var = tf.compat.v1.get_collection('u_var')
c_var = tf.compat.v1.get_collection('c_var')
m_var = tf.compat.v1.get_collection('m_var')
f_var = tf.compat.v1.get_collection('f_var')
g_var = tf.compat.v1.get_collection('g_var')
# create data object
data = VnMriReconstructionData(data_config,
u_var=u_var,
f_var=f_var,
c_var=c_var,
m_var=m_var,
g_var=g_var,
load_eval_data=False,
load_target=True)
# run the model
print('start reconstruction')
eval_start_time = time.time()
feed_dict, norm = data.get_test_feed_dict(data_config['dataset'],
data_config['dataset']['patient'],
data_config['dataset']['slice'],
return_norm=True)
        # get the reconstruction, re-normalize and postprocess it
u_i = sess.run(u_op, feed_dict=feed_dict)[0]
u_i = u_i * norm
u_i = mriutils.postprocess(u_i, data_config['dataset']['name'])
# target
target = feed_dict[data.target][0]*norm
target = mriutils.postprocess(target, data_config['dataset']['name'])
# zero filling
zero_filling = feed_dict[data.u][0]*norm
zero_filling = mriutils.postprocess(zero_filling, data_config['dataset']['name'])
# evaluation
rmse_vn = mriutils.rmse(u_i, target)
rmse_zf = mriutils.rmse(zero_filling, target)
ssim_vn = mriutils.ssim(u_i, target)
ssim_zf = mriutils.ssim(zero_filling, target)
print("Zero filling: RMSE={:.4f} SSIM={:.4f} VN: RMSE={:.4f} SSIM={:.4f}".format(rmse_zf, ssim_zf, rmse_vn, ssim_vn))
time_reco = time.time() - eval_start_time
print('reconstruction of {} image took {:f}s'.format(u_i.shape, time_reco))
print('saving reconstructed image to "{}"'.format(output_name))
# save mat files
patient_id = '%s-p%d-sl%d' % (data_config['dataset']['name'],
data_config['dataset']['patient'],
data_config['dataset']['slice'])
mriutils.saveAsMat(u_i, '%s-vn-%s' % (output_name, patient_id), 'result_vn',
mat_dict={'normalization': np.asarray(norm)})
mriutils.saveAsMat(zero_filling, '%s-zf-%s' % (output_name, patient_id), 'result_zf',
mat_dict={'normalization': np.asarray(norm)})
mriutils.saveAsMat(target, '%s-ref-%s' % (output_name, patient_id), 'reference',
mat_dict={'normalization': np.asarray(norm)})
# enhance image with same parameters for all images
v_min, v_max = mriutils.getContrastStretchingLimits(np.abs(target),
saturated_pixel=0.002)
target_enhanced = mriutils.normalize(np.abs(target), v_min=v_min, v_max=v_max)
u_i_enhanced = mriutils.normalize(np.abs(u_i), v_min=v_min, v_max=v_max)
zf_enhanced = mriutils.normalize(np.abs(zero_filling), v_min=v_min, v_max=v_max)
# save pngs
mriutils.imsave(u_i_enhanced,
'%s-vn-%s.png' % (output_name, patient_id))
mriutils.imsave(target_enhanced,
'%s-ref-%s.png' % (output_name, patient_id))
mriutils.imsave(zf_enhanced,
'%s-zf-%s.png' % (output_name, patient_id))
| 43.930233
| 126
| 0.59573
|
f58b8d05071c472249d5822b5f2a2d2ba8a354eb
| 6,452
|
py
|
Python
|
hitcount/models.py
|
glassfordm/django-hitcount
|
b35d2f9c213f6a2ff0e5d0a746339a5b84b4d416
|
[
"MIT"
] | 1
|
2020-05-20T08:42:49.000Z
|
2020-05-20T08:42:49.000Z
|
hitcount/models.py
|
glassfordm/django-hitcount
|
b35d2f9c213f6a2ff0e5d0a746339a5b84b4d416
|
[
"MIT"
] | 14
|
2020-03-24T17:31:08.000Z
|
2022-03-11T23:59:30.000Z
|
hitcount/models.py
|
glassfordm/django-hitcount
|
b35d2f9c213f6a2ff0e5d0a746339a5b84b4d416
|
[
"MIT"
] | 1
|
2020-04-13T12:37:37.000Z
|
2020-04-13T12:37:37.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import timedelta
from django.db import models
from django.conf import settings
from django.db.models import F
from django.utils import timezone
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
from .managers import HitCountManager, HitManager
from .signals import delete_hit_count
@receiver(delete_hit_count)
def delete_hit_count_handler(sender, instance, save_hitcount=False, **kwargs):
"""
Custom callback for the Hit.delete() method.
Hit.delete(): removes the hit from the associated HitCount object.
Hit.delete(save_hitcount=True): preserves the hit for the associated
HitCount object.
"""
if not save_hitcount:
instance.hitcount.decrease()
@python_2_unicode_compatible
class HitCount(models.Model):
"""
Model that stores the hit totals for any content object.
"""
hits = models.PositiveIntegerField(default=0)
modified = models.DateTimeField(auto_now=True)
content_type = models.ForeignKey(
ContentType, related_name="content_type_set_for_%(class)s", on_delete=models.CASCADE)
object_pk = models.PositiveIntegerField('object ID')
content_object = GenericForeignKey('content_type', 'object_pk')
objects = HitCountManager()
class Meta:
ordering = ('-hits',)
get_latest_by = "modified"
verbose_name = _("hit count")
verbose_name_plural = _("hit counts")
unique_together = ("content_type", "object_pk")
db_table = "hitcount_hit_count"
def __str__(self):
return '%s' % self.content_object
def increase(self):
self.hits = F('hits') + 1
self.save()
def decrease(self):
self.hits = F('hits') - 1
self.save()
def hits_in_last(self, **kwargs):
"""
Returns hit count for an object during a given time period.
This will only work for as long as hits are saved in the Hit database.
        If you are purging your database after 45 days, for example, asking for
        hits in the last 60 days will return an incorrect number, since the
        longest period it can search is 45 days.
For example: hits_in_last(days=7).
Accepts days, seconds, microseconds, milliseconds, minutes,
hours, and weeks. It's creating a datetime.timedelta object.
"""
assert kwargs, "Must provide at least one timedelta arg (eg, days=1)"
period = timezone.now() - timedelta(**kwargs)
return self.hit_set.filter(created__gte=period).count()
# def get_content_object_url(self):
# """
# Django has this in its contrib.comments.model file -- seems worth
# implementing though it may take a couple steps.
#
# """
# pass
@python_2_unicode_compatible
class Hit(models.Model):
"""
Model captures a single Hit by a visitor.
None of the fields are editable because they are all dynamically created.
Browsing the Hit list in the Admin will allow one to blacklist both
IP addresses as well as User Agents. Blacklisting simply causes those
hits to not be counted or recorded.
    Depending on how long you set HITCOUNT_KEEP_HIT_ACTIVE, and how far back
    you want `HitCount.hits_in_last(days=30)` to be able to look, you can clean
    up your Hit table with the `hitcount_cleanup` management command.
"""
created = models.DateTimeField(editable=False, auto_now_add=True, db_index=True)
ip = models.CharField(max_length=40, editable=False, db_index=True)
session = models.CharField(max_length=40, editable=False, db_index=True)
user_agent = models.CharField(max_length=255, editable=False)
user = models.ForeignKey(AUTH_USER_MODEL, null=True, editable=False, on_delete=models.CASCADE)
hitcount = models.ForeignKey(HitCount, editable=False, on_delete=models.CASCADE)
objects = HitManager()
class Meta:
ordering = ('-created',)
get_latest_by = 'created'
verbose_name = _("hit")
verbose_name_plural = _("hits")
def __str__(self):
return 'Hit: %s' % self.pk
def save(self, *args, **kwargs):
"""
The first time the object is created and saved, we increment
the associated HitCount object by one. The opposite applies
if the Hit is deleted.
"""
if self.pk is None:
self.hitcount.increase()
super(Hit, self).save(*args, **kwargs)
def delete(self, save_hitcount=False):
"""
If a Hit is deleted and save_hitcount=True, it will preserve the
HitCount object's total. However, under normal circumstances, a
delete() will trigger a subtraction from the HitCount object's total.
NOTE: This doesn't work at all during a queryset.delete().
"""
delete_hit_count.send(
sender=self, instance=self, save_hitcount=save_hitcount)
super(Hit, self).delete()
@python_2_unicode_compatible
class BlacklistIP(models.Model):
ip = models.CharField(max_length=40, unique=True)
class Meta:
db_table = "hitcount_blacklist_ip"
verbose_name = _("Blacklisted IP")
verbose_name_plural = _("Blacklisted IPs")
def __str__(self):
return '%s' % self.ip
@python_2_unicode_compatible
class BlacklistUserAgent(models.Model):
user_agent = models.CharField(max_length=255, unique=True)
class Meta:
db_table = "hitcount_blacklist_user_agent"
verbose_name = _("Blacklisted User Agent")
verbose_name_plural = _("Blacklisted User Agents")
def __str__(self):
return '%s' % self.user_agent
class HitCountMixin(object):
"""
HitCountMixin provides an easy way to add a `hit_count` property to your
model that will return the related HitCount object.
"""
@property
def hit_count(self):
ctype = ContentType.objects.get_for_model(self.__class__)
hit_count, created = HitCount.objects.get_or_create(
content_type=ctype, object_pk=self.pk)
return hit_count
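# Hedged usage sketch (illustrative only; the `Article` model below is a
# hypothetical example, not part of django-hitcount):
#
#     class Article(models.Model, HitCountMixin):
#         title = models.CharField(max_length=200)
#
#     article = Article.objects.get(pk=1)
#     article.hit_count.hits                  # running total for this object
#     article.hit_count.hits_in_last(days=7)  # hits recorded in the last week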
| 32.585859
| 98
| 0.688159
|
fb12b0c066654119ed31bf9cb65b02a413de106b
| 1,650
|
py
|
Python
|
src/heap.py
|
regenalgrant/datastructures
|
e6030a28fb5f6f045dd008fa3c32281737c3aac9
|
[
"MIT"
] | null | null | null |
src/heap.py
|
regenalgrant/datastructures
|
e6030a28fb5f6f045dd008fa3c32281737c3aac9
|
[
"MIT"
] | null | null | null |
src/heap.py
|
regenalgrant/datastructures
|
e6030a28fb5f6f045dd008fa3c32281737c3aac9
|
[
"MIT"
] | null | null | null |
# _*_coding:utf-8 _*_
"""Create a heap data structure."""
from __future__ import division
import math
class Heap(object):
"""Create class Heap."""
def __init__(self):
"""Initilializating for Heap class."""
self.high_low = []
def get_parent(self, index):
"""Index number of Node's parent."""
        return (index - 1) // 2
def get_left(self, index):
"""Left child."""
return 2 * index + 1
def get_right(self, index):
"""Right child."""
return 2 * index + 2
    def compare_parent(self, index):
        """Sift the node at ``index`` down until the heap property holds."""
        while True:
            left = self.get_left(index)
            right = self.get_right(index)
            if left < len(self.high_low) and self.high_low[left] > self.high_low[index]:
                largest = left
            else:
                largest = index
            if right < len(self.high_low) and self.high_low[right] > self.high_low[largest]:
                largest = right
            if largest != index:
                self.high_low[index], self.high_low[largest] = (
                    self.high_low[largest], self.high_low[index])
                index = largest
            else:
                break
    def push(self, val):
        """Push a new value onto the heap and sift it up into place."""
        self.high_low.append(val)
        index = len(self.high_low) - 1
        while index > 0:
            parent = self.get_parent(index)
            if self.high_low[index] <= self.high_low[parent]:
                break
            self.high_low[index], self.high_low[parent] = (
                self.high_low[parent], self.high_low[index])
            index = parent
    def pop(self):
        """Remove and return the top item of the heap."""
        temp = self.high_low[0]
        last = self.high_low.pop()
        if self.high_low:
            self.high_low[0] = last
            self.compare_parent(0)
        return temp
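# Hedged usage sketch (illustrative only), exercising the max-heap above:
if __name__ == "__main__":
    heap = Heap()
    for value in (3, 9, 1, 7):
        heap.push(value)          # sift each new value up into place
    print(heap.high_low[0])       # the largest value sits at the root -> 9
    print(heap.pop())             # removes and returns the root -> 9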
| 29.464286
| 93
| 0.544242
|
81219fd2c42c1718f62c876d31817f235f63ca0d
| 10,174
|
py
|
Python
|
mainCode/1.old/TL-GPU1.old.py
|
dreamspy/Endnet
|
0370ec4ee075b76db12a7649c568f453bc1c9bc8
|
[
"Apache-2.0"
] | null | null | null |
mainCode/1.old/TL-GPU1.old.py
|
dreamspy/Endnet
|
0370ec4ee075b76db12a7649c568f453bc1c9bc8
|
[
"Apache-2.0"
] | 10
|
2020-03-24T16:48:40.000Z
|
2022-03-11T23:42:37.000Z
|
mainCode/1.old/TL-GPU1.old.py
|
dreamspy/Endnet
|
0370ec4ee075b76db12a7649c568f453bc1c9bc8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# # Which GPU to use
# In[1]:
multiGPU = False
whichGPU = 0
# whichGPU = 1
# Select which GPU to use
if(multiGPU):
from keras.utils.training_utils import multi_gpu_model
else:
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# The GPU id to use, usually either "0" or "1"
os.environ["CUDA_VISIBLE_DEVICES"] = str(whichGPU)
# # Do other imports now...
# # Load all the functions
# In[2]:
get_ipython().run_line_magic('run', "-i 'arena.py'")
# # General Parameters
# In[3]:
import math
# What data to use
tableBase = '4PpKk'
convertStates = False
# Interactive: whether to ask for confirmations in general (set to False when running unattended overnight, for example)
askForConfirmation = False
# NN parameters
filters = [16,32,32,64,128,128,128]
filterShape = [2,2,2,2,2,2,2]
batch_size = 256
optimizer = 'Adadelta'
useBatchNorm = False
num_classes = 3
input_shape = (4,8,8)
### DON'T MODIFY BELOW ###
# Generate dataset variables
fileName = tableBase + '.hdf5'
dataSetName = tableBase + '_onlyLegal'
if not convertStates:
dataSetName = tableBase + '_onlyLegal_fullStates'
dataSetWdlName = tableBase + '_Wdl_onlyLegal_3Values'
# Number of Pieces
nPi = int(dataSetName[0])
nPa = nPi - 2
nWPa = math.ceil(nPa/2)
# # Experiment 1
# Bengio method 3n4 with freeze
# ### Exp 1 Paramters
# In[ ]:
get_ipython().run_line_magic('run', "-i 'arena.py'")
# Parameters
sourceNet = '103' # trained on 3pc from scratch
# sourceNet = '107' # trained on 4pc from scratch
freeze = True
resSaveFile = '3n4freeze'
epochs = 10
averageOver = 10
expDescrBaseName = "Bengio 3n4 - freeze = {} - average over {} runs".format(str(freeze), averageOver)
saveEveryRun = True # save stuff in results dir
saveWeightsCheckpoints = False # save checkpoints in results dir
saveTensorboardLogs = True # save logs in ./logs dir
resID = '---NORESID---' # used when not saving data, but fitModel() still needs a resID
fractionOfDataToUse = 1
plotDuringTraining = False
loadWeights = False
askForConfirmation = False
saveDir = 'bengioResults'
resSaveFile = resSaveFile + '-{}runAverage'.format(averageOver)
resSaveFileFullPath = saveDir + '/' + str(resSaveFile) + '.pkl'
# ### Create model and load data
# In[ ]:
# prepare save file
if not os.path.exists(resSaveFileFullPath):
print("Save file doesn't exists, creating...\n")
save_obj(saveDir, resSaveFile, [])
else:
print("Save file exists...\n")
# load data
X_train, X_test, y_train, y_test = loadData()
# create model
model, nnStr = createModel()
layersCount = len(model.layers)
# load old results
results = load_obj(saveDir, resSaveFile)
# initialize variables wrt old results
startTrainingAtLayer = len(results)
print("\nStarting/restarting TL at {} transfered layers".format(startTrainingAtLayer))
# ### Train
# In[ ]:
get_ipython().run_line_magic('run', "-i 'arena.py'")
for copyFirstNLayers in range(startTrainingAtLayer, layersCount):
print('\n\n')
print('==========================================================================================')
print('= =')
print('= Currently transfering first {} layers, out of {} ='.format(copyFirstNLayers, layersCount - 1))
print('= =')
print('==========================================================================================')
print()
# check if we are at the flatten layer, and skip it if so
if copyFirstNLayers == layersCount - 1:
copyFirstNLayers += 1
# train and average results
accumulatedScore = 0
for a in range(averageOver):
        # set experiment description text
expDescr = expDescrBaseName + '__copyLayers_{}__average_{}_of_{}'.format(copyFirstNLayers, a+1, averageOver)
# save current averagePosition to tmp file
with open(saveDir + '/' + str(resSaveFile) + '_currentPosition.txt','w') as file:
if copyFirstNLayers == layersCount:
location = copyFirstNLayers - 1
else:
location = copyFirstNLayers
file.write('Layers Transfered: {} out of {} \nInner avg loop position: {} out of {}'.format(location, layersCount-1, a+1, averageOver))
# load Model layers
model = loadNFirstLayers(model, sourceNet, copyFirstNLayers , freeze)
# Prepare save dir
if saveEveryRun:
resID = genNextResultsDir(model)
# train
fitHistory, logDir = trainModel(resID, model, saveWeightsCheckpoints, saveTensorboardLogs)
# score and save results
score = calcScore(model)
if saveEveryRun:
saveTrainResults(resID, model, logDir, score, copyFirstNLayers)
# update Return
accumulatedScore += score[1]
# append averaged results for one set of layers
results.append(accumulatedScore/averageOver)
# save old results to checkpoints dir
dateTime = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime())
src = saveDir + '/' + str(resSaveFile) + '.txt'
dest = saveDir + '/checkpoints/' + str(resSaveFile) + dateTime + '.txt'
if os.path.exists(src):
shutil.move(src, dest)
# save results
save_obj(saveDir, resSaveFile, results)
with open(saveDir + '/' + str(resSaveFile) + '.txt','w') as file:
file.write(str(results))
# to load:
# results = load_obj('temp','3n4.txt')
print('\n Final Results: {}'.format(results))
# # Experiment 2
# Bengio method 4n4 with freeze
# In[ ]:
time.sleep(60*60*4)
# ### Exp 2 Paramters
# In[ ]:
get_ipython().run_line_magic('run', "-i 'arena.py'")
# Parameters
# sourceNet = '103' # trained on 3pc from scratch
sourceNet = '107' # trained on 4pc from scratch
freeze = True
resSaveFile = '4n4freeze'
epochs = 10
averageOver = 1
expDescrBaseName = "Bengio 4n4 - freeze = {} - average over {} runs".format(str(freeze), averageOver)
saveEveryRun = True # save stuff in results dir
saveWeightsCheckpoints = False # save checkpoints in results dir
saveTensorboardLogs = True # save logs in ./logs dir
resID = '---NORESID---' # used when not saving data, but fitModel() still needs a resID
fractionOfDataToUse = 1
plotDuringTraining = False
loadWeights = False
askForConfirmation = False
saveDir = 'bengioResults'
resSaveFile = resSaveFile + '-{}runAverage'.format(averageOver)
resSaveFileFullPath = saveDir + '/' + str(resSaveFile) + '.pkl'
# ### Create model and load data
# In[ ]:
# prepare save file
if not os.path.exists(resSaveFileFullPath):
print("Save file doesn't exists, creating...\n")
save_obj(saveDir, resSaveFile, [])
else:
print("Save file exists...\n")
# load data
X_train, X_test, y_train, y_test = loadData()
# create model
model, nnStr = createModel()
layersCount = len(model.layers)
# load old results
results = load_obj(saveDir, resSaveFile)
# initialize variables wrt old results
startTrainingAtLayer = len(results)
print("\nStarting/restarting TL at {} transfered layers".format(startTrainingAtLayer))
# ### Train
# In[ ]:
get_ipython().run_line_magic('run', "-i 'arena.py'")
for copyFirstNLayers in range(startTrainingAtLayer, layersCount):
print('\n\n')
print('==========================================================================================')
print('= =')
print('= Currently transfering first {} layers, out of {} ='.format(copyFirstNLayers, layersCount - 1))
print('= =')
print('==========================================================================================')
print()
# check if we are at the flatten layer, and skip it if so
if copyFirstNLayers == layersCount - 1:
copyFirstNLayers += 1
# train and average results
accumulatedScore = 0
for a in range(averageOver):
        # set experiment description text
expDescr = expDescrBaseName + '__copyLayers_{}__average_{}_of_{}'.format(copyFirstNLayers, a+1, averageOver)
# save current averagePosition to tmp file
with open(saveDir + '/' + str(resSaveFile) + '_currentPosition.txt','w') as file:
if copyFirstNLayers == layersCount:
location = copyFirstNLayers - 1
else:
location = copyFirstNLayers
file.write('Layers Transfered: {} out of {} \nInner avg loop position: {} out of {}'.format(location, layersCount-1, a+1, averageOver))
# load Model layers
model = loadNFirstLayers(model, sourceNet, copyFirstNLayers , freeze)
# Prepare save dir
if saveEveryRun:
resID = genNextResultsDir(model)
# train
fitHistory, logDir = trainModel(resID, model, saveWeightsCheckpoints, saveTensorboardLogs)
# score and save results
score = calcScore(model)
if saveEveryRun:
saveTrainResults(resID, model, logDir, score, copyFirstNLayers)
# update Return
accumulatedScore += score[1]
# append averaged results for one set of layers
results.append(accumulatedScore/averageOver)
# save old results to checkpoints dir
dateTime = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime())
src = saveDir + '/' + str(resSaveFile) + '.txt'
dest = saveDir + '/checkpoints/' + str(resSaveFile) + dateTime + '.txt'
if os.path.exists(src):
shutil.move(src, dest)
# save results
save_obj(saveDir, resSaveFile, results)
with open(saveDir + '/' + str(resSaveFile) + '.txt','w') as file:
file.write(str(results))
# to load:
# results = load_obj('temp','3n4.txt')
print('\n Final Results: {}'.format(results))
# In[ ]:
# # Experiment 3
# Expansion learning
# In[ ]:
| 28.418994
| 156
| 0.606644
|
95efcb126f343751f123424387fe639bf49a8e36
| 1,266
|
py
|
Python
|
index_cli/handlers/default/file.py
|
lishnih/index_cli
|
57f23d5df5168bcc73e23e0eeabbb8317014585b
|
[
"MIT"
] | null | null | null |
index_cli/handlers/default/file.py
|
lishnih/index_cli
|
57f23d5df5168bcc73e23e0eeabbb8317014585b
|
[
"MIT"
] | null | null | null |
index_cli/handlers/default/file.py
|
lishnih/index_cli
|
57f23d5df5168bcc73e23e0eeabbb8317014585b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
# Stan 2012-04-08, 2017-04-10
from __future__ import ( division, absolute_import,
print_function, unicode_literals )
import os, time
def preparing_file(filename, options, status, DIR):
filenames_encoding = options.get('filenames_encoding', 'cp1251')
basename = os.path.basename(filename)
try:
basename = unicode(basename)
except UnicodeDecodeError:
try:
basename = basename.decode(filenames_encoding)
except UnicodeDecodeError:
basename = unicode(basename, errors='replace')
_, ext = os.path.splitext(basename)
try:
statinfo = os.stat(filename)
    except OSError:
size = None
mtime = None
date = None
else:
size = statinfo.st_size
mtime = statinfo.st_mtime
date = time.strftime("%d.%m.%Y", time.gmtime(mtime))
status.file = basename
return dict(
_dir = DIR,
name = basename,
ext = ext,
size = size,
date = date,
_mtime = mtime,
)
def proceed_file(filename, options, status, recorder, DIR):
file_dict = preparing_file(filename, options, status, DIR)
FILE = recorder.reg_object('files', file_dict)
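# Hedged usage sketch (illustrative only; `DummyStatus` is a hypothetical
# stand-in for the status object this module expects, and the module itself
# targets Python 2 because it relies on the built-in `unicode`):
#
#     class DummyStatus(object):
#         file = None
#
#     info = preparing_file(__file__, {'filenames_encoding': 'utf-8'},
#                           DummyStatus(), DIR=None)
#     print(info['name'], info['size'], info['date'])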
| 24.823529
| 68
| 0.612954
|
c840158ece6fba0ec3792dcc8494337e841ed514
| 2,927
|
py
|
Python
|
Utilities/StratoSurfer_to_JSBSim.py
|
fehrx043/OpenFlightSim
|
9638c996fdb78c85d71c0e6faa0e64eb48cc96fa
|
[
"MIT"
] | 15
|
2019-03-15T17:28:23.000Z
|
2022-03-21T23:52:53.000Z
|
Utilities/StratoSurfer_to_JSBSim.py
|
fehrx043/OpenFlightSim
|
9638c996fdb78c85d71c0e6faa0e64eb48cc96fa
|
[
"MIT"
] | null | null | null |
Utilities/StratoSurfer_to_JSBSim.py
|
fehrx043/OpenFlightSim
|
9638c996fdb78c85d71c0e6faa0e64eb48cc96fa
|
[
"MIT"
] | 5
|
2019-03-28T17:35:50.000Z
|
2022-03-04T19:38:03.000Z
|
"""
University of Minnesota
Aerospace Engineering and Mechanics - UAV Lab
Copyright 2019 Regents of the University of Minnesota
See: LICENSE.md for complete license details
Author: Louis Mueller, Chris Regan
"""
import os.path
import numpy as np
import JSBSimWriteXml as JSBXml
# Constants
ft2m = 0.3048
m2ft = 1.0/ft2m
#%% Aircraft Inputs
aircraftName = 'StratoSurfer'
aeroName = aircraftName + '_DegenGeom'
# JSB Output
saveJsbPath = os.path.join('..', 'Simulation')
saveJsbPath = os.path.abspath(os.path.join(saveJsbPath, 'aircraft', aircraftName))
#
load = {}
# Aero Data
load['Aero'] = {}
load['Aero']['aircraftName'] = aircraftName
load['Aero']['aeroName'] = aeroName
# VSP Dataset
load['Aero']['vspPath'] = os.path.abspath(os.path.join('..', 'AeroDefinitions', 'OpenVSP'))
# AVL DataSet
#load['Aero']['avlPath'] = os.path.abspath(os.path.join('..', 'AeroDefinitions', 'AVL', aircraftName))
# Load Aircraft oFdm data
import StratoSurfer
oFdm = StratoSurfer.LoadAircraftDef(load)
# FIXIT - Increase the base Drag
addedDrag = np.zeros_like(oFdm['Aero']['Coef']['CD']['zero'])
oFdm['Aero']['Coef']['CD']['total'] += addedDrag
oFdm['Aero']['Coef']['CD']['zero'] += addedDrag
#%%
import matplotlib.pyplot as plt
cond = 'beta_rad'
coef = 'dCMn'
dep = 'dElev_rad'
numB, numV, numA = oFdm['Aero']['Coef']['CL']['zero'].shape
xPlot = oFdm['Aero']['Cond'][cond][:,:,1]
yPlot = oFdm['Aero']['Coef'][coef][dep][:,:,1]
plt.plot(xPlot, yPlot, '-*')
#%% Prepare JSBSim-ML data (oFdm -> JSB)
# Define Conversion from oFdm to JSB-ML
convertFdm2Jsb = {}
# Surface Names, JSBSim names must match with Servo models and FCS system definition
convertFdm2Jsb['Surf'] = {}
convertFdm2Jsb['Surf']['oFdm'] = ['d' + s + '_rad' for s in oFdm['Aero']['surfNames']]
convertFdm2Jsb['Surf']['jsb'] = ['fcs/pos' + s + '_rad' for s in oFdm['Aero']['surfNames']]
convertFdm2Jsb['Surf']['scale'] = [None] * len(convertFdm2Jsb['Surf']['oFdm'])
# Aero Deriv dependencies definitions
convertFdm2Jsb['Dep'] = {}
convertFdm2Jsb['Dep']['oFdm'] = ['alpha_rad', 'beta_rad', 'dpHat_rps', 'dqHat_rps', 'drHat_rps'] + convertFdm2Jsb['Surf']['oFdm']
convertFdm2Jsb['Dep']['jsb'] = ['aero/alpha-rad', 'aero/beta-rad', 'velocities/p-aero-rad_sec', 'velocities/q-aero-rad_sec', 'velocities/r-aero-rad_sec'] + convertFdm2Jsb['Surf']['jsb']
convertFdm2Jsb['Dep']['scale'] = [None, None, 'aero/bi2vel', 'aero/ci2vel', 'aero/bi2vel'] + convertFdm2Jsb['Surf']['scale']
convertFdm2Jsb['Coef'] = {}
convertFdm2Jsb['Coef']['oFdm'] = ['zero']
# Aero Table defintions
convertFdm2Jsb['TableDef'] = {}
convertFdm2Jsb['TableDef']['jsb'] = ['aero/beta-deg', 'velocities/vt-fps', 'aero/alpha-deg']
convertFdm2Jsb['TableDef']['brkPts'] = [oFdm['Aero']['TableDef']['betaBrkPts_deg'], oFdm['Aero']['TableDef']['vBrkPts_mps'] * m2ft, oFdm['Aero']['TableDef']['alphaBrkPts_deg']]
#%% Create the XML
JSBXml.Aircraft(oFdm, convertFdm2Jsb, saveJsbPath, aircraftName)
| 30.810526
| 185
| 0.682952
|
78182a9aefa3c3eb496979ce720d1f16cec79d5e
| 13,554
|
py
|
Python
|
main.py
|
rdenadai/snakeplissken
|
0d966a86512f899c037c8594eefef0d00ead74f3
|
[
"MIT"
] | 13
|
2019-08-13T00:04:49.000Z
|
2021-05-20T06:24:25.000Z
|
main.py
|
rdenadai/snakeplissken
|
0d966a86512f899c037c8594eefef0d00ead74f3
|
[
"MIT"
] | 3
|
2019-11-29T23:18:44.000Z
|
2022-03-14T13:54:11.000Z
|
main.py
|
rdenadai/snakeplissken
|
0d966a86512f899c037c8594eefef0d00ead74f3
|
[
"MIT"
] | 6
|
2019-03-05T11:59:04.000Z
|
2020-05-19T05:28:19.000Z
|
#!/usr/bin/env python
import sys, os
import time
import random
import pygame as pyg
from pygame.locals import *
import torch
from torch.optim import Adam, RMSprop
from torch.optim.lr_scheduler import CyclicLR
import torch.nn.functional as F
from configs import *
from utils.utilities import *
from ai.model import Transition
def draw_object(scr, color, position):
pyg.draw.rect(scr, color, position)
def select_action(state, n_actions, steps_done):
sample = np.random.random()
eps_threshold = EPS_END + (EPS_START - EPS_END) * np.exp(
-1.0 * steps_done / EPS_DECAY
)
if sample > eps_threshold:
with torch.no_grad():
# t.max(1) will return largest column value of each row.
# second column on max result is index of where max element was
# found, so we pick action with the larger expected reward.
return policy_net(state).max(1)[1].view(1, 1)
else:
return torch.tensor(
[[random.randrange(n_actions)]], device=device, dtype=torch.long
)
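# Hedged illustration of the epsilon-greedy schedule implemented above
# (EPS_START, EPS_END and EPS_DECAY come from configs.py; their concrete values
# are not shown here, so this stays a commented sketch):
#
#     for step in (0, 100, 1000, 10000):
#         eps = EPS_END + (EPS_START - EPS_END) * np.exp(-1.0 * step / EPS_DECAY)
#         print(step, round(eps, 3))  # exploration probability decays toward EPS_END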
if __name__ == "__main__":
# In linux center the window
os.environ["SDL_VIDEO_CENTERED"] = "1"
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Device: {device}")
# Pygame init loop
pyg.init()
# confs for pygame
stop_game = False
clock = pyg.time.Clock()
font = pyg.font.Font(None, 20)
# number o actions the agent can do
n_actions = 4
    # number of steps done; each step is one pass through the main while loop
steps_done = 0
# number of games played
n_game = 0
# Action to be executed by the agent
action = None
# Train phase
train, exploit, show_screen = True, True, True
options = {
"restart_mem": False,
"restart_models": False,
"restart_optim": False,
"random_clean_memory": False,
"opt": "rmsprop",
}
# Screen size
size = width, height = W_WIDTH, W_HEIGHT
screen = pyg.Surface(size)
if show_screen:
screen = pyg.display.set_mode(size, pyg.DOUBLEBUF)
# Icon and Title
pyg.display.set_icon(pyg.image.load("./img/snake.png"))
pyg.display.set_caption("Snake Plissken")
# print(get_game_screen(screen, device).shape)
# Load model
md_name = "snakeplissken_m2.model"
policy_net, target_net, optimizer, memories = load_model(
md_name, n_actions, device, **options
)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
# Starting High learning rate
for param_group in optimizer.param_groups:
if param_group["lr"] != 1e-6:
param_group["lr"] = 1e-6
break
# Memory
# Short is garbage
short_memory = memories["short"]
    # Long is where the bad and good are
good_long_memory = memories["good"]
bad_long_memory = memories["bad"]
vloss = [0]
# Game elements started
t_score, p_score, score = [1], 0, 0
wall = get_walls(width, height)
snake, apples = start_game(width, height)
state, next_state = None, None
t_start_game = time.time()
# Game Main loop
while True:
if show_screen:
for event in pyg.event.get():
if event.type == pyg.QUIT:
if train:
memories = {
"short": short_memory,
"good": good_long_memory,
"bad": bad_long_memory,
}
save_model(md_name, policy_net, target_net, optimizer, memories)
pyg.quit()
sys.exit()
# Stop the game, and restart
if stop_game:
# Restart game elements
state, next_state = None, None
stop_game = False
# Zeroed elapsed time
elapsed_time = 0
# Number of games +1
n_game += 1
t_score += [p_score]
if not train:
print(f"Score : {p_score}")
p_score, score = 0, 0
snake, apples = start_game(width, height)
# Load again the new screen: Initial State
if state is None:
state = get_state(screen, device)
# Action and reward of the agent
if train and not exploit:
action = select_action(state, n_actions, steps_done)
else:
with torch.no_grad():
action = policy_net(state).max(1)[1].view(1, 1)
# Key movements of agent to be done
K = action.item()
if K == 0 and snake.head().direction != KEY["DOWN"]:
snake.head().direction = KEY["UP"]
elif K == 1 and snake.head().direction != KEY["UP"]:
snake.head().direction = KEY["DOWN"]
elif K == 2 and snake.head().direction != KEY["RIGHT"]:
snake.head().direction = KEY["LEFT"]
elif K == 3 and snake.head().direction != KEY["LEFT"]:
snake.head().direction = KEY["RIGHT"]
# Human keys!
# pressed = pyg.key.get_pressed()
# if pressed[K_UP] and snake.head().direction != KEY["DOWN"]:
# snake.head().direction = KEY["UP"]
# elif pressed[K_DOWN] and snake.head().direction != KEY["UP"]:
# snake.head().direction = KEY["DOWN"]
# elif pressed[K_LEFT] and snake.head().direction != KEY["RIGHT"]:
# snake.head().direction = KEY["LEFT"]
# elif pressed[K_RIGHT] and snake.head().direction != KEY["LEFT"]:
# snake.head().direction = KEY["RIGHT"]
# Move of snake...
snake.move()
# Snake crash to its tail
if check_crash(snake):
score = SNAKE_EAT_ITSELF_PRIZE # + sum([1e-3 for segment in snake.stack])
stop_game = True
# Wall collision
# Check limits ! Border of screen
for block in wall:
if check_collision(snake.head(), block):
score = WALL_PRIZE
stop_game = True
break
# Check collision between snake and apple
del_apples = []
for i, apple in enumerate(apples):
if check_collision(snake.head(), apple):
del_apples.append(i)
p_score += APPLE_PRIZE
score = APPLE_PRIZE
snake.grow()
break
# Clean screen
screen.fill(BLACK)
# Draw Border
for block in wall:
draw_object(screen, block.color, block.position)
# Draw snake
for segment in snake.stack:
draw_object(screen, segment.color, (segment.x, segment.y) + segment.size)
# Draw appples
if len(apples) == 0:
apples = get_apples(width, height, get_snake_position(snake))
for apple in apples:
draw_object(screen, apple.color, apple.position)
for i in del_apples:
apples[i] = None
apples = list(filter(None.__ne__, apples))
# Reload apples position after some time
if steps_done % APPLE_RELOAD_STEPS == 0:
apples = get_apples(width, height, get_snake_position(snake))
# Next state for the agent
next_state = None
        # Give some points because it is still alive
if not stop_game:
score = SNAKE_ALIVE_PRIZE if score == 0 else score
next_state = get_next_state(screen, state, device)
if train:
reward = torch.tensor([score], device=device, dtype=torch.float)
# Reward for the agent
if not stop_game:
if score >= APPLE_PRIZE:
good_long_memory.push(state, action, next_state, reward)
else:
# Store the transition in memory
short_memory.push(state, action, next_state, reward)
else:
# Store the transition in memory
bad_long_memory.push(state, action, next_state, reward)
score = 0
# Move to the next state
state = next_state
# ----------------------------------------
# Perform one step of the optimization (on the target network)
if train and len(short_memory) > (BATCH_SIZE):
# Alternate a mode
if steps_done % 10_000 == 0:
# Decay learning rate
for param_group in optimizer.param_groups:
if param_group["lr"] > LEARNING_RATE:
param_group["lr"] = np.round(param_group["lr"] * 0.97, 10)
break
if steps_done % 5_000 == 0:
exploit = not exploit
transitions = []
for memory in [short_memory, good_long_memory, bad_long_memory]:
transitions += memory.sample(BATCH_SIZE)
size = len(transitions)
size = BATCH_SIZE if size > BATCH_SIZE else size
transitions = random.sample(transitions, size)
# Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for
# detailed explanation). This converts batch-array of Transitions
# to Transition of batch-arrays.
batch = Transition(*zip(*transitions))
# Compute a mask of non-final states and concatenate the batch elements
# (a final state would've been the one after which simulation ended)
non_final_mask = torch.tensor(
tuple(map(lambda s: s is not None, batch.next_state)), device=device
)
            final_mask = ~non_final_mask  # logical NOT; newer PyTorch forbids subtracting bool tensors
non_final_next_states = torch.cat(
[s for s in batch.next_state if s is not None]
)
state_batch = torch.cat(batch.state)
action_batch = torch.cat(batch.action)
reward_batch = torch.cat(batch.reward)
# Compute Q(s_t, a) - the model computes Q(s_t), then we select the
# columns of actions taken. These are the actions which would've been taken
# for each batch state according to policy_net
state_action_values = policy_net(state_batch).gather(1, action_batch)
# Compute V(s_{t+1}) for all next states.
# Expected values of actions for non_final_next_states are computed based
# on the "older" target_net; selecting their best reward with max(1)[0].
# This is merged based on the mask, such that we'll have either the expected
# state value or 0 in case the state was final.
expected_state_action_values = torch.zeros(BATCH_SIZE, device=device)
# Compute the expected Q values
expected_state_action_values[non_final_mask] = (
target_net(non_final_next_states).max(1)[0].detach() * GAMMA
+ reward_batch[non_final_mask].detach()
)
expected_state_action_values[final_mask] = reward_batch[final_mask].detach()
# Compute MSE loss
loss = F.mse_loss(
state_action_values, expected_state_action_values.unsqueeze(1)
)
# Compute Huber loss
# loss = F.smooth_l1_loss(
# state_action_values, expected_state_action_values.unsqueeze(1)
# )
vloss += [loss.item()]
# Optimize the model
optimizer.zero_grad()
loss.backward()
for param in policy_net.parameters():
param.grad.data.clamp_(-1, 1)
optimizer.step()
# ----------------------------------------
# Routines of pygame
clock.tick(FPS)
if show_screen:
pyg.display.update()
if train and steps_done % TARGET_UPDATE == 0:
steps = (
f"{np.round(steps_done / 1000, 2)}k"
if steps_done > 1000
else steps_done
)
print("*" * 20)
print(f"Steps: {steps}, N Game: {n_game}")
print(f"Score:")
print(f" - mean: {np.round(np.mean(t_score), 5)}")
print(f" - median: {np.round(np.median(t_score), 5)}")
print(f" - max: {np.round(np.max(t_score), 5)}")
print(f"FPS: {np.round(clock.get_fps(), 2)}")
print(f"Running for: {np.round(time.time() - t_start_game, 2)} secs")
print(f"In training mode: {train}")
print(f"In exploit mode: {exploit}")
print(f"Batch: {BATCH_SIZE}")
print(f"Loss: {np.round(np.mean(vloss), 5)}")
print("Optimizer:", optimizer.__class__.__name__)
for param_group in optimizer.param_groups:
print(f"learning rate={param_group['lr']}")
break
print("Memories:")
print(" - short: ", len(memories["short"]))
print(" - good: ", len(memories["good"]))
print(" - bad: ", len(memories["bad"]))
print("Update target network...")
target_net.load_state_dict(policy_net.state_dict())
t_score, vloss = [1], [0]
if train and steps_done % MODEL_SAVE == 0:
memories = {
"short": short_memory,
"good": good_long_memory,
"bad": bad_long_memory,
}
save_model(md_name, policy_net, target_net, optimizer, memories)
# One step done in the whole game...
steps_done += 1
| 36.831522
| 88
| 0.559909
|
0e7e73202f1c41260f633bd436bf0c99d5ab8099
| 3,039
|
py
|
Python
|
src/pandas_profiling/report/structure/variables/render_boolean.py
|
mereldawu/pandas-profiling
|
4cc48f14ac85bb0d362044fcf285cefe0b59165b
|
[
"MIT"
] | 1
|
2017-03-01T07:05:29.000Z
|
2017-03-01T07:05:29.000Z
|
src/pandas_profiling/report/structure/variables/render_boolean.py
|
sweekarsaxena/pandas-profiling
|
4cc48f14ac85bb0d362044fcf285cefe0b59165b
|
[
"MIT"
] | null | null | null |
src/pandas_profiling/report/structure/variables/render_boolean.py
|
sweekarsaxena/pandas-profiling
|
4cc48f14ac85bb0d362044fcf285cefe0b59165b
|
[
"MIT"
] | null | null | null |
from pandas_profiling.config import config
from pandas_profiling.report.presentation.core import (
Container,
FrequencyTable,
FrequencyTableSmall,
Image,
Table,
VariableInfo,
)
from pandas_profiling.report.presentation.frequency_table_utils import freq_table
from pandas_profiling.report.structure.variables.render_common import render_common
from pandas_profiling.visualisation.plot import pie_plot
def render_boolean(summary):
varid = summary["varid"]
n_obs_bool = config["vars"]["bool"]["n_obs"].get(int)
image_format = config["plot"]["image_format"].get(str)
# Prepare variables
template_variables = render_common(summary)
# Element composition
info = VariableInfo(
anchor_id=summary["varid"],
warnings=summary["warnings"],
var_type="Boolean",
var_name=summary["varname"],
description=summary["description"],
)
table = Table(
[
{
"name": "Distinct count",
"value": summary["n_unique"],
"fmt": "fmt",
"alert": "n_unique" in summary["warn_fields"],
},
{
"name": "Unique (%)",
"value": summary["p_unique"],
"fmt": "fmt_percent",
"alert": "p_unique" in summary["warn_fields"],
},
{
"name": "Missing",
"value": summary["n_missing"],
"fmt": "fmt",
"alert": "n_missing" in summary["warn_fields"],
},
{
"name": "Missing (%)",
"value": summary["p_missing"],
"fmt": "fmt_percent",
"alert": "p_missing" in summary["warn_fields"],
},
{
"name": "Memory size",
"value": summary["memory_size"],
"fmt": "fmt_bytesize",
"alert": False,
},
]
)
fqm = FrequencyTableSmall(
freq_table(
freqtable=summary["value_counts"],
n=summary["n"],
max_number_to_print=n_obs_bool,
),
redact=False,
)
template_variables["top"] = Container([info, table, fqm], sequence_type="grid")
items = [
FrequencyTable(
template_variables["freq_table_rows"],
name="Common Values",
anchor_id=f"{varid}frequency_table",
redact=False,
)
]
max_unique = config["plot"]["pie"]["max_unique"].get(int)
if max_unique > 0:
items.append(
Image(
pie_plot(summary["value_counts"], legend_kws={"loc": "upper right"}),
image_format=image_format,
alt="Chart",
name="Chart",
anchor_id=f"{varid}pie_chart",
)
)
template_variables["bottom"] = Container(
items, sequence_type="tabs", anchor_id=f"{varid}bottom"
)
return template_variables
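# Hedged usage sketch (illustrative only): render_boolean() expects the summary
# dict produced by pandas-profiling's describe step. The minimal dict below only
# names the keys the function above reads directly, with placeholder values;
# render_common() may require additional keys not shown here.
#
#     summary = {
#         "varid": "is_active", "varname": "is_active", "description": "",
#         "warnings": [], "warn_fields": [],
#         "n": 100, "n_unique": 2, "p_unique": 0.02,
#         "n_missing": 0, "p_missing": 0.0, "memory_size": 928,
#         "value_counts": value_counts_series,  # a pandas Series of counts
#     }
#     template_vars = render_boolean(summary)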
| 29.221154
| 85
| 0.527476
|
ef079ce66aef88dcbf87caf0814778dfcc198805
| 1,965
|
py
|
Python
|
test/functional/p2p_invalid_locator.py
|
34ro/tapyrus-core
|
52175cd1994fde3cec1bc506ebacd3fd99523103
|
[
"MIT"
] | null | null | null |
test/functional/p2p_invalid_locator.py
|
34ro/tapyrus-core
|
52175cd1994fde3cec1bc506ebacd3fd99523103
|
[
"MIT"
] | null | null | null |
test/functional/p2p_invalid_locator.py
|
34ro/tapyrus-core
|
52175cd1994fde3cec1bc506ebacd3fd99523103
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Copyright (c) 2019 Chaintope Inc.
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid locators.
"""
from test_framework.messages import msg_getheaders, msg_getblocks, MAX_LOCATOR_SZ
from test_framework.mininode import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
class InvalidLocatorTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = False
def run_test(self):
node = self.nodes[0] # convenience reference to the node
node.generate(1, self.signblockprivkeys) # Get node out of IBD
self.log.info('Test max locator size')
block_count = node.getblockcount()
for msg in [msg_getheaders(), msg_getblocks()]:
self.log.info('Wait for disconnect when sending {} hashes in locator'.format(MAX_LOCATOR_SZ + 1))
node.add_p2p_connection(P2PInterface())
msg.locator.vHave = [int(node.getblockhash(i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ + 1), -1)]
node.p2p.send_message(msg)
node.p2p.wait_for_disconnect()
node.disconnect_p2ps()
self.log.info('Wait for response when sending {} hashes in locator'.format(MAX_LOCATOR_SZ))
node.add_p2p_connection(P2PInterface())
msg.locator.vHave = [int(node.getblockhash(i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ), -1)]
node.p2p.send_message(msg)
if type(msg) == msg_getheaders:
node.p2p.wait_for_header(int(node.getbestblockhash(), 16))
else:
node.p2p.wait_for_block(int(node.getbestblockhash(), 16))
if __name__ == '__main__':
InvalidLocatorTest().main()
| 43.666667
| 135
| 0.683461
|
772ced89d4a522d3ce0bb5f5a03768cf161ecc80
| 381
|
py
|
Python
|
sm/wsgi.py
|
zarif007/SM-BackEnd-API-
|
6e9b96748b7df77d7072cd541779771e67ba744f
|
[
"MIT"
] | 1
|
2021-11-02T19:05:11.000Z
|
2021-11-02T19:05:11.000Z
|
sm/wsgi.py
|
zarif007/SM-BackEnd-API-
|
6e9b96748b7df77d7072cd541779771e67ba744f
|
[
"MIT"
] | null | null | null |
sm/wsgi.py
|
zarif007/SM-BackEnd-API-
|
6e9b96748b7df77d7072cd541779771e67ba744f
|
[
"MIT"
] | null | null | null |
"""
WSGI config for sm project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sm.settings')
application = get_wsgi_application()
| 22.411765
| 78
| 0.779528
|
fe81aa5d921428e39f1c91a617b2b2c8ac2bcecb
| 11,070
|
py
|
Python
|
examples/quantized_net/tutorial_dorefanet_cifar10_tfrecord.py
|
XJTUexperiment/tensorlayer
|
7184f728b35106e80f7424e09a2bde1721b82803
|
[
"Apache-2.0"
] | 1
|
2018-09-06T03:40:26.000Z
|
2018-09-06T03:40:26.000Z
|
examples/quantized_net/tutorial_dorefanet_cifar10_tfrecord.py
|
sdd9465/tensorlayer
|
690766535a591367ad86907835b39730f4aa1dea
|
[
"Apache-2.0"
] | null | null | null |
examples/quantized_net/tutorial_dorefanet_cifar10_tfrecord.py
|
sdd9465/tensorlayer
|
690766535a591367ad86907835b39730f4aa1dea
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
- 1. This model has 1,068,298 parameters and uses the DoReFa compression strategy (weights: 1 bit, activations: 3 bits);
after 500 epochs of training on a GPU, an accuracy of 81.1% was reached.
- 2. For simplified CNN layers see "Convolutional layer (Simplified)"
in read the docs website.
- 3. For data augmentation without TFRecord, see `tutorial_image_preprocess.py`.
Links
-------
.. paper:https://arxiv.org/abs/1606.06160
.. code:https://github.com/XJTUWYD/DoReFa_Cifar10
Note
------
The optimizer used here differs from the one in the official code.
Description
-----------
The images are processed as follows:
.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
.. They are approximately whitened to make the model insensitive to dynamic range.
For training, we additionally apply a series of random distortions to
artificially increase the data set size:
.. Randomly flip the image from left to right.
.. Randomly distort the image brightness.
.. Randomly distort the image contrast.
Speed Up
--------
Reading images from disk and distorting them can use a non-trivial amount
of processing time. To prevent these operations from slowing down training,
we run them inside 16 separate threads which continuously fill a TensorFlow queue.
"""
import os
import time
import tensorflow as tf
import tensorlayer as tl
tf.logging.set_verbosity(tf.logging.DEBUG)
tl.logging.set_verbosity(tl.logging.DEBUG)
model_file_name = "./model_cifar10_tfrecord.ckpt"
resume = False # load model, resume from previous checkpoint?
# Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py```
X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
print('X_train.shape', X_train.shape) # (50000, 32, 32, 3)
print('y_train.shape', y_train.shape) # (50000,)
print('X_test.shape', X_test.shape) # (10000, 32, 32, 3)
print('y_test.shape', y_test.shape) # (10000,)
print('X %s y %s' % (X_test.dtype, y_test.dtype))
def data_to_tfrecord(images, labels, filename):
"""Save data into TFRecord."""
if os.path.isfile(filename):
print("%s exists" % filename)
return
print("Converting data into %s ..." % filename)
# cwd = os.getcwd()
writer = tf.python_io.TFRecordWriter(filename)
for index, img in enumerate(images):
img_raw = img.tobytes()
# Visualize a image
# tl.visualize.frame(np.asarray(img, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)
label = int(labels[index])
# print(label)
# Convert the bytes back to image as follow:
# image = Image.frombytes('RGB', (32, 32), img_raw)
# image = np.fromstring(img_raw, np.float32)
# image = image.reshape([32, 32, 3])
# tl.visualize.frame(np.asarray(image, dtype=np.uint8), second=1, saveable=False, name='frame', fig_idx=1236)
example = tf.train.Example(
features=tf.train.Features(
feature={
"label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
}
)
)
writer.write(example.SerializeToString()) # Serialize To String
writer.close()
def read_and_decode(filename, is_train=None):
"""Return tensor to read from TFRecord."""
filename_queue = tf.train.string_input_producer([filename])
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example, features={
'label': tf.FixedLenFeature([], tf.int64),
'img_raw': tf.FixedLenFeature([], tf.string),
}
)
# You can do more image distortion here for training data
img = tf.decode_raw(features['img_raw'], tf.float32)
img = tf.reshape(img, [32, 32, 3])
# img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5
if is_train == True:
# 1. Randomly crop a [height, width] section of the image.
img = tf.random_crop(img, [24, 24, 3])
# 2. Randomly flip the image horizontally.
img = tf.image.random_flip_left_right(img)
# 3. Randomly change brightness.
img = tf.image.random_brightness(img, max_delta=63)
# 4. Randomly change contrast.
img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
# 5. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
elif is_train == False:
# 1. Crop the central [height, width] of the image.
img = tf.image.resize_image_with_crop_or_pad(img, 24, 24)
# 2. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
    elif is_train is None:
img = img
label = tf.cast(features['label'], tf.int32)
return img, label
# Save data into TFRecord files
data_to_tfrecord(images=X_train, labels=y_train, filename="train.cifar10")
data_to_tfrecord(images=X_test, labels=y_test, filename="test.cifar10")
batch_size = 128
model_file_name = "./model_cifar10_advanced.ckpt"
resume = False # load model, resume from previous checkpoint?
with tf.device('/cpu:0'):
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
# prepare data in cpu
x_train_, y_train_ = read_and_decode("train.cifar10", True)
x_test_, y_test_ = read_and_decode("test.cifar10", False)
# set the number of threads here
x_train_batch, y_train_batch = tf.train.shuffle_batch(
[x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32
)
# for testing, uses batch instead of shuffle_batch
x_test_batch, y_test_batch = tf.train.batch(
[x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32
)
def model(x_crop, y_, reuse):
"""For more simplified CNN APIs, check tensorlayer.org."""
with tf.variable_scope("model", reuse=reuse):
net = tl.layers.InputLayer(x_crop, name='input')
net = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn1')
net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')
net = tl.layers.LocalResponseNormLayer(net, 4, 1.0, 0.001 / 9.0, 0.75, name='norm1')
net = tl.layers.DorefaConv2d(net, 1, 3, 64, (5, 5), (1, 1), tf.nn.relu, padding='SAME', name='cnn2')
net = tl.layers.LocalResponseNormLayer(net, 4, 1.0, 0.001 / 9.0, 0.75, name='norm2')
net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
net = tl.layers.FlattenLayer(net, name='flatten')
net = tl.layers.DorefaDenseLayer(net, 1, 3, 384, act=tf.nn.relu, name='d1relu')
net = tl.layers.DorefaDenseLayer(net, 1, 3, 192, act=tf.nn.relu, name='d2relu')
net = tl.layers.DenseLayer(net, 10, act=None, name='output')
y = net.outputs
ce = tl.cost.cross_entropy(y, y_, name='cost')
# L2 for the MLP, without this, the accuracy will be reduced by 15%.
L2 = 0
for p in tl.layers.get_variables_with_name('relu/W', True, True):
L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
cost = ce + L2
# correct_prediction = tf.equal(tf.argmax(tf.nn.softmax(y), 1), y_)
correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
return net, cost, acc
# You can also use placeholder to feed_dict in data after using
# val, l = sess.run([x_train_batch, y_train_batch]) to get the data
# x_crop = tf.placeholder(tf.float32, shape=[batch_size, 24, 24, 3])
# y_ = tf.placeholder(tf.int32, shape=[batch_size,])
# cost, acc, network = model(x_crop, y_, None)
with tf.device('/gpu:0'): # <-- remove it if you don't have GPU
network, cost, acc, = model(x_train_batch, y_train_batch, False)
_, cost_test, acc_test = model(x_test_batch, y_test_batch, True)
# train
n_epoch = 50000
learning_rate = 0.0001
print_freq = 1
n_step_epoch = int(len(y_train) / batch_size)
n_step = n_epoch * n_step_epoch
with tf.device('/gpu:0'): # <-- remove it if you don't have GPU
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)
tl.layers.initialize_global_variables(sess)
if resume:
print("Load existing model " + "!" * 10)
saver = tf.train.Saver()
saver.restore(sess, model_file_name)
network.print_params(False)
network.print_layers()
print(' learning_rate: %f' % learning_rate)
print(' batch_size: %d' % batch_size)
print(' n_epoch: %d, step in an epoch: %d, total n_step: %d' % (n_epoch, n_step_epoch, n_step))
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
step = 0
for epoch in range(n_epoch):
start_time = time.time()
train_loss, train_acc, n_batch = 0, 0, 0
for s in range(n_step_epoch):
# You can also use placeholder to feed_dict in data after using
# val, l = sess.run([x_train_batch, y_train_batch])
# tl.visualize.images2d(val, second=3, saveable=False, name='batch', dtype=np.uint8, fig_idx=2020121)
# err, ac, _ = sess.run([cost, acc, train_op], feed_dict={x_crop: val, y_: l})
err, ac, _ = sess.run([cost, acc, train_op])
step += 1
train_loss += err
train_acc += ac
n_batch += 1
if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
print(
"Epoch %d : Step %d-%d of %d took %fs" %
(epoch, step, step + n_step_epoch, n_step, time.time() - start_time)
)
print(" train loss: %f" % (train_loss / n_batch))
print(" train acc: %f" % (train_acc / n_batch))
test_loss, test_acc, n_batch = 0, 0, 0
for _ in range(int(len(y_test) / batch_size)):
err, ac = sess.run([cost_test, acc_test])
test_loss += err
test_acc += ac
n_batch += 1
print(" test loss: %f" % (test_loss / n_batch))
print(" test acc: %f" % (test_acc / n_batch))
if (epoch + 1) % (print_freq * 50) == 0:
print("Save model " + "!" * 10)
saver = tf.train.Saver()
save_path = saver.save(sess, model_file_name)
# you can also save model into npz
tl.files.save_npz(network.all_params, name='model.npz', sess=sess)
# and restore it as follow:
# tl.files.load_and_assign_npz(sess=sess, name='model.npz', network=network)
coord.request_stop()
coord.join(threads)
sess.close()
| 41.152416
| 117
| 0.633333
|
bfc659afed5e1ca2964e4f5ac68375b9579caf6f
| 1,849
|
py
|
Python
|
var/spack/repos/builtin/packages/remhos/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/remhos/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/remhos/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from typing import List
from spack.package import *
class Remhos(MakefilePackage):
"""Remhos (REMap High-Order Solver) is a CEED miniapp that performs monotonic
and conservative high-order discontinuous field interpolation (remap)
using DG advection-based spatial discretization and explicit high-order
time-stepping.
"""
tags = ['proxy-app']
homepage = "https://github.com/CEED/Remhos"
url = "https://github.com/CEED/Remhos/archive/v1.0.tar.gz"
git = "https://github.com/CEED/Remhos.git"
maintainers = ['v-dobrev', 'tzanio', 'vladotomov']
version('develop', branch='master')
version('1.0', sha256='e60464a867fe5b1fd694fbb37bb51773723427f071c0ae26852a2804c08bbb32')
variant('metis', default=True, description='Enable/disable METIS support')
depends_on('mfem+mpi+metis', when='+metis')
depends_on('mfem+mpi~metis', when='~metis')
depends_on('mfem@develop', when='@develop')
depends_on('mfem@4.1.0:', when='@1.0')
@property
def build_targets(self):
targets = []
spec = self.spec
targets.append('MFEM_DIR=%s' % spec['mfem'].prefix)
targets.append('CONFIG_MK=%s' % spec['mfem'].package.config_mk)
targets.append('TEST_MK=%s' % spec['mfem'].package.test_mk)
return targets
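    # Rough sketch of the resulting build step, with illustrative placeholder paths
    # (the concrete values are taken from the mfem spec at build time):
    #   make MFEM_DIR=<mfem prefix> CONFIG_MK=<mfem config.mk> TEST_MK=<mfem test.mk>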
# See lib/spack/spack/build_systems/makefile.py
def check(self):
with working_dir(self.build_directory):
make('tests', *self.build_targets)
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('remhos', prefix.bin)
install_time_test_callbacks = [] # type: List[str]
| 33.017857
| 93
| 0.672796
|
c0451cf8a685cf6f8204609a6092e304e8377f9b
| 11,444
|
py
|
Python
|
Agents/NegamaxTranspositionTable.py
|
karlflores/WatchYourBackProject
|
00a7c32e46ea0b75580d17ea6a22372e4a005627
|
[
"Unlicense"
] | null | null | null |
Agents/NegamaxTranspositionTable.py
|
karlflores/WatchYourBackProject
|
00a7c32e46ea0b75580d17ea6a22372e4a005627
|
[
"Unlicense"
] | null | null | null |
Agents/NegamaxTranspositionTable.py
|
karlflores/WatchYourBackProject
|
00a7c32e46ea0b75580d17ea6a22372e4a005627
|
[
"Unlicense"
] | null | null | null |
from math import inf
from Constants import constant
from WatchYourBack.Board import Board
from Evaluation.Policies import Evaluation
from DataStructures.Transposition_Table import TranspositionTable
from copy import deepcopy
from time import time
from ErrorHandling.Errors import *
'''
NEGAMAX WITH A PURPOSE-BUILT TRANSPOSITION TABLE FOR MEMOIZATION OF BOARD STATES/AB CUTOFFS/BEST MOVES
THIS HAS THE SAME FUNCTIONALITY AND METHOD SIGNATURES AS NEGAMAX.PY -- THEREFORE IT CAN BE USED INTERCHANGEABLY
FOR COMPARISON
'''
class Negamax(object):
def __init__(self, board, colour, file_name):
# we want to create a node
self.tt = TranspositionTable()
# only use this board to complete the search
# save memory
self.board = deepcopy(board)
# for alpha beta search -- instead of passing it into the function calls we can use this
self.alpha = -inf
self.beta = inf
# defines the colours of min and max
self.player = colour
self.opponent = Board.get_opp_piece_type(self.player)
# default depth
self.depth = inf
# data structures for machine learning
self.eval_depth = 0
self.minimax_val = 0
self.policy_vector = []
# dictionary storing the available moves of the board
self.available_actions = {constant.WHITE_PIECE: {}, constant.BLACK_PIECE: {}}
# timing attributes
self.undo_effected = []
self.time_alloc = 0
self.time_rem = 0
self.time_start = 0
self.time_end = 0
self.total_time = 0
# load the evaluation function based on the colour of the player
if self.player == constant.WHITE_PIECE:
self.evaluation = Evaluation("./XML", "/white_weights")
else:
self.evaluation = Evaluation("./XML", "/black_weights")
'''
Iterative Deepening Negamax
This implements a time-cutoff such that search is terminated once we have reached the allocated time for evaluation.
IT RETURNS THE BEST MOVE IT HAS FOUND IN THE TIME ALLOCATED
'''
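    # The loop in itr_negamax below is plain iterative deepening: depths 1, 2, 3, ...
    # are searched in turn and the move from the deepest fully completed iteration is
    # kept; a TimeOut raised inside negamax() aborts the current iteration once the
    # per-move time budget is exhausted.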
def itr_negamax(self):
# clear the transposition table every time we make a new move -- this is to ensure that it doesn't grow too big
# if self.board.phase == constant.MOVING_PHASE and self.board.move_counter == 0:
if self.board.phase == constant.PLACEMENT_PHASE:
# clear the transposition table every time we want to evaluate a move in placement phase
# this is to limit the size of growth
self.tt.clear()
# set the max depth iterations based on the phase that we are in
MAX_ITER = 5
else:
MAX_ITER = 11
# update the root number of pieces every time we do a search on a new node
self.board.root_num_black = len(self.board.black_pieces)
self.board.root_num_white = len(self.board.white_pieces)
# default policy
available_actions = self.board.update_actions(self.player)
# if there are no available actions to make, therefore we just return None -- this is a forfeit
if len(available_actions) == 0:
return None
if self.board.phase == constant.PLACEMENT_PHASE:
self.time_alloc = 1500
else:
self.time_alloc = 1200
        # if we have used most of the overall time budget or the game has gone past 120 moves,
        # reduce the per-move time allocation
if self.total_time > 90000 or self.board.move_counter > 120:
self.time_alloc = 500
# if we are near the final shrinking phase, then we can decrease the time it has to
# evaluate
if self.board.move_counter > 150:
self.time_alloc = 190
best_depth = 1
val, move = 0, None
# set the time remaining for each move evaluation
self.time_rem = self.time_alloc
# iterative deepening begins here
for depth in range(1, MAX_ITER):
# get the best move until cut off is reached
try:
self.time_start = self.curr_millisecond_time()
val, move = self.negamax(depth, -inf, inf, self.player)
self.time_end = self.curr_millisecond_time()
# update the time remaining
self.time_rem = self.time_alloc - (self.time_end-self.time_start)
best_depth += 1
except TimeOut:
break
# add the total time to the time allocated
self.total_time += self.time_alloc
# print(self.total_time)
print(best_depth - 1)
self.eval_depth = best_depth - 1
return move
def set_player_colour(self, colour):
        self.player = colour
self.opponent = Board.get_opp_piece_type(colour)
# get the current time in milliseconds
@staticmethod
def curr_millisecond_time():
return int(time() * 1000)
'''
NEGAMAX DRIVER FUNCTION -- THIS IMPLEMENTS THE FOLLOWING:
- NEGAMAX WITH A TRANSPOSITION TABLE
- MOVE ORDERING USING THE BEST MOVE WE HAVE FOUND SO FAR (IF IT EXISTS IN THE TRANSPOSITION TABLE)
- MOVE ORDERING OF THE MOVES WE THINK TO BE FAVOURABLE USING A LIGHTWEIGHT EVALUATION FUNCTION
- SELECTING ONLY THE TOP FAVOURABLE MOVES TO EVALUATE USING MINIMAX -- THIS IS HEAVY GREEDY PRUNING
APPLIED TO NEGAMAX DESIGNED SUCH THAT WE ONLY LOOK AT MOVES THAT WE THINK WILL PRODUCE A GOOD OUTCOME,
    THUS PRUNING ANY MOVES THAT HAVE A HIGH CHANCE OF HAVING NO EFFECT ON THE GAME-STATE UTILITY.
'''
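    # Summary of the transposition-table bound handling implemented below (a
    # restatement for readability, not additional logic):
    #   TT_EXACT -> stored value is the exact negamax value; return it with its move
    #   TT_LOWER -> stored value is a lower bound; raise alpha to it
    #   TT_UPPER -> stored value is an upper bound; lower beta to it
    #   if the adjusted window closes (alpha >= beta), the stored best move is returned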
def negamax(self,depth,alpha,beta,colour):
# print(self.board.board_state)
# Timeout handling
self.time_end = self.curr_millisecond_time()
if self.time_end - self.time_start > self.time_rem:
raise TimeOut
opponent = Board.get_opp_piece_type(colour)
original_alpha = alpha
dic = {self.player: 1, self.opponent: -1}
move_to_try = None
# check if the current board state is in the transposition table
board_str = self.board.board_state.decode("utf-8")
key = self.tt.contains(board_str, colour, phase=self.board.phase)
if key is not None:
# get the value mappings from the dictionary
board_str = key[0]
entry = self.tt.get_entry(board_str,colour)
tt_value = entry[0]
tt_type = entry[1]
tt_best_move = entry[2]
tt_depth = entry[3]
# if we have found an entry in the transposition table, then the move
# we should try first is this best move
move_to_try = tt_best_move
if tt_depth >= depth:
# this is the PV node therefore this is the best move that we have found so far
if tt_type == constant.TT_EXACT:
return tt_value, tt_best_move
# the minimax value in the transposition table is a lower bound to the search
elif tt_type == constant.TT_LOWER:
if tt_value > alpha:
alpha = tt_value
# the value in the table corresponds to a beta cutoff and therefore it is an upper bound for beta
elif tt_type == constant.TT_UPPER:
if tt_value < beta:
beta = tt_value
# test for cutoff -- return the best move found so far
if alpha >= beta:
return tt_value, tt_best_move
# obtain the actions and sort them
actions = self.board.update_actions(colour)
actions = self.board.sort_actions(actions,colour)
# terminal test -- default case
if self.cutoff_test(depth):
val = self.evaluate_state(self.board, self.player, actions)*dic[colour]
return val, None
# do the negamax search search
best_val = -inf
best_action = None
# if we have found a best action to take in the transposition table, this should be the first
# move we should try -- put this at the start of the list of actions
if move_to_try is not None and move_to_try in actions:
# put the move to try at the first position -- therefore it will be searched first
actions = [move_to_try] + actions
i = 0
# split the list of actions into favourable and unfavourable actions
        # we only search the favourable actions if the action list is long enough
if len(actions) <= 12:
favourable = actions
elif 12 < len(actions) < 20:
favourable = actions[:12]
else:
favourable = actions[:len(actions)//2]
# iterate only through the favourable moves, ensuring that the number of moves is not too big
# the aim is to reduce the branching factor as much as we can, but also having enough moves to
# evaluate such that we get the part of the optimality decision making from negamax/minimax
# rather than a purely greedy approach.
# print(len(favourable))
for action in favourable:
            # skip the duplicate of the transposition-table best move -- it has already been searched first
if action == move_to_try and i != 0:
continue
i += 1
# update the board, record the eliminated pieces from that update
elim = self.board.update_board(action, colour)
score, temp = self.negamax(depth-1, -beta, -alpha, opponent)
score = -score
# undo the action applied to the board
self.undo_action(action,colour,elim)
# get the best score and action so far
if score > best_val:
best_val = score
best_action = action
# update alpha if needed
if best_val > alpha:
alpha = best_val
# test for cut off
if alpha >= beta:
break
# store the values in the transposition table
if best_val <= original_alpha:
# then this is an upper bound
tt_type = constant.TT_UPPER
elif best_val >= beta:
# if the best value we have found is a lower bound
tt_type = constant.TT_LOWER
# print("LOWER")
else:
# this is the PV node value
tt_type = constant.TT_EXACT
# add the entry to the transposition table
self.tt.add_entry(self.board.board_state,colour,best_val,tt_type,best_action, depth)
return best_val, best_action
# cut-off test -- either depth is zero or the board is at terminal state
def cutoff_test(self, depth):
if depth == 0:
return True
if self.is_terminal():
return True
return False
# evaluate the game state
def evaluate_state(self, board, colour, actions):
return self.evaluation.evaluate(board, colour, actions)
# update the negamax board representation for another search
def update_board(self, board):
self.board = deepcopy(board)
# terminal state check
def is_terminal(self):
return self.board.is_terminal()
# undo board wrapper
def undo_action(self,action,colour,elim):
return self.board.undo_action(action,colour,elim)
| 36.44586
| 120
| 0.618228
|
e6525d7dc07608f3b58e7c31bac7fb02b597cbb6
| 3,705
|
py
|
Python
|
ietf/meeting/management/commands/meetecho_conferences.py
|
jimfenton/datatracker
|
25c7c4801a0d971f30027d7e1ac21120cefeb242
|
[
"BSD-3-Clause"
] | 25
|
2022-03-05T08:26:52.000Z
|
2022-03-30T15:45:42.000Z
|
ietf/meeting/management/commands/meetecho_conferences.py
|
Evolution-team/datatracker
|
902e37d24d45c72ad401e761d4ef44af5cf20d03
|
[
"BSD-3-Clause"
] | 219
|
2022-03-04T17:29:12.000Z
|
2022-03-31T21:16:14.000Z
|
ietf/meeting/management/commands/meetecho_conferences.py
|
Evolution-team/datatracker
|
902e37d24d45c72ad401e761d4ef44af5cf20d03
|
[
"BSD-3-Clause"
] | 22
|
2022-03-04T15:34:34.000Z
|
2022-03-28T13:30:59.000Z
|
# Copyright The IETF Trust 2022, All Rights Reserved
# -*- coding: utf-8 -*-
import datetime
from textwrap import dedent
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from ietf.meeting.models import Session
from ietf.utils.meetecho import ConferenceManager, MeetechoAPIError
class Command(BaseCommand):
help = 'Manage Meetecho conferences'
def add_arguments(self, parser) -> None:
parser.add_argument('group', type=str)
parser.add_argument('-d', '--delete', type=int, action='append',
metavar='SESSION_PK',
help='Delete the conference associated with the specified Session')
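    # Illustrative invocations (the group name and session pk below are made up),
    # assuming the usual Django management-command entry point for this tree:
    #   $ ietf/manage.py meetecho_conferences mygroup            # list conferences
    #   $ ietf/manage.py meetecho_conferences mygroup -d 12345   # delete conference used by Session 12345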
def handle(self, group, delete, *args, **options):
conf_mgr = ConferenceManager(settings.MEETECHO_API_CONFIG)
if delete:
self.handle_delete_conferences(conf_mgr, group, delete)
else:
self.handle_list_conferences(conf_mgr, group)
def handle_list_conferences(self, conf_mgr, group):
confs, conf_sessions = self.fetch_conferences(conf_mgr, group)
self.stdout.write(f'Meetecho conferences for {group}:\n\n')
for conf in confs:
sessions_desc = ', '.join(str(s.pk) for s in conf_sessions[conf.id]) or None
self.stdout.write(
dedent(f'''\
* {conf.description}
Start time: {conf.start_time}
Duration: {int(conf.duration.total_seconds() // 60)} minutes
URL: {conf.url}
Associated session PKs: {sessions_desc}
''')
)
def handle_delete_conferences(self, conf_mgr, group, session_pks_to_delete):
sessions_to_delete = Session.objects.filter(pk__in=session_pks_to_delete)
confs, conf_sessions = self.fetch_conferences(conf_mgr, group)
confs_to_delete = []
descriptions = []
for session in sessions_to_delete:
for conf in confs:
associated = conf_sessions[conf.id]
if session in associated:
confs_to_delete.append(conf)
sessions_desc = ', '.join(str(s.pk) for s in associated) or None
descriptions.append(
f'{conf.description} ({conf.start_time}, {int(conf.duration.total_seconds() // 60)} mins) - used by {sessions_desc}'
)
if len(confs_to_delete) > 0:
self.stdout.write('Will delete:')
for desc in descriptions:
self.stdout.write(f'* {desc}')
try:
proceed = input('Proceed [y/N]? ').lower()
except EOFError:
proceed = 'n'
if proceed in ['y', 'yes']:
for conf, desc in zip(confs_to_delete, descriptions):
conf.delete()
self.stdout.write(f'Deleted {desc}')
else:
self.stdout.write('Nothing deleted.')
else:
self.stdout.write('No associated Meetecho conferences found')
def fetch_conferences(self, conf_mgr, group):
try:
confs = conf_mgr.fetch(group)
except MeetechoAPIError as err:
raise CommandError('API error fetching Meetecho conference data') from err
conf_sessions = {}
for conf in confs:
conf_sessions[conf.id] = Session.objects.filter(
group__acronym=group,
meeting__date__gte=datetime.date.today(),
remote_instructions__contains=conf.url,
)
return confs, conf_sessions
| 40.271739
| 140
| 0.585425
|
b6671cfdef76507063141074afd9965c8108973c
| 7,573
|
py
|
Python
|
deps/mozjs/js/src/config/expandlibs_exec.py
|
zpao/spidernode
|
843d5b5e9be55ce447fd03127aeeb2c7728ae168
|
[
"MIT"
] | 48
|
2015-01-09T20:39:35.000Z
|
2021-12-21T21:17:52.000Z
|
deps/mozjs/js/src/config/expandlibs_exec.py
|
ninja1002/spidernode
|
ada8fc559bd2c047a6e52c78af9d27ab273c87d5
|
[
"MIT"
] | 2
|
2016-02-05T10:27:37.000Z
|
2019-01-22T16:22:51.000Z
|
deps/mozjs/js/src/config/expandlibs_exec.py
|
ninja1002/spidernode
|
ada8fc559bd2c047a6e52c78af9d27ab273c87d5
|
[
"MIT"
] | 8
|
2015-01-12T17:14:36.000Z
|
2018-09-15T14:10:27.000Z
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is a build helper for libraries
#
# The Initial Developer of the Original Code is
# the Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mike Hommey <mh@glandium.org>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
'''expandlibs-exec.py applies expandlibs rules, and some more (see below) to
a given command line, and executes that command line with the expanded
arguments.
With the --extract argument (useful for e.g. $(AR)), it extracts object files
from static libraries (or use those listed in library descriptors directly).
With the --uselist argument (useful for e.g. $(CC)), it replaces all object
files with a list file. This can be used to avoid limitations in the length
of a command line. The kind of list file format used depends on the
EXPAND_LIBS_LIST_STYLE variable: 'list' for MSVC style lists (@file.list)
or 'linkerscript' for GNU ld linker scripts.
See https://bugzilla.mozilla.org/show_bug.cgi?id=584474#c59 for more details.
With the --reorder argument, followed by a file name, it will reorder the
object files from the command line according to the order given in the file.
Implies --extract.
'''
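# Illustrative list-file contents produced by ExpandArgsMore.makelist() below; the
# object names are made up and the form used depends on EXPAND_LIBS_LIST_STYLE:
#
#   'linkerscript' style (passed to the linker as "tmp.list"):
#       INPUT(foo.o)
#       INPUT(bar.o)
#
#   'list' style (passed as "@tmp.list"):
#       foo.o
#       bar.o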
from __future__ import with_statement
import sys
import os
from expandlibs import ExpandArgs, relativize, isObject
import expandlibs_config as conf
from optparse import OptionParser
import subprocess
import tempfile
import shutil
class ExpandArgsMore(ExpandArgs):
''' Meant to be used as 'with ExpandArgsMore(args) as ...: '''
def __enter__(self):
self.tmp = []
return self
def __exit__(self, type, value, tb):
'''Automatically remove temporary files'''
for tmp in self.tmp:
if os.path.isdir(tmp):
shutil.rmtree(tmp, True)
else:
os.remove(tmp)
def extract(self):
self[0:] = self._extract(self)
def _extract(self, args):
'''When a static library name is found, either extract its contents
in a temporary directory or use the information found in the
corresponding lib descriptor.
'''
ar_extract = conf.AR_EXTRACT.split()
newlist = []
for arg in args:
if os.path.splitext(arg)[1] == conf.LIB_SUFFIX:
if os.path.exists(arg + conf.LIBS_DESC_SUFFIX):
newlist += self._extract(self._expand_desc(arg))
elif os.path.exists(arg) and len(ar_extract):
tmp = tempfile.mkdtemp(dir=os.curdir)
self.tmp.append(tmp)
subprocess.call(ar_extract + [os.path.abspath(arg)], cwd=tmp)
objs = []
for root, dirs, files in os.walk(tmp):
objs += [relativize(os.path.join(root, f)) for f in files if isObject(f)]
newlist += objs
else:
newlist += [arg]
else:
newlist += [arg]
return newlist
def makelist(self):
'''Replaces object file names with a temporary list file, using a
list format depending on the EXPAND_LIBS_LIST_STYLE variable
'''
objs = [o for o in self if isObject(o)]
if not len(objs): return
fd, tmp = tempfile.mkstemp(suffix=".list",dir=os.curdir)
if conf.EXPAND_LIBS_LIST_STYLE == "linkerscript":
content = ["INPUT(%s)\n" % obj for obj in objs]
ref = tmp
elif conf.EXPAND_LIBS_LIST_STYLE == "list":
content = ["%s\n" % obj for obj in objs]
ref = "@" + tmp
else:
os.remove(tmp)
return
self.tmp.append(tmp)
f = os.fdopen(fd, "w")
f.writelines(content)
f.close()
idx = self.index(objs[0])
newlist = self[0:idx] + [ref] + [item for item in self[idx:] if item not in objs]
self[0:] = newlist
def reorder(self, order_list):
'''Given a list of file names without OBJ_SUFFIX, rearrange self
so that the object file names it contains are ordered according to
that list.
'''
objs = [o for o in self if isObject(o)]
if not objs: return
idx = self.index(objs[0])
# Keep everything before the first object, then the ordered objects,
# then any other objects, then any non-objects after the first object
objnames = dict([(os.path.splitext(os.path.basename(o))[0], o) for o in objs])
self[0:] = self[0:idx] + [objnames[o] for o in order_list if o in objnames] + \
[o for o in objs if os.path.splitext(os.path.basename(o))[0] not in order_list] + \
[x for x in self[idx:] if not isObject(x)]
def main():
parser = OptionParser()
parser.add_option("--extract", action="store_true", dest="extract",
help="when a library has no descriptor file, extract it first, when possible")
parser.add_option("--uselist", action="store_true", dest="uselist",
help="use a list file for objects when executing a command")
parser.add_option("--verbose", action="store_true", dest="verbose",
help="display executed command and temporary files content")
parser.add_option("--reorder", dest="reorder",
help="reorder the objects according to the given list", metavar="FILE")
(options, args) = parser.parse_args()
with ExpandArgsMore(args) as args:
if options.extract or options.reorder:
args.extract()
if options.reorder:
with open(options.reorder) as file:
args.reorder([l.strip() for l in file.readlines()])
if options.uselist:
args.makelist()
if options.verbose:
print >>sys.stderr, "Executing: " + " ".join(args)
for tmp in [f for f in args.tmp if os.path.isfile(f)]:
print >>sys.stderr, tmp + ":"
with open(tmp) as file:
print >>sys.stderr, "".join([" " + l for l in file.readlines()])
sys.stderr.flush()
exit(subprocess.call(args))
if __name__ == '__main__':
main()
| 41.839779
| 102
| 0.637264
|
ff12281d9267b63c8afc2e9fe8d1273c16025a3f
| 4,214
|
py
|
Python
|
idunn/api/utils.py
|
Qwant/idunn
|
65582dfed732093778bf7c2998db1e2cd78255b8
|
[
"Apache-2.0"
] | 3
|
2021-10-07T20:07:57.000Z
|
2022-03-04T15:23:26.000Z
|
idunn/api/utils.py
|
Qwant/idunn
|
65582dfed732093778bf7c2998db1e2cd78255b8
|
[
"Apache-2.0"
] | 16
|
2021-02-01T11:02:03.000Z
|
2022-03-23T14:44:50.000Z
|
idunn/api/utils.py
|
Qwant/idunn
|
65582dfed732093778bf7c2998db1e2cd78255b8
|
[
"Apache-2.0"
] | null | null | null |
from enum import Enum
import os
import logging
from idunn.blocks import (
Weather,
ContactBlock,
DescriptionEvent,
GradesBlock,
ImagesBlock,
InformationBlock,
OpeningDayEvent,
OpeningHourBlock,
Covid19Block,
PhoneBlock,
RecyclingBlock,
WebSiteBlock,
TransactionalBlock,
SocialBlock,
DescriptionBlock,
DeliveryBlock,
StarsBlock,
)
from idunn.utils.settings import _load_yaml_file
from idunn.datasources.mimirsbrunn import MimirPoiFilter
logger = logging.getLogger(__name__)
class Type(str, Enum):
# pylint: disable=invalid-name
# City = "city" # this field is available in Bragi but deprecated
House = "house"
Poi = "poi"
StopArea = "public_transport:stop_area"
Street = "street"
Zone = "zone"
def get_categories():
categories_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../utils/categories.yml"
)
return _load_yaml_file(categories_path)["categories"]
ALL_CATEGORIES = get_categories()
class CategoryEnum(str):
"""
    Methods defining the behavior of the enum `Category` defined below.
"""
def match_brand(self):
return ALL_CATEGORIES[self].get("match_brand", False)
def pj_what(self):
return ALL_CATEGORIES[self].get("pj_what")
def raw_filters(self) -> [MimirPoiFilter]:
raw_filters = ALL_CATEGORIES[self].get("raw_filters")
filters = []
for f in raw_filters:
f = f.copy()
poi_class = f.pop("class", None)
poi_subclass = f.pop("subclass", None)
filters.append(MimirPoiFilter(poi_class, poi_subclass, extra=f))
return filters
def regex(self):
return ALL_CATEGORIES[self].get("regex")
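# Hypothetical sketch of a categories.yml entry with the shape the accessors above
# expect; the real file ships with the project and these names/values are made up:
#
#   categories:
#     bakery:
#       match_brand: false
#       pj_what: "boulangerie"
#       regex: "bakery|boulangerie"
#       raw_filters:
#         - class: "shop"
#           subclass: "bakery"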
# Load the list of categories as an enum for validation purposes
Category = Enum("Category", {cat: cat for cat in ALL_CATEGORIES}, type=CategoryEnum)
class Verbosity(str, Enum):
"""
Control the verbosity of the output.
"""
LONG = "long"
SHORT = "short"
LIST = "list"
@classmethod
def default(cls):
return cls.LONG
@classmethod
def default_list(cls):
return cls.LIST
BLOCKS_BY_VERBOSITY = {
Verbosity.LONG: [
Weather,
OpeningDayEvent,
DescriptionEvent,
OpeningHourBlock,
Covid19Block,
PhoneBlock,
InformationBlock,
WebSiteBlock,
ContactBlock,
ImagesBlock,
GradesBlock,
RecyclingBlock,
TransactionalBlock,
SocialBlock,
DescriptionBlock,
DeliveryBlock,
StarsBlock,
],
Verbosity.LIST: [
OpeningDayEvent,
DescriptionEvent,
OpeningHourBlock,
Covid19Block,
PhoneBlock,
WebSiteBlock,
ImagesBlock,
GradesBlock,
RecyclingBlock,
TransactionalBlock,
SocialBlock,
DeliveryBlock,
StarsBlock,
],
Verbosity.SHORT: [OpeningHourBlock, Covid19Block],
}
def build_blocks(es_poi, lang, verbosity):
"""Returns the list of blocks we want
depending on the verbosity.
"""
blocks = []
for c in BLOCKS_BY_VERBOSITY[verbosity]:
if not c.is_enabled():
continue
block = c.from_es(es_poi, lang)
if block is not None:
blocks.append(block)
return blocks
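# Minimal usage sketch (es_poi and lang are whatever the caller already has; the
# values shown here are illustrative):
#   blocks = build_blocks(es_poi, lang="en", verbosity=Verbosity.SHORT)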
def get_name(properties, lang):
"""
    Return the Place name from the properties field of the elastic response. Here 'name'
    corresponds to the POI name in the language of the user request (i.e. the 'name:{lang}' field).
    If lang is None or 'name:{lang}' is not present in the properties, the local 'name' value
    is returned instead.
>>> get_name({}, 'fr') is None
True
>>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, None)
'spontini'
>>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, 'cz')
'spontini'
>>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, 'fr')
'spontinifr'
"""
name = properties.get(f"name:{lang}")
if name is None:
name = properties.get("name")
return name
| 24.358382
| 95
| 0.631467
|
ebeaff3059a289b57dd40af4801307783a538c78
| 1,869
|
py
|
Python
|
sdk/python/tests/compiler/testdata/resourceop_basic.py
|
shrivs3/kfp-tekton
|
b7c1d542d43ea24e70f24a874a7c972199e8f976
|
[
"Apache-2.0"
] | 102
|
2019-10-23T20:35:41.000Z
|
2022-03-27T10:28:56.000Z
|
sdk/python/tests/compiler/testdata/resourceop_basic.py
|
shrivs3/kfp-tekton
|
b7c1d542d43ea24e70f24a874a7c972199e8f976
|
[
"Apache-2.0"
] | 891
|
2019-10-24T04:08:17.000Z
|
2022-03-31T22:45:40.000Z
|
sdk/python/tests/compiler/testdata/resourceop_basic.py
|
shrivs3/kfp-tekton
|
b7c1d542d43ea24e70f24a874a7c972199e8f976
|
[
"Apache-2.0"
] | 85
|
2019-10-24T04:04:36.000Z
|
2022-03-01T10:52:57.000Z
|
# Copyright 2020 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example demonstrates how to use ResourceOp to specify the value of env var.
"""
import json
import kfp.dsl as dsl
_CONTAINER_MANIFEST = """
{
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {
"generateName": "resourceop-basic-job-"
},
"spec": {
"template": {
"metadata": {
"name": "resourceop-basic"
},
"spec": {
"containers": [{
"name": "sample-container",
"image": "k8s.gcr.io/busybox",
"command": ["/usr/bin/env"]
}],
"restartPolicy": "Never"
}
},
"backoffLimit": 4
}
}
"""
@dsl.pipeline(
name="resourceop-basic",
description="A Basic Example on ResourceOp Usage."
)
def resourceop_basic():
# Start a container. Print out env vars.
op = dsl.ResourceOp(
name='test-step',
k8s_resource=json.loads(_CONTAINER_MANIFEST),
action='create'
# success_condition='status.succeeded > 0',
# failure_condition='status.failed > 0'
)
if __name__ == '__main__':
from kfp_tekton.compiler import TektonCompiler
TektonCompiler().compile(resourceop_basic, __file__.replace('.py', '.yaml'))
| 27.086957
| 80
| 0.606207
|
735e12c1174fc33e41527163d001e68c2a21bbc5
| 135
|
py
|
Python
|
examples/tutorials/django/blog/models.py
|
psy-repos-rust/vagga
|
07d8b32c0656f17427d8b4399f3f36dfbf21ab88
|
[
"MIT"
] | 1,974
|
2015-01-05T01:45:10.000Z
|
2022-03-28T14:35:52.000Z
|
examples/tutorials/django/blog/models.py
|
psy-repos-rust/vagga
|
07d8b32c0656f17427d8b4399f3f36dfbf21ab88
|
[
"MIT"
] | 536
|
2015-01-06T20:33:40.000Z
|
2022-03-03T16:22:21.000Z
|
examples/tutorials/django/blog/models.py
|
psy-repos-rust/vagga
|
07d8b32c0656f17427d8b4399f3f36dfbf21ab88
|
[
"MIT"
] | 132
|
2015-02-10T11:03:30.000Z
|
2022-01-28T12:59:44.000Z
|
from django.db import models
class Article(models.Model):
title = models.CharField(max_length=100)
body = models.TextField()
| 19.285714
| 44
| 0.733333
|
5f5c6bc2273876409c2ed9c296496d11b1e7ba22
| 2,136
|
py
|
Python
|
core/migrations/0001_initial.py
|
henrylei80/customers-app-api
|
d8e889219cd2aa86de57b421c02a1ce34769c022
|
[
"MIT"
] | null | null | null |
core/migrations/0001_initial.py
|
henrylei80/customers-app-api
|
d8e889219cd2aa86de57b421c02a1ce34769c022
|
[
"MIT"
] | null | null | null |
core/migrations/0001_initial.py
|
henrylei80/customers-app-api
|
d8e889219cd2aa86de57b421c02a1ce34769c022
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.3 on 2019-08-25 20:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('address', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='DataSheet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=50)),
('historical_data', models.TextField()),
],
),
migrations.CreateModel(
name='Profession',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dtype', models.CharField(choices=[('PP', 'Passport'), ('ID', 'Identity card'), ('OT', 'Other')], max_length=2)),
('doc_number', models.CharField(max_length=50)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Customer')),
],
),
migrations.AddField(
model_name='customer',
name='Professions',
field=models.ManyToManyField(to='core.Profession'),
),
migrations.AddField(
model_name='customer',
name='data_sheet',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='core.DataSheet'),
),
]
| 36.827586
| 130
| 0.560861
|
526c069f3a8d19742745d2b4e4148d1990525074
| 60,016
|
py
|
Python
|
SybilRanking/scraper/InstagramScraper.py
|
mikeitexpert/osn-sybilranking
|
4f4fd65808d39f9e6d1a44ed5fe3b95f17e77aa3
|
[
"MIT"
] | 3
|
2019-03-11T15:30:08.000Z
|
2021-03-04T18:25:26.000Z
|
SybilRanking/scraper/InstagramScraper.py
|
mikeitexpert/osn-sybilranking
|
4f4fd65808d39f9e6d1a44ed5fe3b95f17e77aa3
|
[
"MIT"
] | null | null | null |
SybilRanking/scraper/InstagramScraper.py
|
mikeitexpert/osn-sybilranking
|
4f4fd65808d39f9e6d1a44ed5fe3b95f17e77aa3
|
[
"MIT"
] | 3
|
2019-10-31T08:52:23.000Z
|
2022-03-24T07:32:28.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import codecs
import configparser
import errno
import glob
from operator import itemgetter
import json
import logging.config
import hashlib
import os
import pickle
import re
import sys
import textwrap
import time
import csv
import inspect
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import warnings
import threading
import concurrent.futures
import requests
import tqdm
from .insta import *
try:
reload(sys) # Python 2.7
sys.setdefaultencoding("UTF8")
except NameError:
pass
warnings.filterwarnings('ignore')
input_lock = threading.RLock()
class LockedStream(object):
file = None
def __init__(self, file):
self.file = file
def write(self, x):
with input_lock:
self.file.write(x)
def flush(self):
return getattr(self.file, 'flush', lambda: None)()
original_stdout, original_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = map(LockedStream, (sys.stdout, sys.stderr))
def threaded_input(prompt):
with input_lock:
try:
with tqdm.external_write_mode():
original_stdout.write(prompt)
original_stdout.flush()
return sys.stdin.readline()
except AttributeError:
original_stdout.write('\n')
original_stdout.write(prompt)
original_stdout.flush()
return sys.stdin.readline()
input = threaded_input
class PartialContentException(Exception):
pass
class InstagramScraper(object):
"""InstagramScraper scrapes and downloads
    an Instagram user's photos and videos"""
basePath = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe()))
)
csvOuputfilename = basePath + \
'/../model/traindata/insta_train_data.csv'
def __init__(self, **kwargs):
default_attr = dict(username='', usernames=[], filename=None,
login_user=None, login_pass=None,
destination='./', retain_username=False, interactive=True,
quiet=False, maximum=0, media_metadata=False, latest=False,
latest_stamps=False, cookiejar=None,
media_types=['image', 'video', 'story-image', 'story-video'],
tag=False, location=False, search_location=False, comments=False,
verbose=0, include_location=False, filter=None,
template='{urlname}')
allowed_attr = list(default_attr.keys())
default_attr.update(kwargs)
for key in default_attr:
if key in allowed_attr:
self.__dict__[key] = default_attr.get(key)
# story media type means story-image & story-video
if 'story' in self.media_types:
self.media_types.remove('story')
if 'story-image' not in self.media_types:
self.media_types.append('story-image')
if 'story-video' not in self.media_types:
self.media_types.append('story-video')
# Read latest_stamps file with ConfigParser
self.latest_stamps_parser = None
if self.latest_stamps:
parser = configparser.ConfigParser()
parser.read(self.latest_stamps)
self.latest_stamps_parser = parser
# If we have a latest_stamps file, latest must be true as it's the common flag
self.latest = True
# Set up a logger
self.logger = InstagramScraper.get_logger(level=logging.DEBUG,
verbose=default_attr.get('verbose'))
self.posts = []
self.session = requests.Session()
self.session.headers = {'user-agent': CHROME_WIN_UA}
if self.cookiejar and os.path.exists(self.cookiejar):
with open(self.cookiejar, 'rb') as f:
self.session.cookies.update(pickle.load(f))
self.session.cookies.set('ig_pr', '1')
self.rhx_gis = None
self.cookies = None
self.logged_in = False
self.last_scraped_filemtime = 0
if default_attr['filter']:
self.filter = list(self.filter)
self.quit = False
self.jsonOuputfilename = "output_filename.json"
def sleep(self, secs):
min_delay = 1
for _ in range(secs // min_delay):
time.sleep(min_delay)
if self.quit:
return
time.sleep(secs % min_delay)
def _retry_prompt(self, url, exception_message):
"""Show prompt and return True: retry, False: ignore, None: abort"""
answer = input( 'Repeated error {0}\n(A)bort, (I)gnore, (R)etry or retry (F)orever?'.format(exception_message) )
if answer:
answer = answer[0].upper()
if answer == 'I':
self.logger.info( 'The user has chosen to ignore {0}'.format(url) )
return False
elif answer == 'R':
return True
elif answer == 'F':
self.logger.info( 'The user has chosen to retry forever' )
global MAX_RETRIES
MAX_RETRIES = sys.maxsize
return True
else:
self.logger.info( 'The user has chosen to abort' )
return None
def safe_get(self, *args, **kwargs):
# out of the box solution
# session.mount('https://', HTTPAdapter(max_retries=...))
# only covers failed DNS lookups, socket connections and connection timeouts
        # It doesn't work when the server terminates the connection while the response is being downloaded
retry = 0
retry_delay = RETRY_DELAY
while True:
if self.quit:
return
try:
response = self.session.get(timeout=CONNECT_TIMEOUT, cookies=self.cookies, *args, **kwargs)
if response.status_code == 404:
return
response.raise_for_status()
content_length = response.headers.get('Content-Length')
if content_length is not None and len(response.content) != int(content_length):
#if content_length is None we repeat anyway to get size and be confident
raise PartialContentException('Partial response')
return response
except (KeyboardInterrupt):
raise
except (requests.exceptions.RequestException, PartialContentException) as e:
if 'url' in kwargs:
url = kwargs['url']
elif len(args) > 0:
url = args[0]
if retry < MAX_RETRIES:
self.logger.warning('Retry after exception {0} on {1}'.format(repr(e), url))
self.sleep(retry_delay)
retry_delay = min( 2 * retry_delay, MAX_RETRY_DELAY )
retry = retry + 1
continue
else:
keep_trying = self._retry_prompt(url, repr(e))
if keep_trying == True:
retry = 0
continue
elif keep_trying == False:
return
raise
def get_json(self, *args, **kwargs):
"""Retrieve text from url. Return text as string or None if no data present """
resp = self.safe_get(*args, **kwargs)
if resp is not None:
return resp.text
def login(self):
"""Logs in to instagram."""
self.session.headers.update({'Referer': BASE_URL, 'user-agent': STORIES_UA})
req = self.session.get(BASE_URL)
self.session.headers.update({'X-CSRFToken': req.cookies['csrftoken']})
login_data = {'username': self.login_user, 'password': self.login_pass}
login = self.session.post(LOGIN_URL, data=login_data, allow_redirects=True)
self.session.headers.update({'X-CSRFToken': login.cookies['csrftoken']})
self.cookies = login.cookies
login_text = json.loads(login.text)
if login_text.get('authenticated') and login.status_code == 200:
self.logged_in = True
self.session.headers = {'user-agent': CHROME_WIN_UA}
self.rhx_gis = self.get_shared_data()['rhx_gis']
else:
self.logger.error('Login failed for ' + self.login_user)
if 'checkpoint_url' in login_text:
checkpoint_url = login_text.get('checkpoint_url')
self.logger.error('Please verify your account at ' + BASE_URL[0:-1] + checkpoint_url)
if self.interactive is True:
self.login_challenge(checkpoint_url)
elif 'errors' in login_text:
for count, error in enumerate(login_text['errors'].get('error')):
count += 1
self.logger.debug('Session error %(count)s: "%(error)s"' % locals())
else:
self.logger.error(json.dumps(login_text))
def login_challenge(self, checkpoint_url):
self.session.headers.update({'Referer': BASE_URL})
req = self.session.get(BASE_URL[:-1] + checkpoint_url)
self.session.headers.update({'X-CSRFToken': req.cookies['csrftoken'], 'X-Instagram-AJAX': '1'})
self.session.headers.update({'Referer': BASE_URL[:-1] + checkpoint_url})
mode = int(input('Choose a challenge mode (0 - SMS, 1 - Email): '))
challenge_data = {'choice': mode}
challenge = self.session.post(BASE_URL[:-1] + checkpoint_url, data=challenge_data, allow_redirects=True)
self.session.headers.update({'X-CSRFToken': challenge.cookies['csrftoken'], 'X-Instagram-AJAX': '1'})
code = int(input('Enter code received: '))
code_data = {'security_code': code}
code = self.session.post(BASE_URL[:-1] + checkpoint_url, data=code_data, allow_redirects=True)
self.session.headers.update({'X-CSRFToken': code.cookies['csrftoken']})
self.cookies = code.cookies
code_text = json.loads(code.text)
if code_text.get('status') == 'ok':
self.logged_in = True
elif 'errors' in code.text:
for count, error in enumerate(code_text['challenge']['errors']):
count += 1
self.logger.error('Session error %(count)s: "%(error)s"' % locals())
else:
self.logger.error(json.dumps(code_text))
def logout(self):
"""Logs out of instagram."""
if self.logged_in:
try:
logout_data = {'csrfmiddlewaretoken': self.cookies['csrftoken']}
self.session.post(LOGOUT_URL, data=logout_data)
self.logged_in = False
except requests.exceptions.RequestException:
self.logger.warning('Failed to log out ' + self.login_user)
def get_dst_dir(self, username):
"""Gets the destination directory and last scraped file time."""
if self.destination == './':
dst = './' + username
else:
if self.retain_username:
dst = self.destination + '/' + username
else:
dst = self.destination
# Resolve last scraped filetime
if self.latest_stamps_parser:
self.last_scraped_filemtime = self.get_last_scraped_timestamp(username)
elif os.path.isdir(dst):
self.last_scraped_filemtime = self.get_last_scraped_filemtime(dst)
return dst
def make_dir(self, dst):
try:
os.makedirs(dst)
except OSError as err:
if err.errno == errno.EEXIST and os.path.isdir(dst):
# Directory already exists
pass
else:
# Target dir exists as a file, or a different error
raise
def get_last_scraped_timestamp(self, username):
if self.latest_stamps_parser:
try:
return self.latest_stamps_parser.getint(LATEST_STAMPS_USER_SECTION, username)
except configparser.Error:
pass
return 0
def set_last_scraped_timestamp(self, username, timestamp):
if self.latest_stamps_parser:
if not self.latest_stamps_parser.has_section(LATEST_STAMPS_USER_SECTION):
self.latest_stamps_parser.add_section(LATEST_STAMPS_USER_SECTION)
self.latest_stamps_parser.set(LATEST_STAMPS_USER_SECTION, username, str(timestamp))
with open(self.latest_stamps, 'w') as f:
self.latest_stamps_parser.write(f)
def get_last_scraped_filemtime(self, dst):
"""Stores the last modified time of newest file in a directory."""
list_of_files = []
file_types = ('*.jpg', '*.mp4')
for type in file_types:
list_of_files.extend(glob.glob(dst + '/' + type))
if list_of_files:
latest_file = max(list_of_files, key=os.path.getmtime)
return int(os.path.getmtime(latest_file))
return 0
def query_comments_gen(self, shortcode, end_cursor=''):
"""Generator for comments."""
comments, end_cursor = self.__query_comments(shortcode, end_cursor)
if comments:
try:
while True:
for item in comments:
yield item
if end_cursor:
comments, end_cursor = self.__query_comments(shortcode, end_cursor)
else:
return
except ValueError:
self.logger.exception('Failed to query comments for shortcode ' + shortcode)
def __query_comments(self, shortcode, end_cursor=''):
params = QUERY_COMMENTS_VARS.format(shortcode, end_cursor)
self.update_ig_gis_header(params)
resp = self.get_json(QUERY_COMMENTS.format(params))
if resp is not None:
payload = json.loads(resp)['data']['shortcode_media']
if payload:
container = payload['edge_media_to_comment']
comments = [node['node'] for node in container['edges']]
end_cursor = container['page_info']['end_cursor']
return comments, end_cursor
return None, None
def scrape_hashtag(self):
self.__scrape_query(self.query_hashtag_gen)
def scrape_location(self):
self.__scrape_query(self.query_location_gen)
def worker_wrapper(self, fn, *args, **kwargs):
try:
if self.quit:
return
return fn(*args, **kwargs)
except:
self.logger.debug("Exception in worker thread", exc_info=sys.exc_info())
raise
def __scrape_query(self, media_generator, executor=concurrent.futures.ThreadPoolExecutor(max_workers=MAX_CONCURRENT_DOWNLOADS)):
"""Scrapes the specified value for posted media."""
self.quit = False
try:
for value in self.usernames:
self.posts = []
self.last_scraped_filemtime = 0
greatest_timestamp = 0
future_to_item = {}
dst = self.get_dst_dir(value)
if self.include_location:
media_exec = concurrent.futures.ThreadPoolExecutor(max_workers=5)
iter = 0
for item in tqdm.tqdm(media_generator(value), desc='Searching {0} for posts'.format(value), unit=" media",
disable=self.quiet):
if ((item['is_video'] is False and 'image' in self.media_types) or \
(item['is_video'] is True and 'video' in self.media_types)
) and self.is_new_media(item):
future = executor.submit(self.worker_wrapper, self.download, item, dst)
future_to_item[future] = item
if self.include_location and 'location' not in item:
media_exec.submit(self.worker_wrapper, self.__get_location, item)
if self.comments:
item['edge_media_to_comment']['data'] = list(self.query_comments_gen(item['shortcode']))
if self.media_metadata or self.comments or self.include_location:
self.posts.append(item)
iter = iter + 1
if self.maximum != 0 and iter >= self.maximum:
break
if future_to_item:
for future in tqdm.tqdm(concurrent.futures.as_completed(future_to_item),
total=len(future_to_item),
desc='Downloading', disable=self.quiet):
item = future_to_item[future]
if future.exception() is not None:
self.logger.warning(
'Media for {0} at {1} generated an exception: {2}'.format(value, item['urls'],
future.exception()))
else:
timestamp = self.__get_timestamp(item)
if timestamp > greatest_timestamp:
greatest_timestamp = timestamp
# Even bother saving it?
if greatest_timestamp > self.last_scraped_filemtime:
self.set_last_scraped_timestamp(value, greatest_timestamp)
if (self.media_metadata or self.comments or self.include_location) and self.posts:
self.save_json(self.posts, '{0}/{1}.json'.format(dst, value))
finally:
self.quit = True
def query_hashtag_gen(self, hashtag):
return self.__query_gen(QUERY_HASHTAG, QUERY_HASHTAG_VARS, 'hashtag', hashtag)
def query_location_gen(self, location):
return self.__query_gen(QUERY_LOCATION, QUERY_LOCATION_VARS, 'location', location)
def __query_gen(self, url, variables, entity_name, query, end_cursor=''):
"""Generator for hashtag and location."""
nodes, end_cursor = self.__query(url, variables, entity_name, query, end_cursor)
if nodes:
try:
while True:
for node in nodes:
yield node
if end_cursor:
nodes, end_cursor = self.__query(url, variables, entity_name, query, end_cursor)
else:
return
except ValueError:
self.logger.exception('Failed to query ' + query)
def __query(self, url, variables, entity_name, query, end_cursor):
params = variables.format(query, end_cursor)
self.update_ig_gis_header(params)
resp = self.get_json(url.format(params))
if resp is not None:
payload = json.loads(resp)['data'][entity_name]
if payload:
nodes = []
if end_cursor == '':
top_posts = payload['edge_' + entity_name + '_to_top_posts']
nodes.extend(self._get_nodes(top_posts))
posts = payload['edge_' + entity_name + '_to_media']
nodes.extend(self._get_nodes(posts))
end_cursor = posts['page_info']['end_cursor']
return nodes, end_cursor
return None, None
def _get_nodes(self, container):
return [self.augment_node(node['node']) for node in container['edges']]
def augment_node(self, node):
self.extract_tags(node)
details = None
if self.include_location and 'location' not in node:
details = self.__get_media_details(node['shortcode'])
node['location'] = details.get('location') if details else None
if 'urls' not in node:
node['urls'] = []
if node['is_video'] and 'video_url' in node:
node['urls'] = [node['video_url']]
elif '__typename' in node and node['__typename'] == 'GraphImage':
node['urls'] = [self.get_original_image(node['display_url'])]
else:
if details is None:
details = self.__get_media_details(node['shortcode'])
if details:
if '__typename' in details and details['__typename'] == 'GraphVideo':
node['urls'] = [details['video_url']]
elif '__typename' in details and details['__typename'] == 'GraphSidecar':
urls = []
for carousel_item in details['edge_sidecar_to_children']['edges']:
urls += self.augment_node(carousel_item['node'])['urls']
node['urls'] = urls
else:
node['urls'] = [self.get_original_image(details['display_url'])]
return node
def __get_media_details(self, shortcode):
resp = self.get_json(VIEW_MEDIA_URL.format(shortcode))
if resp is not None:
try:
return json.loads(resp)['graphql']['shortcode_media']
except ValueError:
self.logger.warning('Failed to get media details for ' + shortcode)
else:
self.logger.warning('Failed to get media details for ' + shortcode)
def __get_location(self, item):
code = item.get('shortcode', item.get('code'))
if code:
details = self.__get_media_details(code)
            item['location'] = details.get('location') if details else None
def scrape(self, executor=concurrent.futures.ThreadPoolExecutor(max_workers=MAX_CONCURRENT_DOWNLOADS)):
"""Crawls through and downloads user's media"""
self.session.headers = {'user-agent': STORIES_UA}
try:
with open(self.csvOuputfilename, 'w') as outputFile:
csvwriter = csv.writer(outputFile)
csvwriter.writerow(['username', 'following_count', 'follower_count', 'is_verified', 'post_count'])
usersData = []
for username in self.usernames:
self.posts = []
self.last_scraped_filemtime = 0
greatest_timestamp = 0
future_to_item = {}
dst = self.get_dst_dir(username)
# Get the user metadata.
shared_data = self.get_shared_data(username)
usersData.append( shared_data )
csvwriter.writerow([
shared_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["username"],
shared_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_follow"]["count"],
shared_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_followed_by"]["count"],
shared_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["is_verified"],
shared_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_felix_video_timeline"]["count"] +
shared_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_owner_to_timeline_media"]["count"],
])
# user = self.deep_get(shared_data, 'entry_data.ProfilePage[0].graphql.user')
#
# if not user:
# self.logger.error(
# 'Error getting user details for {0}. Please verify that the user exists.'.format(username))
# continue
# elif user and user['is_private'] and user['edge_owner_to_timeline_media']['count'] > 0 and not \
# user['edge_owner_to_timeline_media']['edges']:
# self.logger.error('User {0} is private'.format(username))
#
# self.rhx_gis = shared_data['rhx_gis']
#
# self.get_profile_pic(dst, executor, future_to_item, user, username)
# self.get_stories(dst, executor, future_to_item, user, username)
#
# # Crawls the media and sends it to the executor.
# try:
#
# self.get_media(dst, executor, future_to_item, user)
#
# # Displays the progress bar of completed downloads. Might not even pop up if all media is downloaded while
# # the above loop finishes.
# if future_to_item:
# for future in tqdm.tqdm(concurrent.futures.as_completed(future_to_item), total=len(future_to_item),
# desc='Downloading', disable=self.quiet):
# item = future_to_item[future]
#
# if future.exception() is not None:
# self.logger.error(
# 'Media at {0} generated an exception: {1}'.format(item['urls'], future.exception()))
# else:
# timestamp = self.__get_timestamp(item)
# if timestamp > greatest_timestamp:
# greatest_timestamp = timestamp
# # Even bother saving it?
# if greatest_timestamp > self.last_scraped_filemtime:
# self.set_last_scraped_timestamp(username, greatest_timestamp)
#
# if (self.media_metadata or self.comments or self.include_location) and self.posts:
# self.save_json(self.posts, '{0}/{1}.json'.format(dst, username))
# except ValueError:
# self.logger.error("Unable to scrape user - %s" % username)
with open(self.jsonOuputfilename, 'w') as jsonOutputFile:
json.dump(obj = usersData, fp = jsonOutputFile)
finally:
self.quit = True
self.logout()
def scrapeUser(self, username, executor=concurrent.futures.ThreadPoolExecutor(max_workers=MAX_CONCURRENT_DOWNLOADS)):
"""Crawls through and downloads user's media"""
self.session.headers = {'user-agent': STORIES_UA}
try:
self.last_scraped_filemtime = 0
greatest_timestamp = 0
future_to_item = {}
dst = self.get_dst_dir(username)
# Get the user metadata.
shared_data = self.get_shared_data(username)
userData = [
shared_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["username"],
shared_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_follow"]["count"],
shared_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_followed_by"]["count"],
shared_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["is_verified"],
shared_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_felix_video_timeline"]["count"] +
shared_data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["edge_owner_to_timeline_media"]["count"],
]
print("\tuserData = ", userData)
return userData
finally:
self.quit = True
self.logout()
def get_profile_pic(self, dst, executor, future_to_item, user, username):
if 'image' not in self.media_types:
return
url = USER_INFO.format(user['id'])
resp = self.get_json(url)
if resp is None:
self.logger.error('Error getting user info for {0}'.format(username))
return
user_info = json.loads(resp)['user']
if user_info['has_anonymous_profile_picture']:
return
try:
profile_pic_urls = [
user_info['hd_profile_pic_url_info']['url'],
user_info['hd_profile_pic_versions'][-1]['url'],
]
profile_pic_url = next(url for url in profile_pic_urls if url is not None)
except (KeyError, IndexError, StopIteration):
self.logger.warning('Failed to get high resolution profile picture for {0}'.format(username))
profile_pic_url = user['profile_pic_url_hd']
item = {'urls': [profile_pic_url], 'username': username, 'shortcode':'', 'created_time': 1286323200, '__typename': 'GraphProfilePic'}
if self.latest is False or os.path.isfile(dst + '/' + item['urls'][0].split('/')[-1]) is False:
for item in tqdm.tqdm([item], desc='Searching {0} for profile pic'.format(username), unit=" images",
ncols=0, disable=self.quiet):
future = executor.submit(self.worker_wrapper, self.download, item, dst)
future_to_item[future] = item
def get_stories(self, dst, executor, future_to_item, user, username):
"""Scrapes the user's stories."""
if self.logged_in and \
('story-image' in self.media_types or 'story-video' in self.media_types):
# Get the user's stories.
stories = self.fetch_stories(user['id'])
# Downloads the user's stories and sends it to the executor.
iter = 0
for item in tqdm.tqdm(stories, desc='Searching {0} for stories'.format(username), unit=" media",
disable=self.quiet):
if self.story_has_selected_media_types(item) and self.is_new_media(item):
item['username'] = username
item['shortcode'] = ''
future = executor.submit(self.worker_wrapper, self.download, item, dst)
future_to_item[future] = item
iter = iter + 1
if self.maximum != 0 and iter >= self.maximum:
break
def get_media(self, dst, executor, future_to_item, user):
"""Scrapes the user's posts for media."""
if 'image' not in self.media_types and 'video' not in self.media_types and 'none' not in self.media_types:
return
username = user['username']
if self.include_location:
media_exec = concurrent.futures.ThreadPoolExecutor(max_workers=5)
iter = 0
for item in tqdm.tqdm(self.query_media_gen(user), desc='Searching {0} for posts'.format(username),
unit=' media', disable=self.quiet):
# -Filter command line
if self.filter:
if 'tags' in item:
filtered = any(x in item['tags'] for x in self.filter)
if self.has_selected_media_types(item) and self.is_new_media(item) and filtered:
item['username']=username
future = executor.submit(self.worker_wrapper, self.download, item, dst)
future_to_item[future] = item
else:
                    # For when the filter is on but the media doesn't contain tags
pass
# --------------#
else:
if self.has_selected_media_types(item) and self.is_new_media(item):
item['username']=username
future = executor.submit(self.worker_wrapper, self.download, item, dst)
future_to_item[future] = item
if self.include_location:
item['username']=username
media_exec.submit(self.worker_wrapper, self.__get_location, item)
if self.comments:
item['username']=username
item['comments'] = {'data': list(self.query_comments_gen(item['shortcode']))}
if self.media_metadata or self.comments or self.include_location:
item['username']=username
self.posts.append(item)
iter = iter + 1
if self.maximum != 0 and iter >= self.maximum:
break
def get_shared_data(self, username=''):
"""Fetches the user's metadata."""
resp = self.get_json(BASE_URL + username)
if resp is not None and '_sharedData' in resp:
try:
shared_data = resp.split("window._sharedData = ")[1].split(";</script>")[0]
return json.loads(shared_data)
except (TypeError, KeyError, IndexError):
pass
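    # Illustrative shape of the page this parses (hypothetical and heavily truncated;
    # the real markup is far larger and the values below are placeholders):
    #
    #   '<script type="text/javascript">window._sharedData = {"entry_data": {"ProfilePage": [...]}};</script>'
    #
    # Splitting on "window._sharedData = " and ";</script>" isolates the JSON object,
    # which json.loads() turns into the dict consumed elsewhere in the scraper.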
def fetch_stories(self, user_id):
"""Fetches the user's stories."""
resp = self.get_json(STORIES_URL.format(user_id))
if resp is not None:
retval = json.loads(resp)
if retval['data'] and 'reels_media' in retval['data'] and len(retval['data']['reels_media']) > 0 and len(retval['data']['reels_media'][0]['items']) > 0:
return [self.set_story_url(item) for item in retval['data']['reels_media'][0]['items']]
return []
def query_media_gen(self, user, end_cursor=''):
"""Generator for media."""
media, end_cursor = self.__query_media(user['id'], end_cursor)
if media:
try:
while True:
for item in media:
if not self.is_new_media(item):
return
yield item
if end_cursor:
media, end_cursor = self.__query_media(user['id'], end_cursor)
else:
return
except ValueError:
self.logger.exception('Failed to query media for user ' + user['username'])
def __query_media(self, id, end_cursor=''):
params = QUERY_MEDIA_VARS.format(id, end_cursor)
self.update_ig_gis_header(params)
resp = self.get_json(QUERY_MEDIA.format(params))
if resp is not None:
payload = json.loads(resp)['data']['user']
if payload:
container = payload['edge_owner_to_timeline_media']
nodes = self._get_nodes(container)
end_cursor = container['page_info']['end_cursor']
return nodes, end_cursor
return None, None
def get_ig_gis(self, rhx_gis, params):
data = rhx_gis + ":" + params
if sys.version_info.major >= 3:
return hashlib.md5(data.encode('utf-8')).hexdigest()
else:
return hashlib.md5(data).hexdigest()
def update_ig_gis_header(self, params):
self.session.headers.update({
'x-instagram-gis': self.get_ig_gis(
self.rhx_gis,
params
)
})
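    # A minimal sketch of the x-instagram-gis signing used above; the token and
    # query variables below are hypothetical placeholders, not real session values.
    @staticmethod
    def _example_gis_signature():
        rhx_gis = '0123456789abcdef'                   # hypothetical rhx_gis token
        params = '{"id":"42","first":50,"after":""}'   # hypothetical query variables
        # Same computation as get_ig_gis: md5 of "<rhx_gis>:<params>"
        return hashlib.md5((rhx_gis + ':' + params).encode('utf-8')).hexdigest()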
def has_selected_media_types(self, item):
filetypes = {'jpg': 0, 'mp4': 0}
for url in item['urls']:
ext = self.__get_file_ext(url)
if ext not in filetypes:
filetypes[ext] = 0
filetypes[ext] += 1
if ('image' in self.media_types and filetypes['jpg'] > 0) or \
('video' in self.media_types and filetypes['mp4'] > 0):
return True
return False
def story_has_selected_media_types(self, item):
        # GraphStoryImage is an image story, GraphStoryVideo is a video story
if item['__typename'] == 'GraphStoryImage' and 'story-image' in self.media_types:
return True
if item['__typename'] == 'GraphStoryVideo' and 'story-video' in self.media_types:
return True
return False
def extract_tags(self, item):
"""Extracts the hashtags from the caption text."""
caption_text = ''
if 'caption' in item and item['caption']:
if isinstance(item['caption'], dict):
caption_text = item['caption']['text']
else:
caption_text = item['caption']
elif 'edge_media_to_caption' in item and item['edge_media_to_caption'] and item['edge_media_to_caption'][
'edges']:
caption_text = item['edge_media_to_caption']['edges'][0]['node']['text']
if caption_text:
# include words and emojis
item['tags'] = re.findall(
r"(?<!&)#(\w+|(?:[\xA9\xAE\u203C\u2049\u2122\u2139\u2194-\u2199\u21A9\u21AA\u231A\u231B\u2328\u2388\u23CF\u23E9-\u23F3\u23F8-\u23FA\u24C2\u25AA\u25AB\u25B6\u25C0\u25FB-\u25FE\u2600-\u2604\u260E\u2611\u2614\u2615\u2618\u261D\u2620\u2622\u2623\u2626\u262A\u262E\u262F\u2638-\u263A\u2648-\u2653\u2660\u2663\u2665\u2666\u2668\u267B\u267F\u2692-\u2694\u2696\u2697\u2699\u269B\u269C\u26A0\u26A1\u26AA\u26AB\u26B0\u26B1\u26BD\u26BE\u26C4\u26C5\u26C8\u26CE\u26CF\u26D1\u26D3\u26D4\u26E9\u26EA\u26F0-\u26F5\u26F7-\u26FA\u26FD\u2702\u2705\u2708-\u270D\u270F\u2712\u2714\u2716\u271D\u2721\u2728\u2733\u2734\u2744\u2747\u274C\u274E\u2753-\u2755\u2757\u2763\u2764\u2795-\u2797\u27A1\u27B0\u27BF\u2934\u2935\u2B05-\u2B07\u2B1B\u2B1C\u2B50\u2B55\u3030\u303D\u3297\u3299]|\uD83C[\uDC04\uDCCF\uDD70\uDD71\uDD7E\uDD7F\uDD8E\uDD91-\uDD9A\uDE01\uDE02\uDE1A\uDE2F\uDE32-\uDE3A\uDE50\uDE51\uDF00-\uDF21\uDF24-\uDF93\uDF96\uDF97\uDF99-\uDF9B\uDF9E-\uDFF0\uDFF3-\uDFF5\uDFF7-\uDFFF]|\uD83D[\uDC00-\uDCFD\uDCFF-\uDD3D\uDD49-\uDD4E\uDD50-\uDD67\uDD6F\uDD70\uDD73-\uDD79\uDD87\uDD8A-\uDD8D\uDD90\uDD95\uDD96\uDDA5\uDDA8\uDDB1\uDDB2\uDDBC\uDDC2-\uDDC4\uDDD1-\uDDD3\uDDDC-\uDDDE\uDDE1\uDDE3\uDDEF\uDDF3\uDDFA-\uDE4F\uDE80-\uDEC5\uDECB-\uDED0\uDEE0-\uDEE5\uDEE9\uDEEB\uDEEC\uDEF0\uDEF3]|\uD83E[\uDD10-\uDD18\uDD80-\uDD84\uDDC0]|(?:0\u20E3|1\u20E3|2\u20E3|3\u20E3|4\u20E3|5\u20E3|6\u20E3|7\u20E3|8\u20E3|9\u20E3|#\u20E3|\\*\u20E3|\uD83C(?:\uDDE6\uD83C(?:\uDDEB|\uDDFD|\uDDF1|\uDDF8|\uDDE9|\uDDF4|\uDDEE|\uDDF6|\uDDEC|\uDDF7|\uDDF2|\uDDFC|\uDDE8|\uDDFA|\uDDF9|\uDDFF|\uDDEA)|\uDDE7\uD83C(?:\uDDF8|\uDDED|\uDDE9|\uDDE7|\uDDFE|\uDDEA|\uDDFF|\uDDEF|\uDDF2|\uDDF9|\uDDF4|\uDDE6|\uDDFC|\uDDFB|\uDDF7|\uDDF3|\uDDEC|\uDDEB|\uDDEE|\uDDF6|\uDDF1)|\uDDE8\uD83C(?:\uDDF2|\uDDE6|\uDDFB|\uDDEB|\uDDF1|\uDDF3|\uDDFD|\uDDF5|\uDDE8|\uDDF4|\uDDEC|\uDDE9|\uDDF0|\uDDF7|\uDDEE|\uDDFA|\uDDFC|\uDDFE|\uDDFF|\uDDED)|\uDDE9\uD83C(?:\uDDFF|\uDDF0|\uDDEC|\uDDEF|\uDDF2|\uDDF4|\uDDEA)|\uDDEA\uD83C(?:\uDDE6|\uDDE8|\uDDEC|\uDDF7|\uDDEA|\uDDF9|\uDDFA|\uDDF8|\uDDED)|\uDDEB\uD83C(?:\uDDF0|\uDDF4|\uDDEF|\uDDEE|\uDDF7|\uDDF2)|\uDDEC\uD83C(?:\uDDF6|\uDDEB|\uDDE6|\uDDF2|\uDDEA|\uDDED|\uDDEE|\uDDF7|\uDDF1|\uDDE9|\uDDF5|\uDDFA|\uDDF9|\uDDEC|\uDDF3|\uDDFC|\uDDFE|\uDDF8|\uDDE7)|\uDDED\uD83C(?:\uDDF7|\uDDF9|\uDDF2|\uDDF3|\uDDF0|\uDDFA)|\uDDEE\uD83C(?:\uDDF4|\uDDE8|\uDDF8|\uDDF3|\uDDE9|\uDDF7|\uDDF6|\uDDEA|\uDDF2|\uDDF1|\uDDF9)|\uDDEF\uD83C(?:\uDDF2|\uDDF5|\uDDEA|\uDDF4)|\uDDF0\uD83C(?:\uDDED|\uDDFE|\uDDF2|\uDDFF|\uDDEA|\uDDEE|\uDDFC|\uDDEC|\uDDF5|\uDDF7|\uDDF3)|\uDDF1\uD83C(?:\uDDE6|\uDDFB|\uDDE7|\uDDF8|\uDDF7|\uDDFE|\uDDEE|\uDDF9|\uDDFA|\uDDF0|\uDDE8)|\uDDF2\uD83C(?:\uDDF4|\uDDF0|\uDDEC|\uDDFC|\uDDFE|\uDDFB|\uDDF1|\uDDF9|\uDDED|\uDDF6|\uDDF7|\uDDFA|\uDDFD|\uDDE9|\uDDE8|\uDDF3|\uDDEA|\uDDF8|\uDDE6|\uDDFF|\uDDF2|\uDDF5|\uDDEB)|\uDDF3\uD83C(?:\uDDE6|\uDDF7|\uDDF5|\uDDF1|\uDDE8|\uDDFF|\uDDEE|\uDDEA|\uDDEC|\uDDFA|\uDDEB|\uDDF4)|\uDDF4\uD83C\uDDF2|\uDDF5\uD83C(?:\uDDEB|\uDDF0|\uDDFC|\uDDF8|\uDDE6|\uDDEC|\uDDFE|\uDDEA|\uDDED|\uDDF3|\uDDF1|\uDDF9|\uDDF7|\uDDF2)|\uDDF6\uD83C\uDDE6|\uDDF7\uD83C(?:\uDDEA|\uDDF4|\uDDFA|\uDDFC|\uDDF8)|\uDDF8\uD83C(?:\uDDFB|\uDDF2|\uDDF9|\uDDE6|\uDDF3|\uDDE8|\uDDF1|\uDDEC|\uDDFD|\uDDF0|\uDDEE|\uDDE7|\uDDF4|\uDDF8|\uDDED|\uDDE9|\uDDF7|\uDDEF|\uDDFF|\uDDEA|\uDDFE)|\uDDF9\uD83C(?:\uDDE9|\uDDEB|\uDDFC|\uDDEF|\uDDFF|\uDDED|\uDDF1|\uDDEC|\uDDF0|\uDDF4|\uDDF9|\uDDE6|\uDDF3|\uDDF7|\uDDF2|\uDDE8|\uDDFB)|\uDDFA\uD83C(?:\uDDEC|\uDDE6|\uDDF8|\uDDFE|\uDDF2|\uDDFF)|\uDDFB\uD83C(?:\uDDEC|\uDDE8|\uDDEE|\uDDFA|\uDDE6|\uDDEA|\uDDF3)|\uDDFC\uD83C(?:\uDDF8|\uDDEB)|\uDDFD\uD83C\uDDF0|\uDDFE\uD83
C(?:\uDDF9|\uDDEA)|\uDDFF\uD83C(?:\uDDE6|\uDDF2|\uDDFC))))[\ufe00-\ufe0f\u200d]?)+",
caption_text, re.UNICODE)
item['tags'] = list(set(item['tags']))
return item
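    # Illustrative behaviour of extract_tags for a hypothetical caption (only the
    # plain-word part of the pattern above is exercised here):
    #
    #   item = {'caption': 'sunset at the beach #sunset #beach #nofilter'}
    #   item = self.extract_tags(item)
    #   item['tags']   # -> ['sunset', 'beach', 'nofilter'] (order not guaranteed, via set())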
def get_original_image(self, url):
"""Gets the full-size image from the specified url."""
        # these path parts somehow prevent us from changing the rest of the media url
#url = re.sub(r'/vp/[0-9A-Fa-f]{32}/[0-9A-Fa-f]{8}/', '/', url)
# remove dimensions to get largest image
#url = re.sub(r'/[sp]\d{3,}x\d{3,}/', '/', url)
# get non-square image if one exists
#url = re.sub(r'/c\d{1,}.\d{1,}.\d{1,}.\d{1,}/', '/', url)
return url
def set_story_url(self, item):
"""Sets the story url."""
urls = []
if 'video_resources' in item:
urls.append(item['video_resources'][-1]['src'])
if 'display_resources' in item:
urls.append(item['display_resources'][-1]['src'])
item['urls'] = urls
return item
def download(self, item, save_dir='./'):
"""Downloads the media file."""
for full_url, base_name in self.templatefilename(item):
url = full_url.split('?')[0] #try the static url first, stripping parameters
file_path = os.path.join(save_dir, base_name)
if not os.path.exists(os.path.dirname(file_path)):
self.make_dir(os.path.dirname(file_path))
if not os.path.isfile(file_path):
headers = {'Host': urlparse(url).hostname}
part_file = file_path + '.part'
downloaded = 0
total_length = None
with open(part_file, 'wb') as media_file:
try:
retry = 0
retry_delay = RETRY_DELAY
                        while True:
if self.quit:
return
try:
downloaded_before = downloaded
headers['Range'] = 'bytes={0}-'.format(downloaded_before)
with self.session.get(url, cookies=self.cookies, headers=headers, stream=True, timeout=CONNECT_TIMEOUT) as response:
if response.status_code == 404:
                                        # Instagram doesn't lie about a 404; the media is gone
break
if response.status_code == 403 and url != full_url:
#see issue #254
url = full_url
continue
response.raise_for_status()
if response.status_code == 206:
try:
match = re.match(r'bytes (?P<first>\d+)-(?P<last>\d+)/(?P<size>\d+)', response.headers['Content-Range'])
range_file_position = int(match.group('first'))
if range_file_position != downloaded_before:
raise Exception()
total_length = int(match.group('size'))
media_file.truncate(total_length)
                                        except Exception:
raise requests.exceptions.InvalidHeader('Invalid range response "{0}" for requested "{1}"'.format(
response.headers.get('Content-Range'), headers.get('Range')))
elif response.status_code == 200:
if downloaded_before != 0:
downloaded_before = 0
downloaded = 0
media_file.seek(0)
content_length = response.headers.get('Content-Length')
if content_length is None:
self.logger.warning('No Content-Length in response, the file {0} may be partially downloaded'.format(base_name))
else:
total_length = int(content_length)
media_file.truncate(total_length)
else:
                                        raise PartialContentException('Wrong status code {0}'.format(response.status_code))
for chunk in response.iter_content(chunk_size=64*1024):
if chunk:
downloaded += len(chunk)
media_file.write(chunk)
if self.quit:
return
if downloaded != total_length and total_length is not None:
raise PartialContentException('Got first {0} bytes from {1}'.format(downloaded, total_length))
break
                            # In case of an exception the part file is intentionally not removed;
                            # it is easier to examine it later when analysing the logs.
                            # Please do not add os.remove here.
                            except KeyboardInterrupt:
                                raise
except (requests.exceptions.RequestException, PartialContentException) as e:
if downloaded - downloaded_before > 0:
# if we got some data on this iteration do not count it as a failure
self.logger.warning('Continue after exception {0} on {1}'.format(repr(e), url))
retry = 0 # the next fail will be first in a row with no data
continue
if retry < MAX_RETRIES:
self.logger.warning('Retry after exception {0} on {1}'.format(repr(e), url))
self.sleep(retry_delay)
                                    retry_delay = min(2 * retry_delay, MAX_RETRY_DELAY)
retry = retry + 1
continue
else:
keep_trying = self._retry_prompt(url, repr(e))
                                    if keep_trying is True:
                                        retry = 0
                                        continue
                                    elif keep_trying is False:
                                        break
raise
finally:
media_file.truncate(downloaded)
if downloaded == total_length or total_length is None:
os.rename(part_file, file_path)
timestamp = self.__get_timestamp(item)
file_time = int(timestamp if timestamp else time.time())
os.utime(file_path, (file_time, file_time))
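    # Resume sketch for the Range handling above (illustrative numbers): with 1024
    # bytes already in the .part file, the request carries 'Range: bytes=1024-'.
    # A 206 reply must answer with 'Content-Range: bytes 1024-<last>/<size>' (any
    # other first-byte position is treated as an invalid header and retried), while
    # a 200 reply means the server ignored the range, so the download restarts at 0.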
def templatefilename(self, item):
for url in item['urls']:
filename, extension = os.path.splitext(os.path.split(url.split('?')[0])[1])
try:
template = self.template
template_values = {
'username' : item['username'],
'urlname': filename,
'shortcode': str(item['shortcode']),
'mediatype' : item['__typename'][5:],
'datetime': time.strftime('%Y%m%d %Hh%Mm%Ss',
time.localtime(self.__get_timestamp(item))),
'date': time.strftime('%Y%m%d', time.localtime(self.__get_timestamp(item))),
'year': time.strftime('%Y', time.localtime(self.__get_timestamp(item))),
'month': time.strftime('%m', time.localtime(self.__get_timestamp(item))),
'day': time.strftime('%d', time.localtime(self.__get_timestamp(item))),
'h': time.strftime('%Hh', time.localtime(self.__get_timestamp(item))),
'm': time.strftime('%Mm', time.localtime(self.__get_timestamp(item))),
's': time.strftime('%Ss', time.localtime(self.__get_timestamp(item)))}
customfilename = str(template.format(**template_values) + extension)
yield url, customfilename
except KeyError:
customfilename = str(filename + extension)
yield url, customfilename
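    # Worked example for the template above (hypothetical item values): with
    # --template '{username}/{date}_{shortcode}', username 'jane', shortcode
    # 'Bxyz123', a timestamp on 2018-01-01 and a source url ending in 'abc.jpg',
    # the yielded filename is 'jane/20180101_Bxyz123.jpg' (the extension always
    # comes from the url). Unknown placeholders raise KeyError and fall back to
    # the plain url filename, as handled above.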
def is_new_media(self, item):
"""Returns True if the media is new."""
if self.latest is False or self.last_scraped_filemtime == 0:
return True
current_timestamp = self.__get_timestamp(item)
return current_timestamp > 0 and current_timestamp > self.last_scraped_filemtime
@staticmethod
def __get_timestamp(item):
if item:
for key in ['taken_at_timestamp', 'created_time', 'taken_at', 'date']:
found = item.get(key, 0)
try:
found = int(found)
if found > 1: # >1 to ignore any boolean casts
return found
except ValueError:
pass
return 0
@staticmethod
def __get_file_ext(url):
return os.path.splitext(urlparse(url).path)[1][1:].strip().lower()
@staticmethod
def __search(query):
resp = requests.get(SEARCH_URL.format(query))
return json.loads(resp.text)
def search_locations(self):
query = ' '.join(self.usernames)
result = self.__search(query)
if len(result['places']) == 0:
raise ValueError("No locations found for query '{0}'".format(query))
sorted_places = sorted(result['places'], key=itemgetter('position'))
for item in sorted_places[0:5]:
place = item['place']
print('location-id: {0}, title: {1}, subtitle: {2}, city: {3}, lat: {4}, lng: {5}'.format(
place['location']['pk'],
place['title'],
place['subtitle'],
place['location']['city'],
place['location']['lat'],
place['location']['lng']
))
@staticmethod
def save_json(data, dst='./'):
"""Saves the data to a json file."""
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
if data:
with open(dst, 'wb') as f:
json.dump(data, codecs.getwriter('utf-8')(f), indent=4, sort_keys=True, ensure_ascii=False)
@staticmethod
def get_logger(level=logging.DEBUG, verbose=0):
"""Returns a logger."""
logger = logging.getLogger(__name__)
fh = logging.FileHandler('instagram-scraper.log', 'w')
fh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
fh.setLevel(level)
logger.addHandler(fh)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
sh_lvls = [logging.ERROR, logging.WARNING, logging.INFO]
sh.setLevel(sh_lvls[verbose])
logger.addHandler(sh)
logger.setLevel(level)
return logger
@staticmethod
def parse_file_usernames(usernames_file):
"""Parses a file containing a list of usernames."""
users = []
try:
with open(usernames_file) as user_file:
for line in user_file.readlines():
# Find all usernames delimited by ,; or whitespace
users += re.findall(r'[^,;\s]+', line.split("#")[0])
except IOError as err:
            raise ValueError('File not found ' + str(err))
return users
@staticmethod
def parse_delimited_str(input):
"""Parse the string input as a list of delimited tokens."""
return re.findall(r'[^,;\s]+', input)
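    # Illustrative inputs for the two parsers above (hypothetical values):
    #
    #   a usernames file line 'alice, bob;carol  # a comment'
    #   -> parse_file_usernames(...) yields ['alice', 'bob', 'carol']
    #
    #   parse_delimited_str('alice;bob carol') -> ['alice', 'bob', 'carol']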
def deep_get(self, dict, path):
def _split_indexes(key):
split_array_index = re.compile(r'[.\[\]]+') # ['foo', '0']
return filter(None, split_array_index.split(key))
ends_with_index = re.compile(r'\[(.*?)\]$') # foo[0]
keylist = path.split('.')
val = dict
for key in keylist:
try:
if ends_with_index.search(key):
for prop in _split_indexes(key):
if prop.isdigit():
val = val[int(prop)]
else:
val = val[prop]
else:
val = val[key]
except (KeyError, IndexError, TypeError):
return None
return val
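    # Minimal usage sketch for deep_get (illustrative data, not a real response):
    # dotted keys walk nested dicts and '[n]' indexes into lists, e.g.
    #
    #   data = {'entry_data': {'ProfilePage': [{'graphql': {'user': {'id': '42'}}}]}}
    #   self.deep_get(data, 'entry_data.ProfilePage[0].graphql.user.id')   # -> '42'
    #
    # Missing keys or indexes return None instead of raising.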
def save_cookies(self):
if self.cookiejar:
with open(self.cookiejar, 'wb') as f:
pickle.dump(self.session.cookies, f)
def main():
parser = argparse.ArgumentParser(
description="instagram-scraper scrapes and downloads an instagram user's photos and videos.",
epilog=textwrap.dedent("""
        You can hide your credentials from the history by reading your
        username from a local file:
$ instagram-scraper @insta_args.txt user_to_scrape
with insta_args.txt looking like this:
-u=my_username
-p=my_password
        You can add all the arguments you want to that file; just remember to
        have one argument per line.
        Customize the filename:
        by adding the option --template or -T
        Default is: {urlname}
        These options are available:
        {username}: Instagram user(s) to scrape.
        {shortcode}: post shortcode; empty for profile_pic and story.
        {urlname}: filename from the url.
        {mediatype}: type of media.
        {datetime}: date and time that the photo/video was posted on,
                    format is: 20180101 01h01m01s
        {date}: date that the photo/video was posted on,
                format is: 20180101
{year}: format is: 2018
{month}: format is: 01-12
{day}: format is: 01-31
{h}: hour, format is: 00-23h
{m}: minute, format is 00-59m
{s}: second, format is 00-59s
"""),
formatter_class=argparse.RawDescriptionHelpFormatter,
fromfile_prefix_chars='@')
parser.add_argument('username', help='Instagram user(s) to scrape', nargs='*')
parser.add_argument('--destination', '-d', default='./', help='Download destination')
parser.add_argument('--login-user', '--login_user', '-u', default=None, help='Instagram login user', required=True)
parser.add_argument('--login-pass', '--login_pass', '-p', default=None, help='Instagram login password', required=True)
parser.add_argument('--filename', '-f', help='Path to a file containing a list of users to scrape')
parser.add_argument('--quiet', '-q', default=False, action='store_true', help='Be quiet while scraping')
parser.add_argument('--maximum', '-m', type=int, default=0, help='Maximum number of items to scrape')
parser.add_argument('--retain-username', '--retain_username', '-n', action='store_true', default=False,
help='Creates username subdirectory when destination flag is set')
parser.add_argument('--media-metadata', '--media_metadata', action='store_true', default=False,
help='Save media metadata to json file')
parser.add_argument('--include-location', '--include_location', action='store_true', default=False,
help='Include location data when saving media metadata')
parser.add_argument('--media-types', '--media_types', '-t', nargs='+', default=['image', 'video', 'story'],
help='Specify media types to scrape')
parser.add_argument('--latest', action='store_true', default=False, help='Scrape new media since the last scrape')
parser.add_argument('--latest-stamps', '--latest_stamps', default=None,
help='Scrape new media since timestamps by user in specified file')
parser.add_argument('--cookiejar', '--cookierjar', default=None,
help='File in which to store cookies so that they can be reused between runs.')
parser.add_argument('--tag', action='store_true', default=False, help='Scrape media using a hashtag')
parser.add_argument('--filter', default=None, help='Filter by tags in user posts', nargs='*')
parser.add_argument('--location', action='store_true', default=False, help='Scrape media using a location-id')
parser.add_argument('--search-location', action='store_true', default=False, help='Search for locations by name')
parser.add_argument('--comments', action='store_true', default=False, help='Save post comments to json file')
parser.add_argument('--interactive', '-i', action='store_true', default=False,
help='Enable interactive login challenge solving')
parser.add_argument('--retry-forever', action='store_true', default=False,
help='Retry download attempts endlessly when errors are received')
parser.add_argument('--verbose', '-v', type=int, default=0, help='Logging verbosity level')
parser.add_argument('--template', '-T', type=str, default='{urlname}', help='Customize filename template')
args = parser.parse_args()
if (args.login_user and args.login_pass is None) or (args.login_user is None and args.login_pass):
parser.print_help()
raise ValueError('Must provide login user AND password')
if not args.username and args.filename is None:
parser.print_help()
raise ValueError('Must provide username(s) OR a file containing a list of username(s)')
elif args.username and args.filename:
parser.print_help()
raise ValueError('Must provide only one of the following: username(s) OR a filename containing username(s)')
if args.tag and args.location:
parser.print_help()
raise ValueError('Must provide only one of the following: hashtag OR location')
if args.tag and args.filter:
parser.print_help()
raise ValueError('Filters apply to user posts')
if args.filename:
args.usernames = InstagramScraper.parse_file_usernames(args.filename)
else:
args.usernames = InstagramScraper.parse_delimited_str(','.join(args.username))
if args.media_types and len(args.media_types) == 1 and re.compile(r'[,;\s]+').findall(args.media_types[0]):
args.media_types = InstagramScraper.parse_delimited_str(args.media_types[0])
if args.retry_forever:
global MAX_RETRIES
MAX_RETRIES = sys.maxsize
scraper = InstagramScraper(**vars(args))
scraper.login()
if args.tag:
scraper.scrape_hashtag()
elif args.location:
scraper.scrape_location()
elif args.search_location:
scraper.search_locations()
else:
scraper.scrape()
scraper.save_cookies()
if __name__ == '__main__':
main()
| 46.237288
| 3,655
| 0.557001
|
bca9b415fde2716809938d8ecad1e34de55b983b
| 10,726
|
py
|
Python
|
tests/protocol/mockwiredata.py
|
utkarsh2102/walinuxagent
|
c5f2376a4870962838c388924ae530caee6d6db5
|
[
"Apache-2.0"
] | null | null | null |
tests/protocol/mockwiredata.py
|
utkarsh2102/walinuxagent
|
c5f2376a4870962838c388924ae530caee6d6db5
|
[
"Apache-2.0"
] | null | null | null |
tests/protocol/mockwiredata.py
|
utkarsh2102/walinuxagent
|
c5f2376a4870962838c388924ae530caee6d6db5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import re
from tests.tools import load_bin_data, load_data, MagicMock, Mock
from azurelinuxagent.common.exception import HttpError, ResourceGoneError
from azurelinuxagent.common.future import httpclient
from azurelinuxagent.common.utils.cryptutil import CryptUtil
DATA_FILE = {
"version_info": "wire/version_info.xml",
"goal_state": "wire/goal_state.xml",
"hosting_env": "wire/hosting_env.xml",
"shared_config": "wire/shared_config.xml",
"certs": "wire/certs.xml",
"ext_conf": "wire/ext_conf.xml",
"manifest": "wire/manifest.xml",
"ga_manifest": "wire/ga_manifest.xml",
"trans_prv": "wire/trans_prv",
"trans_cert": "wire/trans_cert",
"test_ext": "ext/sample_ext-1.3.0.zip"
}
DATA_FILE_NO_EXT = DATA_FILE.copy()
DATA_FILE_NO_EXT["goal_state"] = "wire/goal_state_no_ext.xml"
DATA_FILE_EXT_NO_SETTINGS = DATA_FILE.copy()
DATA_FILE_EXT_NO_SETTINGS["ext_conf"] = "wire/ext_conf_no_settings.xml"
DATA_FILE_EXT_NO_PUBLIC = DATA_FILE.copy()
DATA_FILE_EXT_NO_PUBLIC["ext_conf"] = "wire/ext_conf_no_public.xml"
DATA_FILE_EXT_AUTOUPGRADE = DATA_FILE.copy()
DATA_FILE_EXT_AUTOUPGRADE["ext_conf"] = "wire/ext_conf_autoupgrade.xml"
DATA_FILE_EXT_INTERNALVERSION = DATA_FILE.copy()
DATA_FILE_EXT_INTERNALVERSION["ext_conf"] = "wire/ext_conf_internalversion.xml"
DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION = DATA_FILE.copy()
DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION["ext_conf"] = "wire/ext_conf_autoupgrade_internalversion.xml"
DATA_FILE_EXT_ROLLINGUPGRADE = DATA_FILE.copy()
DATA_FILE_EXT_ROLLINGUPGRADE["ext_conf"] = "wire/ext_conf_upgradeguid.xml"
DATA_FILE_EXT_SEQUENCING = DATA_FILE.copy()
DATA_FILE_EXT_SEQUENCING["ext_conf"] = "wire/ext_conf_sequencing.xml"
DATA_FILE_EXT_DELETION = DATA_FILE.copy()
DATA_FILE_EXT_DELETION["manifest"] = "wire/manifest_deletion.xml"
DATA_FILE_EXT_SINGLE = DATA_FILE.copy()
DATA_FILE_EXT_SINGLE["manifest"] = "wire/manifest_deletion.xml"
DATA_FILE_MULTIPLE_EXT = DATA_FILE.copy()
DATA_FILE_MULTIPLE_EXT["ext_conf"] = "wire/ext_conf_multiple_extensions.xml"
DATA_FILE_NO_CERT_FORMAT = DATA_FILE.copy()
DATA_FILE_NO_CERT_FORMAT["certs"] = "wire/certs_no_format_specified.xml"
DATA_FILE_CERT_FORMAT_NOT_PFX = DATA_FILE.copy()
DATA_FILE_CERT_FORMAT_NOT_PFX["certs"] = "wire/certs_format_not_pfx.xml"
class WireProtocolData(object):
def __init__(self, data_files=DATA_FILE):
self.emulate_stale_goal_state = False
self.call_counts = {
"comp=versions": 0,
"/versions": 0,
"goalstate": 0,
"hostingenvuri": 0,
"sharedconfiguri": 0,
"certificatesuri": 0,
"extensionsconfiguri": 0,
"extensionArtifact": 0,
"manifest.xml": 0,
"manifest_of_ga.xml": 0,
"ExampleHandlerLinux": 0
}
self.data_files = data_files
self.version_info = None
self.goal_state = None
self.hosting_env = None
self.shared_config = None
self.certs = None
self.ext_conf = None
self.manifest = None
self.ga_manifest = None
self.trans_prv = None
self.trans_cert = None
self.ext = None
self.reload()
def reload(self):
self.version_info = load_data(self.data_files.get("version_info"))
self.goal_state = load_data(self.data_files.get("goal_state"))
self.hosting_env = load_data(self.data_files.get("hosting_env"))
self.shared_config = load_data(self.data_files.get("shared_config"))
self.certs = load_data(self.data_files.get("certs"))
self.ext_conf = load_data(self.data_files.get("ext_conf"))
self.manifest = load_data(self.data_files.get("manifest"))
self.ga_manifest = load_data(self.data_files.get("ga_manifest"))
self.trans_prv = load_data(self.data_files.get("trans_prv"))
self.trans_cert = load_data(self.data_files.get("trans_cert"))
self.ext = load_bin_data(self.data_files.get("test_ext"))
def mock_http_get(self, url, *args, **kwargs):
content = None
resp = MagicMock()
resp.status = httpclient.OK
# wire server versions
if "comp=versions" in url:
content = self.version_info
self.call_counts["comp=versions"] += 1
# HostPlugin versions
elif "/versions" in url:
content = '["2015-09-01"]'
self.call_counts["/versions"] += 1
elif "goalstate" in url:
content = self.goal_state
self.call_counts["goalstate"] += 1
elif "hostingenvuri" in url:
content = self.hosting_env
self.call_counts["hostingenvuri"] += 1
elif "sharedconfiguri" in url:
content = self.shared_config
self.call_counts["sharedconfiguri"] += 1
elif "certificatesuri" in url:
content = self.certs
self.call_counts["certificatesuri"] += 1
elif "extensionsconfiguri" in url:
content = self.ext_conf
self.call_counts["extensionsconfiguri"] += 1
else:
# A stale GoalState results in a 400 from the HostPlugin
# for which the HTTP handler in restutil raises ResourceGoneError
if self.emulate_stale_goal_state:
if "extensionArtifact" in url:
self.emulate_stale_goal_state = False
self.call_counts["extensionArtifact"] += 1
raise ResourceGoneError()
else:
raise HttpError()
# For HostPlugin requests, replace the URL with that passed
# via the x-ms-artifact-location header
if "extensionArtifact" in url:
self.call_counts["extensionArtifact"] += 1
if "headers" not in kwargs or \
"x-ms-artifact-location" not in kwargs["headers"]:
raise Exception("Bad HEADERS passed to HostPlugin: {0}",
kwargs)
url = kwargs["headers"]["x-ms-artifact-location"]
if "manifest.xml" in url:
content = self.manifest
self.call_counts["manifest.xml"] += 1
elif "manifest_of_ga.xml" in url:
content = self.ga_manifest
self.call_counts["manifest_of_ga.xml"] += 1
elif "ExampleHandlerLinux" in url:
content = self.ext
self.call_counts["ExampleHandlerLinux"] += 1
resp.read = Mock(return_value=content)
return resp
else:
raise Exception("Bad url {0}".format(url))
resp.read = Mock(return_value=content.encode("utf-8"))
return resp
def mock_crypt_util(self, *args, **kw):
        # Partially patch an instance method of the CryptUtil class
cryptutil = CryptUtil(*args, **kw)
cryptutil.gen_transport_cert = Mock(side_effect=self.mock_gen_trans_cert)
return cryptutil
def mock_gen_trans_cert(self, trans_prv_file, trans_cert_file):
with open(trans_prv_file, 'w+') as prv_file:
prv_file.write(self.trans_prv)
with open(trans_cert_file, 'w+') as cert_file:
cert_file.write(self.trans_cert)
#
# Having trouble reading the regular expressions below? you are not alone!
#
# For the use of "(?<=" "(?=" see 7.2.1 in https://docs.python.org/3.1/library/re.html
# For the use of "\g<1>" see backreferences in https://docs.python.org/3.1/library/re.html#re.sub
#
# Note that these regular expressions are not enough to parse all valid XML documents (e.g. they do
# not account for metacharacters like < or > in the values) but they are good enough for the test
# data. There are some basic checks, but the functions may not match valid XML or produce invalid
# XML if their input is too complex.
#
@staticmethod
def replace_xml_element_value(xml_document, element_name, element_value):
new_xml_document = re.sub(r'(?<=<{0}>).+(?=</{0}>)'.format(element_name), element_value, xml_document)
if new_xml_document == xml_document:
raise Exception("Could not match element '{0}'", element_name)
return new_xml_document
@staticmethod
def replace_xml_attribute_value(xml_document, element_name, attribute_name, attribute_value):
new_xml_document = re.sub(r'(?<=<{0} )(.*{1}=")[^"]+(?="[^>]*>)'.format(element_name, attribute_name), r'\g<1>{0}'.format(attribute_value), xml_document)
if new_xml_document == xml_document:
raise Exception("Could not match attribute '{0}' of element '{1}'", attribute_name, element_name)
return new_xml_document
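    # Illustrative usage of the two helpers above (hypothetical XML, not test data):
    #
    #   WireProtocolData.replace_xml_element_value(
    #       "<GoalState><Incarnation>1</Incarnation></GoalState>", "Incarnation", "2")
    #   # -> "<GoalState><Incarnation>2</Incarnation></GoalState>"
    #
    #   WireProtocolData.replace_xml_attribute_value(
    #       '<Deployment name="a" />', "Deployment", "name", "b")
    #   # -> '<Deployment name="b" />'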
def set_incarnation(self, incarnation):
'''
Sets the incarnation in the goal state, but not on its subcomponents (e.g. hosting env, shared config)
'''
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "Incarnation", incarnation)
def set_container_id(self, container_id):
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "ContainerId", container_id)
def set_role_config_name(self, role_config_name):
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "ConfigName", role_config_name)
def set_hosting_env_deployment_name(self, deployment_name):
self.hosting_env = WireProtocolData.replace_xml_attribute_value(self.hosting_env, "Deployment", "name", deployment_name)
def set_shared_config_deployment_name(self, deployment_name):
self.shared_config = WireProtocolData.replace_xml_attribute_value(self.shared_config, "Deployment", "name", deployment_name)
def set_extensions_config_sequence_number(self, sequence_number):
'''
Sets the sequence number for *all* extensions
'''
self.ext_conf = WireProtocolData.replace_xml_attribute_value(self.ext_conf, "RuntimeSettings", "seqNo", sequence_number)
| 42.563492
| 161
| 0.669215
|
dd45d0238687bec99b95607bc5441c9efe93c7f0
| 3,195
|
py
|
Python
|
calcium_bflow_analysis/trace_converter.py
|
HagaiHargil/python-ca-analysis-bloodflow
|
87fcca6fd79f65122b4010d2225d10403450da7e
|
[
"Apache-2.0"
] | null | null | null |
calcium_bflow_analysis/trace_converter.py
|
HagaiHargil/python-ca-analysis-bloodflow
|
87fcca6fd79f65122b4010d2225d10403450da7e
|
[
"Apache-2.0"
] | 2
|
2021-07-19T11:38:59.000Z
|
2021-07-19T11:40:03.000Z
|
calcium_bflow_analysis/trace_converter.py
|
PBLab/ca-analysis-bloodflow
|
87fcca6fd79f65122b4010d2225d10403450da7e
|
[
"Apache-2.0"
] | null | null | null |
import attr
from attr.validators import instance_of
import numpy as np
import enum
from scipy.stats import mode
class ConversionMethod(enum.Enum):
"""
Types of conversion
"""
RAW = 1
DFF = 2
RAW_SUBTRACT = 3
NONE = 4
@attr.s(slots=True)
class RawTraceConverter:
"""
    Convert a raw fluorescence trace into something more useful
"""
conversion_method = attr.ib(validator=instance_of(ConversionMethod))
raw_data = attr.ib(validator=instance_of(np.ndarray))
data_before_offset = attr.ib(init=False)
converted_data = attr.ib(init=False)
num_of_rois = attr.ib(init=False)
num_of_slices = attr.ib(init=False)
def convert(self) -> np.ndarray:
"""
Main conversion method
:return np.ndarray: Dimensions neurons * time
"""
self.__set_params()
if self.conversion_method is ConversionMethod.RAW:
self.__convert_raw()
elif self.conversion_method is ConversionMethod.RAW_SUBTRACT:
self.__convert_raw_subtract()
elif self.conversion_method is ConversionMethod.DFF:
self.__convert_dff()
elif self.conversion_method is ConversionMethod.NONE:
self.__convert_none()
self.__add_offset()
return self.converted_data
def __set_params(self):
self.num_of_rois = self.raw_data.shape[0]
self.num_of_slices = self.raw_data.shape[1]
def __convert_raw(self):
"""
Change the raw trace to a normalized raw trace.
:return: None
"""
maxes = np.max(self.raw_data, axis=1).reshape((self.num_of_rois, 1))
maxes = np.tile(maxes, self.num_of_slices)
self.data_before_offset = self.raw_data / maxes
def __convert_raw_subtract(self):
"""
Subtract the minimal value from the stack and then normalize it.
:return: None
"""
mins = np.min(self.raw_data, axis=1).reshape((self.num_of_rois, 1))
mins = np.tile(mins, self.num_of_slices)
zeroed_data = self.raw_data - mins
maxes = np.max(zeroed_data, axis=1).reshape((self.num_of_rois, 1))
maxes = np.tile(maxes, self.num_of_slices)
self.data_before_offset = zeroed_data / maxes
def __convert_dff(self):
"""
        Subtract the minimal value and divide by the mode to obtain a DF/F estimate.
:return: None
"""
mins = np.min(self.raw_data, axis=1).reshape((self.num_of_rois, 1))
mins = np.tile(mins, self.num_of_slices)
zeroed_trace = self.raw_data - mins + 1
mods = mode(zeroed_trace, axis=1)[0].reshape((self.num_of_rois, 1))
mods = np.tile(mods, self.num_of_slices)
        self.data_before_offset = (self.raw_data - mods) / mods
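    # In symbols (a sketch of the lines above): with m = min(F) per ROI and
    # F0 = mode(F - m + 1), the returned estimate is (F - F0) / F0, applied
    # element-wise across every time point of that ROI.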
def __convert_none(self):
self.data_before_offset = self.raw_data / 4
def __add_offset(self):
"""
For easier visualization, add an offset to each trace
:return:
"""
offsets = np.arange(self.num_of_rois).reshape((self.num_of_rois, 1))
offsets = np.tile(offsets, self.num_of_slices)
self.converted_data = self.data_before_offset + offsets
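# A minimal usage sketch (synthetic data, not from a real recording); this helper
# is illustrative only and is not part of the original module.
def _example_convert_dff():
    """Convert three synthetic ROI traces into offset DF/F traces."""
    synthetic = np.random.rand(3, 100)  # 3 ROIs x 100 frames
    converter = RawTraceConverter(
        conversion_method=ConversionMethod.DFF, raw_data=synthetic
    )
    return converter.convert()  # shape (3, 100), one vertical offset per ROI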
| 31.019417
| 85
| 0.643818
|
3bbc0d07d856b60494b7c1ee20c7f6f1d746166e
| 819
|
py
|
Python
|
utils/load_externals.py
|
tsm55555/FeatureSqueezing
|
e353f433540cf2a549b5ce73d1c5ab6e11a6dc66
|
[
"MIT"
] | null | null | null |
utils/load_externals.py
|
tsm55555/FeatureSqueezing
|
e353f433540cf2a549b5ce73d1c5ab6e11a6dc66
|
[
"MIT"
] | null | null | null |
utils/load_externals.py
|
tsm55555/FeatureSqueezing
|
e353f433540cf2a549b5ce73d1c5ab6e11a6dc66
|
[
"MIT"
] | null | null | null |
import sys, os
external_libs = {'Cleverhans v1.0.0': "externals/cleverhans",
'Tensorflow-Model-Resnet': "externals/tensorflow-models",
}
project_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
for lib_name, lib_path in external_libs.items():
lib_path = os.path.join(project_path, lib_path)
    if not os.listdir(lib_path):
cmd = "git submodule update --init --recursive"
print("Fetching external libraries...")
os.system(cmd)
if lib_name == 'Tensorflow-Model-Resnet':
lib_token_fpath = os.path.join(lib_path, 'resnet', '__init__.py')
if not os.path.isfile(lib_token_fpath):
open(lib_token_fpath, 'a').close()
sys.path.append(lib_path)
print("Located %s" % lib_name)
# print (sys.path)
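# After this loop the vendored packages resolve as ordinary imports, e.g. (assuming
# the submodules are checked out and expose their usual top-level packages):
#
#   from cleverhans import attacks      # provided by externals/cleverhans
#   import resnet.resnet_model          # provided by externals/tensorflow-models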
| 32.76
| 75
| 0.641026
|
fdc3932d49886fd9bf51b8ab6d488020bb65fd84
| 40,210
|
py
|
Python
|
tensorflow/python/eager/core_test.py
|
18jeffreyma/tensorflow
|
421453ee0c7471af40bbaf254ecf91d6a3a320cf
|
[
"Apache-2.0"
] | 27
|
2019-01-02T09:36:57.000Z
|
2022-02-21T06:41:51.000Z
|
tensorflow/python/eager/core_test.py
|
18jeffreyma/tensorflow
|
421453ee0c7471af40bbaf254ecf91d6a3a320cf
|
[
"Apache-2.0"
] | 3
|
2019-01-23T11:01:22.000Z
|
2022-02-24T02:53:31.000Z
|
tensorflow/python/eager/core_test.py
|
18jeffreyma/tensorflow
|
421453ee0c7471af40bbaf254ecf91d6a3a320cf
|
[
"Apache-2.0"
] | 11
|
2019-03-02T12:42:23.000Z
|
2021-02-04T12:20:10.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import pickle
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.eager import executor
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
def execute(op_name, num_outputs, inputs, attrs=None):
return execute_lib.execute(
op_name, num_outputs, inputs, attrs, context.context())
def truncated_normal(shape):
return execute(
b'TruncatedNormal',
1,
inputs=[shape],
attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
def current_device():
return array_ops.identity(1.).device
def configure_virtual_cpus():
cpus = config.list_physical_devices('CPU')
# Set 2 virtual CPUs
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
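# Effect sketch (assuming at least one physical CPU is visible): after the call
# above, config.list_logical_devices('CPU') reports two logical devices,
# '/device:CPU:0' and '/device:CPU:1', which the multi-CPU placement tests below
# rely on.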
class TFETest(test_util.TensorFlowTestCase):
def setUp(self):
super(TFETest, self).setUp()
context._reset_context()
configure_virtual_cpus()
def _test_hashable(self, a, b, hashable):
if hashable:
self.assertIsInstance(b, collections.Hashable)
self.assertLen(set([a, b]), 2)
else:
# TODO(gjn): Figure out how to make this work for tf.Tensor
# self.assertNotIsInstance(b, collections.Hashable)
with self.assertRaisesRegex(TypeError, 'unhashable'):
set([a, b])
def testEquality(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(1.0)
constant_b = constant_op.constant(1.0)
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(1.0)
variable_b = variables.Variable(1.0)
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, False)
# We only test numpy behaviour in v2 mode since we'd like to match that.
numpy_a = np.array(1.0)
numpy_b = np.array(1.0)
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityNan(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertNotEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(float('nan'))
constant_b = constant_op.constant(float('nan'))
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(float('nan'))
variable_b = variables.Variable(float('nan'))
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, False)
numpy_a = np.array(float('nan'))
numpy_b = np.array(float('nan'))
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityCompare(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 2])
tf_b = constant_op.constant([1, 2])
tf_c = constant_op.constant([1, 1])
np_a = np.array([1, 2])
np_b = np.array([1, 2])
np_c = np.array([1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
# We can compare list of tensors
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
self.assertNotEqual([tf_a, tf_b], [tf_b, tf_b])
# We can compare existence in a list
self.assertIn(tf_a, [tf_a, tf_b])
self.assertIn(tf_a, [tf_b, tf_a])
self.assertNotIn(tf_a, [tf_b, tf_c])
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert results array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [True, False])
self.assertNotAllEqual(tf_a, tf_c)
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [True, False])
self.assertNotAllEqual(np_a, np_c)
      # Warning: even though we technically shouldn't be able to compare here,
      # since the ids are the same both TF & numpy will handle lists with the
      # same values without raising an error.
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
with self.assertRaises(ValueError):
bool([tf_a, tf_b] == [tf_b, tf_b])
self.assertEqual([np_a, np_b], [np_a, np_b])
with self.assertRaises(ValueError):
bool([np_a, np_b] == [np_b, np_b])
# Similar to lists we shouldn't be able to do a `in` check such as
# `if a in [a,b]`. However if `a` is the first element, it works due to
# short circuiting
self.assertIn(tf_a, [tf_a, tf_b])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_a])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_c])
self.assertIn(np_a, [np_a, np_b])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_a])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_c])
# rank 0
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(1), True)
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(2), False)
self.assertAllEqual(np.array(1) == np.array(1), True)
self.assertAllEqual(np.array(1) == np.array(2), False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityBroadcast(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 1])
tf_b = constant_op.constant([1, 1])
tf_c = constant_op.constant([[1, 1], [1, 1]])
tf_d = constant_op.constant([[1, 2], [1, 2]])
tf_e = constant_op.constant([1, 1, 1])
np_a = np.array([1, 1])
np_b = np.array([1, 1])
np_c = np.array([[1, 1], [1, 1]])
np_d = np.array([[1, 2], [1, 2]])
np_e = np.array([1, 1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
self.assertNotEqual(tf_a, tf_d)
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert results array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [[True, True], [True, True]])
with self.assertRaises(ValueError):
bool(tf_a == tf_d)
self.assertAllEqual(tf_a == tf_d, [[True, False], [True, False]])
self.assertFalse(bool(tf_a == tf_e))
self.assertTrue(bool(tf_a != tf_e))
self.assertNotAllEqual(tf_a, tf_e)
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [[True, True], [True, True]])
self.assertAllEqual(np_a == np_d, [[True, False], [True, False]])
self.assertFalse(bool(np_a == np_e))
self.assertTrue(bool(np_a != np_e))
self.assertNotAllEqual(np_a, np_e)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
def testContext(self):
ctx = context.Context()
self.assertTrue(ctx.executing_eagerly())
self.assertEqual('', ctx.scope_name)
ctx.scope_name = 'foo'
self.assertEqual('foo', ctx.scope_name)
self.assertEqual(context.SYNC, ctx.execution_mode)
ctx.execution_mode = context.ASYNC
self.assertEqual(context.ASYNC, ctx.execution_mode)
ctx.execution_mode = context.SYNC
self.assertEqual(context.SYNC, ctx.execution_mode)
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('GPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(None):
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('CPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(ctx.list_logical_devices('CPU')[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
gpus = ctx.list_logical_devices('GPU')
if gpus:
with ctx.device(gpus[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testDevice_supportsLogicalDevice(self):
ctx = context.Context()
cpus = ctx.list_logical_devices('CPU')
with ctx.device(cpus[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
def testDevice_supportsDeviceSpec(self):
ctx = context.Context()
device_name = '/job:localhost/replica:0/task:0/device:CPU:0'
device_spec = pydev.DeviceSpec.from_string(device_name)
with ctx.device(device_spec):
self.assertEqual(device_name, ctx.device_name)
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
def testAsyncBasic(self):
ctx = context.Context(execution_mode=context.ASYNC)
ctx.ensure_initialized()
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
@test_util.disable_tfrt('Multi CPU placement not supported yet.')
def testMultiCpuPlacement(self):
with ops.device('cpu:1'):
x = array_ops.identity(1.0)
with ops.device('cpu:0'):
y = array_ops.identity(x)
self.assertEqual(x.device, '/job:localhost/replica:0/task:0/device:CPU:1')
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
@test_util.run_gpu_only
def testShouldCopy(self):
with ops.device('GPU:0'):
x = array_ops.identity(1.0)
self.assertEndsWith(x.device, 'GPU:0')
y = array_ops.identity(x)
# The value we're testing y.device against will depend on what the behavior
# of not explicitly specifying a device in the context is. This behavior is
# subject to change (for example, in the future we may want to use GPUs, if
# available, when no device is explicitly provided)
self.assertEqual(y.device, current_device())
def testContextSwitchStackContainsEagerMode(self):
# Eager execution has been enabled, and no other context switch has
# occurred, so `context_switches` should contain exactly one entry.
self.assertEqual(len(context.context().context_switches.stack), 1)
switch = context.context().context_switches.stack[0]
# The entry should log that eager mode was entered.
self.assertIs(switch.enter_context_fn, context.eager_mode)
# It is not possible to build a graph function when eager execution
# is enabled; the stack entry should reflect this fact.
self.assertFalse(switch.is_building_function)
@test_util.run_gpu_only
def testInt32GPU(self):
with ops.device('gpu:0'):
xent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=[[0.0, 0.0]], labels=[0])
self.assertAllClose(xent, [0.69314718])
def _runInThread(self, target, args):
t = threading.Thread(target=target, args=args)
try:
t.start()
t.join()
except Exception as e:
raise e
# Test that different thread local values are initialized to the same values
# in different threads.
def testContextThreadLocalMembers(self):
def get_context_values(ctx):
return [
ctx.executing_eagerly(),
ctx.scope_name,
ctx.device_name,
ctx.num_gpus()
]
def get_values(ctx, values):
values.extend(get_context_values(ctx))
context_values = []
ctx = context.Context()
self._runInThread(get_values, (ctx, context_values))
self.assertAllEqual(context_values, get_context_values(ctx))
@test_util.run_gpu_only
@test_util.disable_tfrt('Context config not supported in TFRT.')
def testContextConfig(self):
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
self.assertEqual(0, ctx.num_gpus())
def testPickle(self):
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, 't.pickle')
with open(fname, 'wb') as f:
t = constant_op.constant(10.0)
pickle.dump(t, f)
with open(fname, 'rb') as f:
t = pickle.load(f)
self.assertAllEqual(t.numpy(), 10.0)
@test_util.run_gpu_only
def testDevicePlacementEnforcesConsistency(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
cpu.__enter__()
self.assertEndsWith(current_device(), 'CPU:0')
gpu.__enter__()
self.assertEndsWith(current_device(), 'GPU:0')
with self.assertRaisesRegex(
RuntimeError, 'Exiting device scope without proper scope nesting'):
cpu.__exit__()
self.assertEndsWith(current_device(), 'GPU:0')
gpu.__exit__()
self.assertEndsWith(current_device(), 'CPU:0')
cpu.__exit__()
@test_util.run_gpu_only
@test_util.disable_tfrt('Device name incorrect (known issue for runtime '
'fallback).')
def testReEntrant(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
with cpu:
with gpu:
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'CPU:0')
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
@test_util.run_gpu_only
def testTensorPlacement(self):
x = constant_op.constant(1.).gpu()
with context.device('gpu:0'):
y = constant_op.constant(2.)
    # Add would fail if y were not on the GPU
result = execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy()
self.assertEqual(3, result)
@test_util.run_gpu_only
def testResourceTensorPlacement(self):
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(1.0)
with context.device('cpu:0'):
# Check that even though we specified the cpu device we'll run the read op
# in the device where the handle is.
self.assertAllEqual(
gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0)
@test_util.run_gpu_only
def testCopyBetweenDevices(self):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
@test_util.run_gpu_only
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
def testCopyBetweenDevicesAsync(self):
with context.execution_mode(context.ASYNC):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
context.context().executor.wait()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
context.context().executor.wait()
context.context().executor.clear_error()
@test_util.run_gpu_only
@test_util.disable_tfrt('Device placement policy not configurable yet.')
def testCopyScope(self):
constant = constant_op.constant(1.0)
with ops.device('gpu:0'):
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
c = constant + 1.0
self.assertAllEqual(c, 2.0)
@test_util.disable_tfrt('ContextFromInterface not implemented.')
def testPyFunctionNullContext(self):
def simple_fn(unused_handle):
return 1.
with ops.device('CPU:0'):
test_var = variables.Variable([2., 3.])
@def_function.function
def test_fn(v):
script_ops.eager_py_func(simple_fn, [v.handle], dtypes.float32)
return 1.
self.assertAllEqual(test_fn(test_var), 1.0)
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
def testPyFunctionAsync(self):
def simple_fn(v):
one = constant_op.constant(1.)
return v + one
@def_function.function
def test_fn(v):
return script_ops.eager_py_func(simple_fn, [v], dtypes.float32)
async_executor = executor.new_executor(enable_async=True)
with context.executor_scope(async_executor):
test_var = variables.Variable(2.)
self.assertAllEqual(test_fn(test_var), 3.0)
async_executor.wait()
@test_util.run_gpu_only
def testNumpyForceCPU(self):
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.gpu()
self.assertAllEqual(c2g, cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.cpu()
self.assertNotEqual(id(ta), id(tb))
self.assertAllEqual(ta, tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tfe.TFE_Py_RegisterExceptionClass(str)
pywrap_tfe.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO(agarwal): add tests passing incorrect typed values to attrs.
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
def testExecuteBasicAsync(self):
with context.execution_mode(context.ASYNC):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
# Error: Invalid arguments
# TODO(b/149995282): When an exception is thrown in ASYNC mode, it seems
# there are things left over that cause mutex corruption when
# _reset_context() is called before the next test is executed.
#
# context.set_execution_mode(context.ASYNC)
# with self.assertRaises(errors.InvalidArgumentError):
# execute(
# b'MatMul',
# num_outputs=1,
# inputs=[three, five],
# attrs=('transpose_a', False, 'transpose_b', False, 'T',
# three.dtype.as_datatype_enum))
# context.context().executor.wait()
#
context.context().executor.clear_error()
context.context().execution_mode = context.SYNC
@test_util.disable_tfrt('TFRT asserts correct number of outputs instead of '
'returning error status.')
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
product = execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual(15, product)
@test_util.disable_tfrt('TFRT asserts correct number of outputs instead of '
'returning error status.')
def testExecuteTooFewNumOutputs(self):
# num_outputs provided is 0, but one output is produced.
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'Mul',
num_outputs=0,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
@test_util.run_gpu_only
def testMatMulGPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
@test_util.run_gpu_only
def testMatMulGPUCopyToCPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
with ops.device('CPU:0'):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
def testExecuteStringAttr(self):
checked_three = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal)
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertAllEqual(7, total)
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute(
b'MatMul',
num_outputs=1,
        inputs=[constant_op.constant([[3]]),
                constant_op.constant([[5]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual([[15]], product)
def testExecuteShapeAttr(self):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError,
'Expecting a Dimension for attr shape, got object'):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [object()], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b)
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b)
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
def testExecuteListTypeListShapeAttr(self):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
@test_util.disable_tfrt('TFRT raises InternalError instead of NotFoundError')
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
self.assertEqual(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
@test_util.run_gpu_only
def testOperationWithNoInputsRunsOnDevice(self):
shape = constant_op.constant([], dtype=dtypes.int32)
    # x: Run the "TruncatedNormal" op on the CPU and copy the result to the GPU.
x = truncated_normal(shape).gpu()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
def testConvertMixedEagerTensors(self):
array = np.zeros((), dtype=np.float32)
tensor = constant_op.constant(0., dtype=dtypes.float32)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
[array, tensor], context.context())
for typ, t in zip(types, tensors):
self.assertEqual(typ, dtypes.float32)
self.assertIsInstance(t, ops.EagerTensor)
def testConvertMixedEagerTensorsWithVariables(self):
var = resource_variable_ops.ResourceVariable(1.0)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
['foo', var], context.context())
self.assertAllEqual([dtypes.string, dtypes.float32], types)
for t in tensors:
self.assertIsInstance(t, ops.EagerTensor)
# TODO(b/123637108): re-enable
@test_util.run_gpu_only
def disabled_testSmallIntegerOpsForcedToCPU(self):
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.int64)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op forced to CPU since all constants are integers and small.
self.assertEndsWith(c.device, 'CPU:0')
a = array_ops.zeros((8, 10), dtype=dtypes.int64)
b = array_ops.ones((8, 10), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the tensors are larger than 64 elements.
self.assertEndsWith(c.device, 'GPU:0')
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.float32)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.float32)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the constants are not integers.
self.assertEndsWith(c.device, 'GPU:0')
def testExecutionModeIsStoredThreadLocal(self):
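    # Spin up several threads that concurrently flip one shared Context between
    # SYNC and ASYNC and check that each thread only ever observes the mode it
    # set itself, i.e. the execution mode setting is thread-local.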
cv = threading.Condition()
count = [0]
num_threads = 10
def execution_mode_test(cond, count, num_threads, ctx, mode):
cond.acquire()
# Ensure that all threads set their mode simultaneously
# Note that this is not a simple assignment, as the execution_mode is an
# @property with a custom setter.
ctx.execution_mode = mode
count[0] = count[0] + 1
if count[0] < num_threads:
cond.wait()
else:
cond.notify_all()
cond.release()
self.assertEqual(ctx.execution_mode, mode)
ctx = context.Context()
threads = []
for i in range(num_threads):
t = threading.Thread(
target=execution_mode_test,
args=(cv, count, num_threads, ctx,
context.SYNC if i % 2 == 0 else context.ASYNC))
t.start()
threads.append(t)
for t in threads:
t.join()
@test_util.disable_tfrt('Does not support converting DT_RESOURCE'
'to op attr type yet.')
def testEmptyResourceReturned(self):
with ops.device('CPU:0'):
v = variables.Variable(1.)
empty_handle = array_ops.gather(
v.handle[array_ops.newaxis], array_ops.zeros([0], dtype=dtypes.int32))
self.assertEqual(
[0],
empty_handle.shape.as_list())
class SendRecvTest(test_util.TensorFlowTestCase):
cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'
def _send(self, tensor, tensor_name, to_device):
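    # Issue the low-level _Send op directly: ship `tensor` from its current
    # device to `to_device` under the rendezvous key `tensor_name`.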
return execute(
b'_Send', num_outputs=0, inputs=[tensor],
attrs=('T', tensor.dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', tensor.device,
'send_device_incarnation', 0,
'recv_device', to_device,
'client_terminated', True))
def _recv(self, dtype, tensor_name, from_device):
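    # Issue the matching low-level _Recv op: wait for the tensor named
    # `tensor_name` sent from `from_device` to the current (or CPU) device.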
device_name = context.context().device_name
if not device_name:
device_name = self.cpu_device
return execute(
b'_Recv', num_outputs=1, inputs=[],
attrs=('tensor_type', dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', from_device,
'send_device_incarnation', 0,
'recv_device', device_name,
'client_terminated', False))[0]
def setUp(self):
super(SendRecvTest, self).setUp()
context._reset_context()
configure_virtual_cpus()
@test_util.disable_tfrt('Send/Receive not supported in TFRT yet.')
def testBasic(self):
t0 = constant_op.constant(1.0)
t1 = constant_op.constant(2.0)
self._send(t0, 't0', self.cpu_device)
self._send(t1, 't1', self.cpu_device)
self.assertAllEqual(
self._recv(dtypes.float32, 't0', self.cpu_device),
1.0)
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
@test_util.run_gpu_only
@test_util.disable_tfrt('Send/Receive not supported in TFRT yet.')
def testLocalCrossDevice(self):
gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'
with ops.device('GPU:0'):
t0 = array_ops.identity(1.0)
self._send(t0, 't0', self.cpu_device)
with ops.device('cpu:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't0', gpu_device_name),
1.0)
self._send(constant_op.constant(2.0), 't1', gpu_device_name)
with ops.device('GPU:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
class EagerTensorCacheTest(test_util.TensorFlowTestCase):
def setUp(self):
super(EagerTensorCacheTest, self).setUp()
context._reset_context()
configure_virtual_cpus()
def testCacheSkipsTensorsTooLarge(self):
cache = context._EagerTensorCache(max_items=100, max_tensor_size=3)
cache.put('1', array_ops.zeros((2, 2)))
self.assertIsNone(cache.get('1'))
cache.put('2', array_ops.zeros((2)))
self.assertIsNotNone(cache.get('2'))
if __name__ == '__main__':
test.main()
| 34.995648
| 108
| 0.649316
|
3283dce8e35d3dbf6a009f3adf969a1d2c6c39b6
| 2,679
|
py
|
Python
|
modelci/controller/controller.py
|
FerdinandZhong/ML-Model-CI
|
90fa2de056dca05031f0787b96c520dc57dc664d
|
[
"Apache-2.0"
] | 170
|
2020-06-08T18:30:52.000Z
|
2022-03-28T12:08:11.000Z
|
modelci/controller/controller.py
|
FerdinandZhong/ML-Model-CI
|
90fa2de056dca05031f0787b96c520dc57dc664d
|
[
"Apache-2.0"
] | 146
|
2020-06-14T18:56:27.000Z
|
2022-02-27T21:15:59.000Z
|
modelci/controller/controller.py
|
FerdinandZhong/ML-Model-CI
|
90fa2de056dca05031f0787b96c520dc57dc664d
|
[
"Apache-2.0"
] | 36
|
2020-06-08T18:30:56.000Z
|
2022-03-07T18:10:19.000Z
|
import collections
import time
from threading import Thread
import GPUtil
from modelci.hub import Profiler
from modelci.monitor import GPUNodeExporter
class UtilMonitor(Thread):
"""
Monitor for GPU Utilization
"""
def __init__(self, device, profiler: Profiler, delay, util_level, memory_level):
"""
Init the GPU Utilization Monitor Thread.
:param delay: time period to get the information.
:param util_level: The utilization level that trigger profiling.
:param memory_level: The memory usage level that trigger profiling.
:param profiler: The instance of model profiler.
:param device: GPU device to test.
"""
super(UtilMonitor, self).__init__()
self.stopped = False
self.delay = delay
self.memory_level = memory_level
self.util_level = util_level
self.profiler = profiler
self.exporter = GPUNodeExporter()
self.device = device
if self.exporter is None:
            raise TypeError(
                'Failed to get GPU-related information from the node exporter; please make sure the service has started.')
def run(self):
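        # Poll the node exporter every `delay` seconds; whenever the monitored
        # GPU is reported as idle for the configured utilization and memory
        # thresholds, run the profiler's auto-diagnosis on it.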
while not self.stopped:
available_devices = self.exporter.get_idle_gpu(util_level=self.util_level,
memory_level=self.memory_level)
if self.device.id in available_devices:
self.profiler.auto_diagnose(available_devices=[self.device])
time.sleep(self.delay)
def stop(self):
self.stopped = True
def auto_model_profiling(model_info, server_name, device_util_thd=0.01, device_memory_thd=0.01, period=10):
"""
Start model profiling automatically.
:param model_info: model information object saved by ModelCI.
    :param server_name: name of the serving Docker container.
    :param device_util_thd: utilization threshold below which a GPU is considered idle enough to profile.
    :param device_memory_thd: memory usage threshold below which a GPU is considered idle enough to profile.
    :param period: polling period, in seconds, between GPU status checks.
:return: None
"""
different_kind_devices = collections.OrderedDict()
for gpu in GPUtil.getGPUs():
if gpu.name not in different_kind_devices:
different_kind_devices[gpu.name] = gpu
for device in list(different_kind_devices.values()):
profiler = Profiler(model_info=model_info, server_name=server_name)
monitor = UtilMonitor(device, profiler, period, device_util_thd, device_memory_thd)
monitor.start()
def auto_device_placement():
raise NotImplementedError('Method `auto_device_placement` is not implemented.')
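# A minimal usage sketch of auto_model_profiling, assuming the caller has already
# retrieved a ModelCI `model_info` record; the serving container name below is a
# hypothetical placeholder, not a value defined by this module.
def _example_auto_profiling_usage(model_info):
    """Hypothetical example: start idle-GPU profiling for `model_info`."""
    auto_model_profiling(
        model_info=model_info,
        server_name='example-serving-container',  # placeholder container name
        device_util_thd=0.01,    # treat the GPU as idle below 1% utilization
        device_memory_thd=0.01,  # ... and below 1% memory usage
        period=10,               # poll the GPU node exporter every 10 seconds
    )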
| 34.346154
| 119
| 0.680478
|
3a5c0560c45e2fc6e653c889101efff08780e115
| 61,712
|
py
|
Python
|
test/test_agents.py
|
hidde-jan/von_agent
|
e011bf74718616c7ee30c54261d969300fe94f31
|
[
"Apache-2.0"
] | null | null | null |
test/test_agents.py
|
hidde-jan/von_agent
|
e011bf74718616c7ee30c54261d969300fe94f31
|
[
"Apache-2.0"
] | null | null | null |
test/test_agents.py
|
hidde-jan/von_agent
|
e011bf74718616c7ee30c54261d969300fe94f31
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2017-2018 Government of Canada - Public Services and Procurement Canada - buyandsell.gc.ca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from indy import IndyError
from indy.error import ErrorCode
from von_agent.demo_agents import TrustAnchorAgent, SRIAgent, OrgBookAgent, BCRegistrarAgent
from von_agent.nodepool import NodePool
from von_agent.proto.proto_util import attr_match, list_schemata, pred_match, pred_match_match, req_attrs
from von_agent.schema import SchemaKey
from von_agent.util import decode, encode, revealed_attrs, claims_for, prune_claims_json, schema_keys_for, ppjson
from von_agent.wallet import Wallet
import datetime
import pytest
import json
def claim_value_pair(plain):
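    """Return the [raw, encoded] attribute value pair that claim creation expects,
    e.g. claim_value_pair('Tart City') -> ['Tart City', encode('Tart City')].
    """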
return [str(plain), encode(plain)]
#noinspection PyUnusedLocal
@pytest.mark.asyncio
async def test_agents_direct(
pool_name,
pool_genesis_txn_path,
seed_trustee1,
pool_genesis_txn_file,
path_home):
# 1. Open pool, init agents
p = NodePool(pool_name, pool_genesis_txn_path, {'auto_remove': True})
await p.open()
assert p.handle
tag = TrustAnchorAgent(
p,
Wallet(p.name, seed_trustee1, 'trustee-wallet'),
'127.0.0.1',
8000,
'api/v0')
sag = SRIAgent(
p,
Wallet(p.name, 'SRI-Agent-0000000000000000000000', 'sri-agent-wallet'),
'127.0.0.1',
8001,
'api/v0')
pspcobag = OrgBookAgent(
p,
Wallet(p.name, 'PSPC-Org-Book-Agent-000000000000', 'pspc-org-book-agent-wallet'),
'127.0.0.1',
8002,
'api/v0')
bcobag = OrgBookAgent(
p,
Wallet(p.name, 'BC-Org-Book-Agent-00000000000000', 'bc-org-book-agent-wallet'),
'127.0.0.1',
8003,
'api/v0')
bcrag = BCRegistrarAgent(
p,
Wallet(p.name, 'BC-Registrar-Agent-0000000000000', 'bc-registrar-agent-wallet'),
'127.0.0.1',
8004,
'api/v0')
await tag.open()
await sag.open()
await pspcobag.open()
await bcobag.open()
await bcrag.open()
# print('TAG DID {}'.format(tag.did)) # V4SG...
# print('SAG DID {}'.format(sag.did)) # FaBA...
# print('PSPCOBAG DID {}'.format(pspcobag.did)) # 45Ue...
# print('BCOBAG DID {}'.format(bcobag.did)) # Rzra...
# print('BCRAG DID {}'.format(bcrag.did)) # Q4zq...
# 2. Publish agent particulars to ledger if not yet present
did2ag = {}
for ag in (tag, sag, pspcobag, bcobag, bcrag):
did2ag[ag.did] = ag
if not json.loads(await tag.get_nym(ag.did)):
await tag.send_nym(ag.did, ag.verkey)
if not json.loads(await tag.get_endpoint(ag.did)):
await ag.send_endpoint()
nyms = {
'tag': json.loads(await tag.get_nym(tag.did)),
'sag': json.loads(await tag.get_nym(sag.did)),
'pspcobag': json.loads(await tag.get_nym(pspcobag.did)),
'bcobag': json.loads(await tag.get_nym(bcobag.did)),
'bcrag': json.loads(await tag.get_nym(bcrag.did))
}
endpoints = {
'tag': json.loads(await tag.get_endpoint(tag.did)),
'sag': json.loads(await tag.get_endpoint(sag.did)),
'pspcobag': json.loads(await tag.get_endpoint(pspcobag.did)),
'bcobag': json.loads(await tag.get_endpoint(bcobag.did)),
'bcrag': json.loads(await tag.get_endpoint(bcrag.did))
}
print('\n\n== 1 == nyms: {}\nendpoints: {}\n'.format(ppjson(nyms), ppjson(endpoints)))
for k in nyms:
assert 'dest' in nyms[k]
for k in endpoints:
assert 'host' in endpoints[k]
assert 'port' in endpoints[k]
# 3. Publish schema to ledger if not yet present; get from ledger
S_KEY = {
'BC': SchemaKey(bcrag.did, 'bc-reg', '1.0'),
'SRI-1.0': SchemaKey(sag.did, 'sri', '1.0'),
'SRI-1.1': SchemaKey(sag.did, 'sri', '1.1'),
'GREEN': SchemaKey(sag.did, 'green', '1.0'),
}
schema_data = {
S_KEY['BC']: {
'name': S_KEY['BC'].name,
'version': S_KEY['BC'].version,
'attr_names': [
'id',
'busId',
'orgTypeId',
'jurisdictionId',
'legalName',
'effectiveDate',
'endDate'
]
},
S_KEY['SRI-1.0']: {
'name': S_KEY['SRI-1.0'].name,
'version': S_KEY['SRI-1.0'].version,
'attr_names': [
'legalName',
'jurisdictionId',
'sriRegDate'
]
},
S_KEY['SRI-1.1']: {
'name': S_KEY['SRI-1.1'].name,
'version': S_KEY['SRI-1.1'].version,
'attr_names': [
'legalName',
'jurisdictionId',
'businessLang',
'sriRegDate'
]
},
S_KEY['GREEN']: {
'name': S_KEY['GREEN'].name,
'version': S_KEY['GREEN'].version,
'attr_names': [
'legalName',
'greenLevel',
'auditDate'
]
}
}
# index by transaction number
seq_no2schema = {}
seq_no2schema_key = {}
# index by SchemaKey
schema_json = {}
schema = {}
claim_def_json = {}
claim_def = {}
claim_data = {}
claim_req = {}
claim_req_json = {}
claim = {}
claim_json = {}
find_req = {}
claims_found = {}
claims_found_json = {}
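    # Map each issuer DID to the HolderProver agent that stores and proves the claims it issues.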
holder_prover = {
bcrag.did: bcobag,
sag.did: pspcobag
}
x_json = await tag.get_schema(SchemaKey(tag.did, 'Xxxx', 'X.x')) # Bad version number
assert not json.loads(x_json)
i = 0
for s_key in schema_data:
swab_json = await bcrag.get_schema(s_key) # may exist
if not json.loads(swab_json):
await did2ag[s_key.origin_did].send_schema(json.dumps(schema_data[s_key]))
schema_json[s_key] = await did2ag[s_key.origin_did].get_schema(s_key) # should exist now
schema[s_key] = json.loads(schema_json[s_key])
seq_no2schema_key[schema[s_key]['seqNo']] = s_key
seq_no2schema[schema[s_key]['seqNo']] = schema[s_key]
print('\n\n== 2.{} == SCHEMA [{} v{}]: {}'.format(i, s_key.name, s_key.version, ppjson(schema[s_key])))
assert schema[s_key]
i += 1
assert not json.loads(await did2ag[S_KEY['BC'].origin_did].send_schema(
        json.dumps(schema_data[S_KEY['BC']])))  # a repeat write of the same schema key ought to be rejected
# 4. BC Registrar and SRI agents (as Issuers) create, store, and publish claim definitions to ledger
non_claim_def_json = await bcobag.get_claim_def(999999, bcrag.did) # ought not exist
assert not json.loads(non_claim_def_json)
i = 0
for s_key in schema_data:
await did2ag[s_key.origin_did].send_claim_def(schema_json[s_key])
claim_def_json[s_key] = await holder_prover[s_key.origin_did].get_claim_def(
schema[s_key]['seqNo'],
s_key.origin_did) # ought to exist now
claim_def[s_key] = json.loads(claim_def_json[s_key])
print('\n\n== 3.{} == Claim def [{} v{}]: {}'.format(
i,
s_key.name,
s_key.version,
ppjson(json.loads(claim_def_json[s_key]))))
assert json.loads(claim_def_json[s_key])['ref'] == schema[s_key]['seqNo']
repeat_claim_def = json.loads(await did2ag[s_key.origin_did].send_claim_def(schema_json[s_key]))
assert repeat_claim_def # check idempotence and non-crashing on duplicate claim-def send
i += 1
# 5. Setup master secrets, claim reqs at HolderProver agents
await bcobag.create_master_secret('MasterSecret')
await pspcobag.create_master_secret('SecretMaster')
for ag in (bcobag, pspcobag):
wallet_name = ag.wallet.name
assert (await ag.reset_wallet()) == wallet_name
i = 0
for s_key in schema_data:
await holder_prover[s_key.origin_did].store_claim_offer(s_key.origin_did, s_key)
claim_req_json[s_key] = await holder_prover[s_key.origin_did].store_claim_req(
s_key.origin_did,
claim_def_json[s_key])
claim_req[s_key] = json.loads(claim_req_json[s_key])
print('\n\n== 4.{} == Claim req [{} v{}]: {}'.format(
i,
s_key.name,
s_key.version,
ppjson(claim_req_json[s_key])))
assert json.loads(claim_req_json[s_key])
i += 1
# 6. BC Reg agent (as Issuer) issues claims and stores at HolderProver: get claim req, create claim, store claim
claim_data = {
S_KEY['BC']: [
{
'id': claim_value_pair(1),
'busId': claim_value_pair('11121398'),
'orgTypeId': claim_value_pair(2),
'jurisdictionId': claim_value_pair(1),
'legalName': claim_value_pair('The Original House of Pies'),
'effectiveDate': claim_value_pair('2010-10-10'),
'endDate': claim_value_pair(None)
},
{
'id': claim_value_pair(2),
'busId': claim_value_pair('11133333'),
'orgTypeId': claim_value_pair(1),
'jurisdictionId': claim_value_pair(1),
'legalName': claim_value_pair('Planet Cake'),
'effectiveDate': claim_value_pair('2011-10-01'),
'endDate': claim_value_pair(None)
},
{
'id': claim_value_pair(3),
'busId': claim_value_pair('11144444'),
'orgTypeId': claim_value_pair(2),
'jurisdictionId': claim_value_pair(1),
'legalName': claim_value_pair('Tart City'),
'effectiveDate': claim_value_pair('2012-12-01'),
'endDate': claim_value_pair(None)
}
],
S_KEY['SRI-1.0']: [],
S_KEY['SRI-1.1']: [],
S_KEY['GREEN']: []
}
for s_key in claim_data:
for c in claim_data[s_key]:
(_, claim_json[s_key]) = await did2ag[s_key.origin_did].create_claim(claim_req_json[s_key], c)
assert json.loads(claim_json[s_key])
await holder_prover[s_key.origin_did].store_claim(claim_json[s_key])
# 7. BC Org Book agent (as HolderProver) finds claims; actuator filters post hoc
find_req[S_KEY['BC']] = {
'nonce': '1000',
'name': 'bc_proof_req',
'version': '0',
'requested_attrs': {
'{}_{}_uuid'.format(schema[S_KEY['BC']]['seqNo'], attr): {
'name': attr,
'restrictions': [{
'schema_key': {
'did': S_KEY['BC'].origin_did,
'name': S_KEY['BC'].name,
'version': S_KEY['BC'].version
}
}]
} for attr in claim_data[S_KEY['BC']][0]
},
'requested_predicates': {}
}
(bc_referents_all, claims_found_json[S_KEY['BC']]) = await bcobag.get_claims(json.dumps(find_req[S_KEY['BC']]))
print('\n\n== 5 == All BC claims, no filter {}: {}'.format(
bc_referents_all,
ppjson(claims_found_json[S_KEY['BC']])))
claims_found[S_KEY['BC']] = json.loads(claims_found_json[S_KEY['BC']])
bc_display_pruned_filt_post_hoc = claims_for(
claims_found[S_KEY['BC']],
{
S_KEY['BC']: {
'legalName': decode(claim_data[S_KEY['BC']][2]['legalName'][1])
}
})
print('\n\n== 6 == BC claims display, filtered post hoc matching {}: {}'.format(
decode(claim_data[S_KEY['BC']][2]['legalName'][1]),
ppjson(bc_display_pruned_filt_post_hoc)))
bc_display_pruned = prune_claims_json(
claims_found[S_KEY['BC']],
{k for k in bc_display_pruned_filt_post_hoc})
print('\n\n== 7 == BC claims, stripped down: {}'.format(ppjson(bc_display_pruned)))
filt = {
S_KEY['BC']: {
'attr-match': {
k: decode(claim_data[S_KEY['BC']][2][k][1]) for k in claim_data[S_KEY['BC']][2]
if k in ('jurisdictionId', 'busId')
}
}
}
(bc_referents_filt, claims_found_json[S_KEY['BC']]) = await bcobag.get_claims(
json.dumps(find_req[S_KEY['BC']]),
filt)
print('\n\n== 8 == BC claims, filtered a priori {}: {}'.format(
bc_referents_filt,
ppjson(claims_found_json[S_KEY['BC']])))
assert set([*bc_display_pruned_filt_post_hoc]) == bc_referents_filt
assert len(bc_display_pruned_filt_post_hoc) == 1
bc_referent = bc_referents_filt.pop()
# 8. BC Org Book agent (as HolderProver) creates proof for claim specified by filter
claims_found[S_KEY['BC']] = json.loads(claims_found_json[S_KEY['BC']])
bc_requested_claims = {
'self_attested_attributes': {},
'requested_attrs': {
attr: [bc_referent, True]
for attr in find_req[S_KEY['BC']]['requested_attrs'] if attr in claims_found[S_KEY['BC']]['attrs']
},
'requested_predicates': {
pred: bc_referent
for pred in find_req[S_KEY['BC']]['requested_predicates']
}
}
bc_proof_json = await bcobag.create_proof(
find_req[S_KEY['BC']],
claims_found[S_KEY['BC']],
bc_requested_claims)
print('\n\n== 9 == BC proof (by filter): {}'.format(ppjson(bc_proof_json)))
# 9. SRI agent (as Verifier) verifies proof (by filter)
rc_json = await sag.verify_proof(
find_req[S_KEY['BC']],
json.loads(bc_proof_json))
print('\n\n== 10 == The SRI agent verifies the BC proof (by filter) as: {}'.format(ppjson(rc_json)))
assert json.loads(rc_json)
# 10. BC Org Book agent (as HolderProver) finds claim by referent, no claim by non-referent
s_key = set(schema_keys_for(claims_found[S_KEY['BC']], {bc_referent}).values()).pop() # it's unique
req_attrs = {
'{}_{}_uuid'.format(schema[s_key]['seqNo'], attr_name): {
'name': attr_name,
'restrictions': [{
'schema_key': {
'did': s_key.origin_did,
'name': s_key.name,
'version': s_key.version
}
}]
} for attr_name in schema_data[S_KEY['BC']]['attr_names']
}
bc_claim_found_by_referent = json.loads(await bcobag.get_claim_by_referent(bc_referent, req_attrs))
print('\n\n== 11 == BC claim by referent={}: {}'.format(
bc_referent,
ppjson(bc_claim_found_by_referent)))
assert bc_claim_found_by_referent
assert bc_claim_found_by_referent['attrs']
bc_non_claim_by_referent = json.loads(await bcobag.get_claim_by_referent(
'claim::ffffffff-ffff-ffff-ffff-ffffffffffff',
req_attrs))
print('\n\n== 12 == BC non-claim: {}'.format(ppjson(bc_non_claim_by_referent)))
assert bc_non_claim_by_referent
assert all(not bc_non_claim_by_referent['attrs'][attr] for attr in bc_non_claim_by_referent['attrs'])
# 11. BC Org Book agent (as HolderProver) creates proof for claim specified by referent
bc_requested_claims = {
'self_attested_attributes': {},
'requested_attrs': {
attr: [bc_referent, True]
for attr in bc_claim_found_by_referent['attrs']
},
'requested_predicates': {}
}
bc_proof_json = await bcobag.create_proof(
find_req[S_KEY['BC']],
bc_claim_found_by_referent,
bc_requested_claims)
bc_proof = json.loads(bc_proof_json)
print('\n\n== 13 == BC proof by referent={}: {}'.format(bc_referent, ppjson(bc_proof_json)))
# 12. SRI agent (as Verifier) verifies proof (by referent)
rc_json = await sag.verify_proof(
find_req[S_KEY['BC']],
bc_proof)
print('\n\n== 14 == SRI agent verifies BC proof by referent={} as: {}'.format(bc_referent, ppjson(rc_json)))
assert json.loads(rc_json)
# 13. BC Org Book agent (as HolderProver) finds claims by predicate
find_req_pred = {
'nonce': '1111',
'name': 'bc_proof_req',
'version': '0',
'requested_attrs': {
'{}_{}_uuid'.format(schema[S_KEY['BC']]['seqNo'], attr): {
'name': attr,
'restrictions': [{
'schema_key': {
'did': S_KEY['BC'].origin_did,
'name': S_KEY['BC'].name,
'version': S_KEY['BC'].version
}
}]
} for attr in claim_data[S_KEY['BC']][2] if attr != 'id'
},
'requested_predicates': {
'{}_id_uuid'.format(schema[S_KEY['BC']]['seqNo']): {
'attr_name': 'id',
'p_type': '>=',
'value': int(claim_data[S_KEY['BC']][2]['id'][0]),
'restrictions': [{
'schema_key': {
'did': S_KEY['BC'].origin_did,
'name': S_KEY['BC'].name,
'version': S_KEY['BC'].version
}
}]
}
}
}
filt_pred = {
S_KEY['BC']: {
'pred-match': [{
'attr': 'id',
'pred-type': '>=',
'value': int(claim_data[S_KEY['BC']][2]['id'][0]),
}]
}
}
(bc_referents_pred, claims_found_pred_json) = await bcobag.get_claims(json.dumps(find_req_pred), filt_pred)
print('\n\n== 15 == BC claims, filtered by predicate id >= 3: {}'.format(
ppjson(claims_found_pred_json)))
claims_found_pred = json.loads(claims_found_pred_json)
bc_display_pred = claims_for(claims_found_pred)
print('\n\n== 16 == BC claims display, filtered by predicate id >= 3: {}'.format(ppjson(bc_display_pred)))
assert len(bc_referents_pred) == 1
bc_referent_pred = bc_referents_pred.pop() # it's unique
# 14. BC Org Book agent (as HolderProver) creates proof for claims structure filtered by predicate
bc_requested_claims_pred = {
'self_attested_attributes': {},
'requested_attrs': {
a_referent: [bc_referent_pred, True]
for a_referent in find_req_pred['requested_attrs'] if a_referent in claims_found_pred['attrs']
},
'requested_predicates': {
p_referent: bc_referent_pred
for p_referent in find_req_pred['requested_predicates'] if p_referent in claims_found_pred['predicates']
}
}
bc_proof_pred_json = await bcobag.create_proof(
find_req_pred,
claims_found_pred,
bc_requested_claims_pred)
print('\n\n== 17 == BC proof by predicate id >= 3: {}'.format(ppjson(bc_proof_pred_json)))
# 15. SRI agent (as Verifier) verifies proof (by predicate)
rc_json = await sag.verify_proof(
find_req_pred,
json.loads(bc_proof_pred_json))
print('\n\n== 18 == The SRI agent verifies the BC proof (by predicate) as: {}'.format(ppjson(rc_json)))
assert json.loads(rc_json)
# 16. Create and store SRI registration completion claims, green claim from verified proof + extra data
revealed = revealed_attrs(bc_proof)[bc_referent]
claim_data[S_KEY['SRI-1.0']].append({
**{k: claim_value_pair(revealed[k]) for k in revealed if k in schema_data[S_KEY['SRI-1.0']]['attr_names']},
'sriRegDate': claim_value_pair(datetime.date.today().strftime('%Y-%m-%d'))
})
claim_data[S_KEY['SRI-1.1']].append({
**{k: claim_value_pair(revealed[k]) for k in revealed if k in schema_data[S_KEY['SRI-1.1']]['attr_names']},
'sriRegDate': claim_value_pair(datetime.date.today().strftime('%Y-%m-%d')),
'businessLang': claim_value_pair('EN-CA')
})
claim_data[S_KEY['GREEN']].append({
**{k: claim_value_pair(revealed[k]) for k in revealed if k in schema_data[S_KEY['GREEN']]['attr_names']},
'greenLevel': claim_value_pair('Silver'),
'auditDate': claim_value_pair(datetime.date.today().strftime('%Y-%m-%d'))
})
i = 0
for s_key in claim_data:
if s_key == S_KEY['BC']:
continue
for c in claim_data[s_key]:
(_, claim_json[s_key]) = await did2ag[s_key.origin_did].create_claim(claim_req_json[s_key], c)
print('\n\n== 19.{} == SRI created claim [{} v{}]: {}'.format(
i,
s_key.name,
s_key.version,
ppjson(claim_json[s_key])))
assert json.loads(claim_json[s_key])
await holder_prover[s_key.origin_did].store_claim(claim_json[s_key])
i += 1
# 17. PSPC Org Book agent (as HolderProver) finds all claims, one schema at a time
i = 0
for s_key in schema:
if s_key == S_KEY['BC']:
continue
find_req[s_key] = {
'nonce': str(1234 + i),
'name': 'sri_find_req',
'version': '0',
'requested_attrs': {
'{}_{}_uuid'.format(schema[s_key]['seqNo'], attr): {
'name': attr,
'restrictions': [{
'schema_key': {
'did': s_key.origin_did,
'name': s_key.name,
'version': s_key.version
}
}]
} for attr in claim_data[s_key][0]
},
'requested_predicates': {}
}
(sri_referents, claims_found_json[s_key]) = await holder_prover[s_key.origin_did].get_claims(
json.dumps(find_req[s_key]))
print('\n\n== 20.{} == Claims on [{} v{}], no filter {}: {}'.format(
i,
s_key.name,
s_key.version,
sri_referents,
ppjson(claims_found_json[s_key])))
i += 1
# 18. PSPC Org Book agent (as HolderProver) finds all claims on all schemata at once; actuator filters post hoc
req_attrs_sri_find_all = {}
for s_key in schema_data:
if s_key == S_KEY['BC']:
continue
seq_no = schema[s_key]['seqNo']
for attr_name in schema_data[s_key]['attr_names']:
req_attrs_sri_find_all['{}_{}_uuid'.format(seq_no, attr_name)] = {
'name': attr_name,
'restrictions': [{
'schema_key': {
'did': s_key.origin_did,
'name': s_key.name,
'version': s_key.version
}
}]
}
find_req_sri_all = {
'nonce': '9999',
'name': 'sri_find_req_all',
'version': '0',
'requested_attrs': req_attrs_sri_find_all,
'requested_predicates': {}
}
(sri_referents_all, sri_claims_found_all_json) = await pspcobag.get_claims(json.dumps(find_req_sri_all))
print('\n\n== 21 == All SRI-issued claims (no filter) at PSPC Org Book {}: {}'.format(
sri_referents_all,
ppjson(sri_claims_found_all_json)))
sri_claims_found_all = json.loads(sri_claims_found_all_json)
sri_display_pruned_filt_post_hoc = claims_for(
sri_claims_found_all,
{
S_KEY['GREEN']: {
'legalName': decode(claim_data[S_KEY['GREEN']][0]['legalName'][1])
}
})
print('\n\n== 22 == SRI claims display, filtered post hoc matching {}: {}'.format(
decode(claim_data[S_KEY['GREEN']][0]['legalName'][1]),
ppjson(sri_display_pruned_filt_post_hoc)))
sri_display_pruned = prune_claims_json(
sri_claims_found_all,
{k for k in sri_display_pruned_filt_post_hoc})
print('\n\n== 23 == SRI claims, stripped down: {}'.format(ppjson(sri_display_pruned)))
filt = {
S_KEY['GREEN']: {
'legalName': decode(claim_data[S_KEY['GREEN']][0]['legalName'][1])
}
}
(sri_referents_filt, claims_found_json[S_KEY['GREEN']]) = await pspcobag.get_claims(
json.dumps(find_req[S_KEY['GREEN']]),
filt)
print('\n\n== 24 == SRI claims, filtered a priori {}: {}'.format(
sri_referents_filt,
ppjson(claims_found_json[S_KEY['GREEN']])))
assert set([*sri_display_pruned_filt_post_hoc]) == sri_referents_filt
assert len(sri_display_pruned_filt_post_hoc) == 1
sri_claims_found_all = json.loads(sri_claims_found_all_json)
sri_req_attrs4sri_req_claims = {}
for attr_uuid in sri_claims_found_all['attrs']:
sri_req_attrs4sri_req_claims[attr_uuid] = [sri_claims_found_all['attrs'][attr_uuid][0]['referent'], True]
# 19. PSPC Org Book agent (as HolderProver) creates proof for multiple claims
sri_requested_claims = {
'self_attested_attributes': {},
'requested_attrs': sri_req_attrs4sri_req_claims,
'requested_predicates': {}
}
sri_proof_json = await pspcobag.create_proof(
find_req_sri_all,
sri_claims_found_all,
sri_requested_claims)
print('\n\n== 25 == PSPC Org Book proof on referent={}: {}'.format(sri_referents_all, ppjson(sri_proof_json)))
sri_proof = json.loads(sri_proof_json)
# 20. SRI agent (as Verifier) verify proof
rc_json = await sag.verify_proof(
find_req_sri_all,
sri_proof)
print('\n\n== 26 == the SRI agent verifies the PSPC Org Book proof by referent={} as: {}'.format(
sri_referents_all,
ppjson(rc_json)))
assert json.loads(rc_json)
await bcrag.close()
await bcobag.close()
await pspcobag.close()
await sag.close()
await tag.close()
await p.close()
#noinspection PyUnusedLocal
@pytest.mark.asyncio
async def test_agents_process_forms_local(
pool_name,
pool_genesis_txn_path,
seed_trustee1,
pool_genesis_txn_file,
path_home):
# 1. Open pool, init agents
async with NodePool(pool_name, pool_genesis_txn_path, {'auto_remove': True}) as p, (
TrustAnchorAgent(
p,
Wallet(p.name, seed_trustee1, 'trustee-wallet'),
'127.0.0.1',
'8000',
'api/v0')) as tag, (
SRIAgent(
p,
Wallet(p.name, 'SRI-Agent-0000000000000000000000', 'sri-agent-wallet'),
'127.0.0.1',
8001,
'api/v0')) as sag, (
OrgBookAgent(
p,
Wallet(p.name, 'PSPC-Org-Book-Agent-000000000000', 'pspc-org-book-agent-wallet'),
'127.0.0.1',
8003,
'api/v0')) as pspcobag, (
OrgBookAgent(
p,
Wallet(p.name, 'BC-Org-Book-Agent-00000000000000', 'bc-org-book-agent-wallet'),
'127.0.0.1',
8003,
'api/v0')) as bcobag, (
BCRegistrarAgent(
p,
Wallet(p.name, 'BC-Registrar-Agent-0000000000000', 'bc-registrar-agent-wallet'),
'127.0.0.1',
8004,
'api/v0')) as bcrag:
assert p.handle is not None
# TAG DID: V4SG...
# SAG DID: FaBA...
# PSPCOBAG DID: 45Ue...
# BCOBAG DID: Rzra...
# BCRAG DID: Q4zq...
print('\n\n== 1 == Agent DIDs: {}'.format(ppjson(
{ag.wallet.name.replace('-wallet',''): ag.did for ag in (tag, sag, pspcobag, bcobag, bcrag)})))
# 2. Publish agent particulars to ledger if not yet present
did2ag = {}
for ag in (tag, sag, pspcobag, bcobag, bcrag):
did2ag[ag.did] = ag
nym_lookup_form = {
'type': 'agent-nym-lookup',
'data': {
'agent-nym': {
'did': ag.did
}
}
}
nym = json.loads(await ag.process_post(nym_lookup_form))
if not nym:
resp_json = await tag.process_post({
'type': 'agent-nym-send',
'data': {
'agent-nym': {
'did': ag.did,
'verkey': ag.verkey
}
}
})
nym = json.loads(await ag.process_post(nym_lookup_form))
assert nym
endpoint_lookup_form = {
'type': 'agent-endpoint-lookup',
'data': {
'agent-endpoint': {
'did': ag.did
}
}
}
endpoint = json.loads(await tag.process_post(endpoint_lookup_form))
if not endpoint:
resp_json = await ag.process_post({
'type': 'agent-endpoint-send',
'data': {
}
})
endpoint = json.loads(await ag.process_post(endpoint_lookup_form))
assert endpoint
try: # Make sure only a trust anchor can register an agent
await sag.process_post({
'type': 'agent-nym-send',
'data': {
'agent-nym': {
'did': sag.did,
'verkey': sag.verkey
}
}
})
assert False
except NotImplementedError:
pass
# 3. Publish schema to ledger if not yet present; get from ledger
S_KEY = {
'BC': SchemaKey(bcrag.did, 'bc-reg', '1.0'),
'SRI-1.0': SchemaKey(sag.did, 'sri', '1.0'),
'SRI-1.1': SchemaKey(sag.did, 'sri', '1.1'),
'GREEN': SchemaKey(sag.did, 'green', '1.0'),
}
schema_data = {
S_KEY['BC']: {
'name': S_KEY['BC'].name,
'version': S_KEY['BC'].version,
'attr_names': [
'id',
'busId',
'orgTypeId',
'jurisdictionId',
'legalName',
'effectiveDate',
'endDate'
]
},
S_KEY['SRI-1.0']: {
'name': S_KEY['SRI-1.0'].name,
'version': S_KEY['SRI-1.0'].version,
'attr_names': [
'legalName',
'jurisdictionId',
'sriRegDate'
]
},
S_KEY['SRI-1.1']: {
'name': S_KEY['SRI-1.1'].name,
'version': S_KEY['SRI-1.1'].version,
'attr_names': [
'legalName',
'jurisdictionId',
'businessLang',
'sriRegDate'
]
},
S_KEY['GREEN']: {
'name': S_KEY['GREEN'].name,
'version': S_KEY['GREEN'].version,
'attr_names': [
'legalName',
'greenLevel',
'auditDate'
]
}
}
# index by transaction number
seq_no2schema = {}
seq_no2schema_key = {}
# index by SchemaKey
schema_lookup_form = {}
schema_json = {}
schema = {}
claim_def_json = {}
claim_def = {}
claim_data = {}
claim_req = {}
claim_req_json = {}
claim = {}
claim_json = {}
claims_found = {}
claims_found_json = {}
holder_prover = {
bcrag.did: bcobag,
sag.did: pspcobag
}
schema_lookup_form = {
S_KEY['BC']: {
'type': 'schema-lookup',
'data': {
'schema': {
'origin-did': S_KEY['BC'].origin_did,
'name': S_KEY['BC'].name,
'version': S_KEY['BC'].version
}
}
},
S_KEY['SRI-1.0']: {
'type': 'schema-lookup',
'data': {
'schema': {
'origin-did': S_KEY['SRI-1.0'].origin_did,
'name': S_KEY['SRI-1.0'].name,
'version': S_KEY['SRI-1.0'].version
}
}
},
S_KEY['SRI-1.1']: {
'type': 'schema-lookup',
'data': {
'schema': {
'origin-did': S_KEY['SRI-1.1'].origin_did,
'name': S_KEY['SRI-1.1'].name,
'version': S_KEY['SRI-1.1'].version
}
}
},
S_KEY['GREEN']: {
'type': 'schema-lookup',
'data': {
'schema': {
'origin-did': S_KEY['GREEN'].origin_did,
'name': S_KEY['GREEN'].name,
'version': S_KEY['GREEN'].version
}
}
}
}
schema_lookup_form[S_KEY['BC']]['data']['schema']['version'] = 'xxx'
x_json = await bcrag.process_post(schema_lookup_form[S_KEY['BC']]) # Bad version number
assert not json.loads(x_json)
schema_lookup_form[S_KEY['BC']]['data']['schema']['version'] = '999.999'
assert not json.loads(await bcrag.process_post(schema_lookup_form[S_KEY['BC']])) # ought not exist
schema_lookup_form[S_KEY['BC']]['data']['schema']['version'] = schema_data[S_KEY['BC']]['version'] # restore
i = 0
for s_key in schema_data:
swab_json = await bcrag.get_schema(s_key) # may exist
if not json.loads(swab_json):
await did2ag[s_key.origin_did].process_post({
'type': 'schema-send',
'data': {
'schema': {
'origin-did': s_key.origin_did,
'name': s_key.name,
'version': s_key.version
},
'attr-names': schema_data[s_key]['attr_names']
}
})
schema_json[s_key] = await did2ag[s_key.origin_did].process_post(
schema_lookup_form[s_key]) # should exist now
schema[s_key] = json.loads(schema_json[s_key])
assert schema[s_key]
seq_no2schema_key[schema[s_key]['seqNo']] = s_key
seq_no2schema[schema[s_key]['seqNo']] = schema[s_key]
print('\n\n== 2.{} == SCHEMA [{} v{}]: {}'.format(i, s_key.name, s_key.version, ppjson(schema[s_key])))
i += 1
for xag in (pspcobag, bcobag):
try: # Make sure only an origin can send a schema
await xag.process_post({
'type': 'schema-send',
'data': {
'schema': {
'origin-did': xag.did,
'name': S_KEY['BC'].name,
'version': S_KEY['BC'].version
},
'attr-names': schema_data[S_KEY['BC']]['attr_names']
}
})
assert False
except NotImplementedError:
pass
# 4. BC Registrar and SRI agents (as Issuers) create, store, and publish claim def to ledger
i = 0
for s_key in schema_data:
claim_def_send_form = {
'type': 'claim-def-send',
'data': {
'schema': {
'origin-did': s_key.origin_did,
'name': s_key.name,
'version': s_key.version
}
}
}
await did2ag[s_key.origin_did].process_post(claim_def_send_form)
claim_def_json[s_key] = await holder_prover[s_key.origin_did].get_claim_def(
schema[s_key]['seqNo'],
s_key.origin_did) # ought to exist now (short-circuit to low-level API)
claim_def[s_key] = json.loads(claim_def_json[s_key])
print('\n\n== 3.{} == Claim def [{} v{}]: {}'.format(
i,
s_key.name,
s_key.version,
ppjson(json.loads(claim_def_json[s_key]))))
assert json.loads(claim_def_json[s_key])['ref'] == schema[s_key]['seqNo']
await did2ag[s_key.origin_did].process_post(claim_def_send_form)
repeat_claim_def = json.loads(await holder_prover[s_key.origin_did].get_claim_def(
schema[s_key]['seqNo'],
s_key.origin_did)) # check idempotence and non-crashing on duplicate claim-def send
assert repeat_claim_def
i += 1
# 5. Setup master secrets, claim reqs at HolderProver agents
master_secret_set_form = {
'type': 'master-secret-set',
'data': {
'label': 'maestro'
}
}
claim_hello_form = {
s_key: {
'type': 'claim-hello',
'data': {
'issuer-did': s_key.origin_did,
'schema': {
'origin-did': s_key.origin_did,
'name': s_key.name,
'version': s_key.version
}
}
} for s_key in schema_data
}
try: # master secret unspecified, ought to fail
await bcobag.process_post(claim_hello_form[S_KEY['BC']])
except ValueError:
pass
await bcobag.process_post(master_secret_set_form)
master_secret_set_form['data']['label'] = 'shhhhhhh'
await pspcobag.process_post(master_secret_set_form)
claims_reset_form = {
'type': 'claims-reset',
'data': {}
}
for ag in (bcobag, pspcobag): # reset all HolderProvers
assert not json.loads(await ag.process_post(claims_reset_form)) # response is {} if OK
i = 0
for s_key in schema_data:
claim_req_json[s_key] = await holder_prover[s_key.origin_did].process_post(claim_hello_form[s_key])
claim_req[s_key] = json.loads(claim_req_json[s_key])
assert claim_req[s_key]
# 6. BC Reg agent (as Issuer) issues claims and stores at HolderProver: get claim req, create claim, store claim
claim_data = {
S_KEY['BC']: [
{
'id': 1,
'busId': '11121398',
'orgTypeId': 2,
'jurisdictionId': 1,
'legalName': 'The Original House of Pies',
'effectiveDate': '2010-10-10',
'endDate': None
},
{
'id': 2,
'busId': '11133333',
'orgTypeId': 1,
'jurisdictionId': 1,
'legalName': 'Planet Cake',
'effectiveDate': '2011-10-01',
'endDate': None
},
{
'id': 3,
'busId': '11144444',
'orgTypeId': 2,
'jurisdictionId': 1,
'legalName': 'Tart City',
'effectiveDate': '2012-12-01',
'endDate': None
}
],
S_KEY['SRI-1.0']: [],
S_KEY['SRI-1.1']: [],
S_KEY['GREEN']: []
}
for s_key in claim_data:
for c in claim_data[s_key]:
claim_json[s_key] = await did2ag[s_key.origin_did].process_post({
'type': 'claim-create',
'data': {
'claim-req': claim_req[s_key],
'claim-attrs': c
}
})
claim[s_key] = json.loads(claim_json[s_key])
assert claim[s_key]
await holder_prover[s_key.origin_did].process_post({
'type': 'claim-store',
'data': {
'claim': claim[s_key]
}
})
# 7. BC Org Book agent (as HolderProver) finds claims; actuator filters post hoc
claims_found[S_KEY['BC']] = json.loads(await bcobag.process_post({
'type': 'claim-request',
'data': {
'schemata': list_schemata([S_KEY['BC']]),
'claim-filter': {
'attr-match': [],
'pred-match': [],
},
'requested-attrs': []
}
}))
print('\n\n== 4 == All BC claims, no filter: {}'.format(ppjson(claims_found[S_KEY['BC']])))
bc_display_pruned_filt_post_hoc = claims_for(
claims_found[S_KEY['BC']]['claims'],
{
S_KEY['BC']: {
'legalName': claim_data[S_KEY['BC']][2]['legalName']
}
})
try: # exercise proof restriction to one claim per attribute
await bcobag.process_post({
'type': 'proof-request',
'data': {
'schemata': list_schemata([S_KEY['BC']]),
'claim-filter': {
'attr-match': [],
'pred-match': [],
},
'requested-attrs': []
}
})
assert False
except ValueError:
pass # carry on: proof supports at most one claim per attribute
print('\n\n== 5 == display BC claims filtered post hoc matching {}: {}'.format(
claim_data[S_KEY['BC']][2]['legalName'],
ppjson(bc_display_pruned_filt_post_hoc)))
bc_display_pruned = prune_claims_json(
claims_found[S_KEY['BC']]['claims'],
{k for k in bc_display_pruned_filt_post_hoc})
print('\n\n== 6 == BC claims, stripped down {}'.format(ppjson(bc_display_pruned)))
bc_claims_prefilt_json = await bcobag.process_post({
'type': 'claim-request',
'data': {
'schemata': list_schemata([S_KEY['BC']]),
'claim-filter': {
'attr-match': [
attr_match(
S_KEY['BC'],
{
k: claim_data[S_KEY['BC']][2][k] for k in claim_data[S_KEY['BC']][2]
if k in ('jurisdictionId', 'busId')
}
)
],
'pred-match': []
},
'requested-attrs': []
}
})
bc_claims_prefilt = json.loads(bc_claims_prefilt_json)
print('\n\n== 6 == BC claims, with filter a priori, process-post: {}'.format(ppjson(bc_claims_prefilt)))
bc_display_pruned_prefilt = claims_for(bc_claims_prefilt['claims'])
print('\n\n== 7 == BC display claims filtered a priori matching {}: {}'.format(
claim_data[S_KEY['BC']][2]['legalName'],
ppjson(bc_display_pruned_prefilt)))
assert set([*bc_display_pruned_filt_post_hoc]) == set([*bc_display_pruned_prefilt])
assert len(bc_display_pruned_filt_post_hoc) == 1
# 8. BC Org Book agent (as HolderProver) creates proof (by filter)
bc_proof_resp = json.loads(await bcobag.process_post({
'type': 'proof-request',
'data': {
'schemata': list_schemata([S_KEY['BC']]),
'claim-filter': {
'attr-match': [
attr_match(
S_KEY['BC'],
{
k: claim_data[S_KEY['BC']][2][k] for k in claim_data[S_KEY['BC']][2]
if k in ('jurisdictionId', 'busId')
}
)
],
'pred-match': []
},
'requested-attrs': []
}
}))
print('\n\n== 8 == BC proof response (by filter): {}'.format(ppjson(bc_proof_resp)))
# 9. SRI agent (as Verifier) verifies proof (by filter)
rc_json = await sag.process_post({
'type': 'verification-request',
'data': bc_proof_resp
})
print('\n\n== 9 == the SRI agent verifies the BC proof (by filter) as: {}'.format(ppjson(rc_json)))
assert json.loads(rc_json)
# 10. BC Org Book agent (as HolderProver) creates proof (by referent)
bc_referent = set([*bc_display_pruned_prefilt]).pop()
s_key = set(schema_keys_for(bc_claims_prefilt['claims'], {bc_referent}).values()).pop() # it's unique
bc_proof_resp = json.loads(await bcobag.process_post({
'type': 'proof-request-by-referent',
'data': {
'schemata': list_schemata([s_key]),
'referents': [
bc_referent
],
'requested-attrs': []
}
}))
print('\n\n== 10 == BC proof response by referent={}: {}'.format(bc_referent, ppjson(bc_proof_resp)))
# 11. BC Org Book agent (as HolderProver) creates non-proof (by non-referent)
bc_non_referent = 'claim::ffffffff-ffff-ffff-ffff-ffffffffffff'
try:
json.loads(await bcobag.process_post({
'type': 'proof-request-by-referent',
'data': {
'schemata': list_schemata([s_key]),
'referents': [
bc_non_referent
],
'requested-attrs': []
}
}))
assert False
except ValueError:
pass
# 12. SRI agent (as Verifier) verifies proof (by referent)
rc_json = await sag.process_post({
'type': 'verification-request',
'data': bc_proof_resp
})
print('\n\n== 12 == SRI agent verifies BC proof by referent={} as: {}'.format(
bc_referent,
ppjson(rc_json)))
assert json.loads(rc_json)
# 13. BC Org Book agent (as HolderProver) finds claims by predicate on default attr-match, req-attrs w/schema
claims_found_pred = json.loads(await bcobag.process_post({
'type': 'claim-request',
'data': {
'schemata': list_schemata([S_KEY['BC']]),
'claim-filter': {
'attr-match': [],
'pred-match': [
pred_match(
S_KEY['BC'],
[
pred_match_match('id', '>=', claim_data[S_KEY['BC']][2]['id'])
])
],
},
'requested-attrs': [req_attrs(S_KEY['BC'])]
}
}))
assert (set(req_attr['name'] for req_attr in claims_found_pred['proof-req']['requested_attrs'].values()) ==
set(schema_data[S_KEY['BC']]['attr_names']) - {'id'})
assert (set(req_pred['attr_name']
for req_pred in claims_found_pred['proof-req']['requested_predicates'].values()) == {'id'})
# 14. BC Org Book agent (as HolderProver) finds claims by predicate on default attr-match and req-attrs
claims_found_pred = json.loads(await bcobag.process_post({
'type': 'claim-request',
'data': {
'schemata': list_schemata([S_KEY['BC']]),
'claim-filter': {
'attr-match': [],
'pred-match': [
pred_match(
S_KEY['BC'],
[
pred_match_match('id', '>=', claim_data[S_KEY['BC']][2]['id'])
])
],
},
'requested-attrs': []
}
}))
assert (set(req_attr['name'] for req_attr in claims_found_pred['proof-req']['requested_attrs'].values()) ==
set(schema_data[S_KEY['BC']]['attr_names']) - {'id'})
assert (set(req_pred['attr_name']
for req_pred in claims_found_pred['proof-req']['requested_predicates'].values()) == {'id'})
print('\n\n== 13 == BC claims structure by predicate: {}'.format(ppjson(claims_found_pred)))
bc_display_pred = claims_for(claims_found_pred['claims'])
print('\n\n== 14 == BC display claims by predicate: {}'.format(ppjson(bc_display_pred)))
assert len(bc_display_pred) == 1
# 15. BC Org Book agent (as HolderProver) creates proof by predicate, default req-attrs
bc_proof_resp_pred = json.loads(await bcobag.process_post({
'type': 'proof-request',
'data': {
'schemata': list_schemata([S_KEY['BC']]),
'claim-filter': {
'attr-match': [],
'pred-match': [
pred_match(
S_KEY['BC'],
[
pred_match_match('id', '>=', 2),
pred_match_match('orgTypeId', '>=', 2)
]) # resolves to one claim
]
},
'requested-attrs': []
}
}))
print('\n\n== 15 == BC proof by predicates id, orgTypeId >= 2: {}'.format(ppjson(bc_proof_resp_pred)))
revealed = revealed_attrs(bc_proof_resp_pred['proof'])
print('\n\n== 16 == BC proof revealed attrs by predicates id, orgTypeId >= 2: {}'.format(ppjson(revealed)))
assert len(revealed) == 1
assert (set(revealed[set(revealed.keys()).pop()].keys()) ==
set(schema_data[S_KEY['BC']]['attr_names']) - set(('id', 'orgTypeId')))
# 16. SRI agent (as Verifier) verifies proof (by predicates)
rc_json = await sag.process_post({
'type': 'verification-request',
'data': bc_proof_resp_pred
})
print('\n\n== 17 == SRI agent verifies BC proof by predicates id, orgTypeId >= 2 as: {}'.format(ppjson(rc_json)))
assert json.loads(rc_json)
# 17. Create and store SRI registration completion claims, green claims from verified proof + extra data
revealed = revealed_attrs(bc_proof_resp['proof'])[bc_referent]
claim_data[S_KEY['SRI-1.0']].append({
**{k: revealed[k] for k in revealed if k in schema_data[S_KEY['SRI-1.0']]['attr_names']},
'sriRegDate': datetime.date.today().strftime('%Y-%m-%d')
})
claim_data[S_KEY['SRI-1.1']].append({
**{k: revealed[k] for k in revealed if k in schema_data[S_KEY['SRI-1.1']]['attr_names']},
'sriRegDate': datetime.date.today().strftime('%Y-%m-%d'),
'businessLang': 'EN-CA'
})
claim_data[S_KEY['GREEN']].append({
**{k: revealed[k] for k in revealed if k in schema_data[S_KEY['GREEN']]['attr_names']},
'greenLevel': 'Silver',
'auditDate': datetime.date.today().strftime('%Y-%m-%d')
})
i = 0
for s_key in claim_data:
if s_key == S_KEY['BC']:
continue
for c in claim_data[s_key]:
print('\n\n== 18.{} == Data for SRI claim on [{} v{}]: {}'.format(
i,
s_key.name,
s_key.version,
ppjson(c)))
claim_json[s_key] = await did2ag[s_key.origin_did].process_post({
'type': 'claim-create',
'data': {
'claim-req': claim_req[s_key],
'claim-attrs': c
}
})
claim[s_key] = json.loads(claim_json[s_key])
assert claim[s_key]
await holder_prover[s_key.origin_did].process_post({
'type': 'claim-store',
'data': {
'claim': claim[s_key]
}
})
i += 1
# 18. PSPC Org Book agent (as HolderProver) finds all claims, one schema at a time
i = 0
for s_key in schema:
if s_key == S_KEY['BC']:
continue
sri_claim = json.loads(await holder_prover[s_key.origin_did].process_post({
'type': 'claim-request',
'data': {
'schemata': list_schemata([s_key]),
'claim-filter': {
'attr-match': [],
'pred-match': []
},
'requested-attrs': []
}
}))
print('\n\n== 19.{}.0 == SRI claims on [{} v{}], no filter: {}'.format(
i,
s_key.name,
s_key.version,
ppjson(sri_claim)))
assert len(sri_claim['claims']['attrs']) == (len(schema_data[s_key]['attr_names']))
sri_claim = json.loads(await holder_prover[s_key.origin_did].process_post({
'type': 'claim-request',
'data': {
'schemata': [],
'claim-filter': {
'attr-match': [attr_match(s_key)],
'pred-match': []
},
'requested-attrs': []
}
}))
print('\n\n== 19.{}.1 == SRI claims, filter for all attrs in schema [{} v{}]: {}'.format(
i,
s_key.name,
s_key.version,
ppjson(sri_claim)))
i += 1
assert len(sri_claim['claims']['attrs']) == (len(schema_data[s_key]['attr_names']))
# 19. PSPC Org Book agent (as HolderProver) finds all claims, for all schemata, on first attr per schema
sri_claims_all = json.loads(await pspcobag.process_post({
'type': 'claim-request',
'data': {
'schemata': list_schemata([s_key for s_key in schema_data if s_key != S_KEY['BC']]),
'claim-filter': {
'attr-match': [],
'pred-match': [
]
},
'requested-attrs': [req_attrs(s_key, [schema_data[s_key]['attr_names'][0]])
for s_key in schema_data if s_key != S_KEY['BC']]
}
}))
print('\n\n== 20 == All SRI claims at PSPC Org Book, first attr only: {}'.format(ppjson(sri_claims_all)))
assert len(sri_claims_all['claims']['attrs']) == (len(schema_data) - 1) # all schema_data except BC
# 20. PSPC Org Book agent (as HolderProver) finds all claims on all schemata at once
sri_claims_all = json.loads(await pspcobag.process_post({
'type': 'claim-request',
'data': {
'schemata': list_schemata([s_key for s_key in schema_data if s_key != S_KEY['BC']]),
'claim-filter': {
'attr-match': [],
'pred-match': []
},
'requested-attrs': []
}
}))
print('\n\n== 21 == All SRI claims at PSPC Org Book: {}'.format(ppjson(sri_claims_all)))
sri_display = claims_for(sri_claims_all['claims'])
print('\n\n== 22 == All SRI claims at PSPC Org Book by referent: {}'.format(ppjson(sri_display)))
# 21. PSPC Org Book agent (as HolderProver) creates (multi-claim) proof
sri_proof_resp = json.loads(await pspcobag.process_post({
'type': 'proof-request',
'data': {
'schemata': list_schemata([s_key for s_key in schema_data if s_key != S_KEY['BC']]),
'claim-filter': {
'attr-match': [],
'pred-match': []
},
'requested-attrs': []
}
}))
print('\n\n== 23 == PSPC org book proof to all-claims response: {}'.format(ppjson(sri_proof_resp)))
assert len(sri_proof_resp['proof']['proof']['proofs']) == len(sri_display)
# 22. SRI agent (as Verifier) verifies proof
rc_json = await sag.process_post({
'type': 'verification-request',
'data': sri_proof_resp
})
print('\n\n== 24 == SRI agent verifies PSPC org book proof as: {}'.format(ppjson(rc_json)))
assert json.loads(rc_json)
# 23. PSPC Org Book agent (as HolderProver) creates (multi-claim) proof by referent
referent2schema_key = schema_keys_for(sri_claims_all['claims'], {k for k in sri_display})
sri_proof_resp = json.loads(await pspcobag.process_post({
'type': 'proof-request-by-referent',
'data': {
'schemata': list_schemata([referent2schema_key[referent] for referent in sri_display]),
'referents': [
referent for referent in sri_display
],
'requested-attrs': []
}
}))
print('\n\n== 25 == PSPC org book proof to all-claims on referents {}: {}'.format(
[referent for referent in sri_display],
ppjson(sri_proof_resp)))
assert len(sri_proof_resp['proof']['proof']['proofs']) == len(sri_display)
# 24. SRI agent (as Verifier) verifies proof
rc_json = await sag.process_post({
'type': 'verification-request',
'data': sri_proof_resp
})
print('\n\n== 26 == SRI agent verifies PSPC org book proof as: {}'.format(ppjson(rc_json)))
assert json.loads(rc_json)
# 25. PSPC Org Book agent (as HolderProver) creates multi-claim proof, schemata implicit, first attrs only
sri_proof_resp = json.loads(await pspcobag.process_post({
'type': 'proof-request-by-referent',
'data': {
'schemata': [],
'referents': [
referent for referent in sri_display
],
'requested-attrs': [req_attrs(s_key, [schema_data[s_key]['attr_names'][0]])
for s_key in schema_data if s_key != S_KEY['BC']]
}
}))
print('\n\n== 27 == PSPC org book proof to all claims by referent, first attrs, schemata implicit {}: {}'
.format(
[referent for referent in sri_display],
ppjson(sri_proof_resp)))
assert {sri_proof_resp['proof-req']['requested_attrs'][k]['name']
for k in sri_proof_resp['proof-req']['requested_attrs']} == {
schema_data[s_key]['attr_names'][0] for s_key in schema_data if s_key != S_KEY['BC']}
# 26. SRI agent (as Verifier) verifies proof
rc_json = await sag.process_post({
'type': 'verification-request',
'data': sri_proof_resp
})
print('\n\n== 28 == SRI agent verifies PSPC org book proof as: {}'.format(ppjson(rc_json)))
assert json.loads(rc_json)
# 27. PSPC Org Book agent (as HolderProver) creates proof on req-attrs for all green schema attrs
sri_proof_resp = json.loads(await pspcobag.process_post({
'type': 'proof-request',
'data': {
'schemata': [],
'claim-filter': {
'attr-match': [],
'pred-match': []
},
'requested-attrs': [req_attrs(S_KEY['GREEN'])]
}
}))
print('\n\n== 29 == PSPC org book proof to green claims response: {}'.format(ppjson(sri_proof_resp)))
assert {sri_proof_resp['proof-req']['requested_attrs'][k]['name']
for k in sri_proof_resp['proof-req']['requested_attrs']} == set(schema_data[S_KEY['GREEN']]['attr_names'])
# 28. SRI agent (as Verifier) verifies proof
rc_json = await sag.process_post({
'type': 'verification-request',
'data': sri_proof_resp
})
print('\n\n== 30 == SRI agent verifies PSPC Org Book proof as: {}'.format(ppjson(rc_json)))
assert json.loads(rc_json)
# 29. Exercise helper GET calls
txn_json = await sag.process_get_txn(schema[S_KEY['GREEN']]['seqNo'])
print('\n\n== 31 == GREEN schema by txn #{}: {}'.format(schema[S_KEY['GREEN']]['seqNo'], ppjson(txn_json)))
assert json.loads(txn_json)
txn_json = await sag.process_get_txn(99999) # ought not exist
assert not json.loads(txn_json)
did_json = await bcrag.process_get_did()
print('\n\n== 32 == BC Registrar agent did: {}'.format(ppjson(did_json)))
assert json.loads(did_json)
| 40.020752
| 121
| 0.512931
|
a14a0ab9818a1f58585089b02b57135639242f55
| 6,269
|
py
|
Python
|
loldib/getratings/models/NA/na_ekko/na_ekko_sup.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_ekko/na_ekko_sup.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_ekko/na_ekko_sup.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
class NA_Ekko_Sup_Aatrox(Ratings):
pass
class NA_Ekko_Sup_Ahri(Ratings):
pass
class NA_Ekko_Sup_Akali(Ratings):
pass
class NA_Ekko_Sup_Alistar(Ratings):
pass
class NA_Ekko_Sup_Amumu(Ratings):
pass
class NA_Ekko_Sup_Anivia(Ratings):
pass
class NA_Ekko_Sup_Annie(Ratings):
pass
class NA_Ekko_Sup_Ashe(Ratings):
pass
class NA_Ekko_Sup_AurelionSol(Ratings):
pass
class NA_Ekko_Sup_Azir(Ratings):
pass
class NA_Ekko_Sup_Bard(Ratings):
pass
class NA_Ekko_Sup_Blitzcrank(Ratings):
pass
class NA_Ekko_Sup_Brand(Ratings):
pass
class NA_Ekko_Sup_Braum(Ratings):
pass
class NA_Ekko_Sup_Caitlyn(Ratings):
pass
class NA_Ekko_Sup_Camille(Ratings):
pass
class NA_Ekko_Sup_Cassiopeia(Ratings):
pass
class NA_Ekko_Sup_Chogath(Ratings):
pass
class NA_Ekko_Sup_Corki(Ratings):
pass
class NA_Ekko_Sup_Darius(Ratings):
pass
class NA_Ekko_Sup_Diana(Ratings):
pass
class NA_Ekko_Sup_Draven(Ratings):
pass
class NA_Ekko_Sup_DrMundo(Ratings):
pass
class NA_Ekko_Sup_Ekko(Ratings):
pass
class NA_Ekko_Sup_Elise(Ratings):
pass
class NA_Ekko_Sup_Evelynn(Ratings):
pass
class NA_Ekko_Sup_Ezreal(Ratings):
pass
class NA_Ekko_Sup_Fiddlesticks(Ratings):
pass
class NA_Ekko_Sup_Fiora(Ratings):
pass
class NA_Ekko_Sup_Fizz(Ratings):
pass
class NA_Ekko_Sup_Galio(Ratings):
pass
class NA_Ekko_Sup_Gangplank(Ratings):
pass
class NA_Ekko_Sup_Garen(Ratings):
pass
class NA_Ekko_Sup_Gnar(Ratings):
pass
class NA_Ekko_Sup_Gragas(Ratings):
pass
class NA_Ekko_Sup_Graves(Ratings):
pass
class NA_Ekko_Sup_Hecarim(Ratings):
pass
class NA_Ekko_Sup_Heimerdinger(Ratings):
pass
class NA_Ekko_Sup_Illaoi(Ratings):
pass
class NA_Ekko_Sup_Irelia(Ratings):
pass
class NA_Ekko_Sup_Ivern(Ratings):
pass
class NA_Ekko_Sup_Janna(Ratings):
pass
class NA_Ekko_Sup_JarvanIV(Ratings):
pass
class NA_Ekko_Sup_Jax(Ratings):
pass
class NA_Ekko_Sup_Jayce(Ratings):
pass
class NA_Ekko_Sup_Jhin(Ratings):
pass
class NA_Ekko_Sup_Jinx(Ratings):
pass
class NA_Ekko_Sup_Kalista(Ratings):
pass
class NA_Ekko_Sup_Karma(Ratings):
pass
class NA_Ekko_Sup_Karthus(Ratings):
pass
class NA_Ekko_Sup_Kassadin(Ratings):
pass
class NA_Ekko_Sup_Katarina(Ratings):
pass
class NA_Ekko_Sup_Kayle(Ratings):
pass
class NA_Ekko_Sup_Kayn(Ratings):
pass
class NA_Ekko_Sup_Kennen(Ratings):
pass
class NA_Ekko_Sup_Khazix(Ratings):
pass
class NA_Ekko_Sup_Kindred(Ratings):
pass
class NA_Ekko_Sup_Kled(Ratings):
pass
class NA_Ekko_Sup_KogMaw(Ratings):
pass
class NA_Ekko_Sup_Leblanc(Ratings):
pass
class NA_Ekko_Sup_LeeSin(Ratings):
pass
class NA_Ekko_Sup_Leona(Ratings):
pass
class NA_Ekko_Sup_Lissandra(Ratings):
pass
class NA_Ekko_Sup_Lucian(Ratings):
pass
class NA_Ekko_Sup_Lulu(Ratings):
pass
class NA_Ekko_Sup_Lux(Ratings):
pass
class NA_Ekko_Sup_Malphite(Ratings):
pass
class NA_Ekko_Sup_Malzahar(Ratings):
pass
class NA_Ekko_Sup_Maokai(Ratings):
pass
class NA_Ekko_Sup_MasterYi(Ratings):
pass
class NA_Ekko_Sup_MissFortune(Ratings):
pass
class NA_Ekko_Sup_MonkeyKing(Ratings):
pass
class NA_Ekko_Sup_Mordekaiser(Ratings):
pass
class NA_Ekko_Sup_Morgana(Ratings):
pass
class NA_Ekko_Sup_Nami(Ratings):
pass
class NA_Ekko_Sup_Nasus(Ratings):
pass
class NA_Ekko_Sup_Nautilus(Ratings):
pass
class NA_Ekko_Sup_Nidalee(Ratings):
pass
class NA_Ekko_Sup_Nocturne(Ratings):
pass
class NA_Ekko_Sup_Nunu(Ratings):
pass
class NA_Ekko_Sup_Olaf(Ratings):
pass
class NA_Ekko_Sup_Orianna(Ratings):
pass
class NA_Ekko_Sup_Ornn(Ratings):
pass
class NA_Ekko_Sup_Pantheon(Ratings):
pass
class NA_Ekko_Sup_Poppy(Ratings):
pass
class NA_Ekko_Sup_Quinn(Ratings):
pass
class NA_Ekko_Sup_Rakan(Ratings):
pass
class NA_Ekko_Sup_Rammus(Ratings):
pass
class NA_Ekko_Sup_RekSai(Ratings):
pass
class NA_Ekko_Sup_Renekton(Ratings):
pass
class NA_Ekko_Sup_Rengar(Ratings):
pass
class NA_Ekko_Sup_Riven(Ratings):
pass
class NA_Ekko_Sup_Rumble(Ratings):
pass
class NA_Ekko_Sup_Ryze(Ratings):
pass
class NA_Ekko_Sup_Sejuani(Ratings):
pass
class NA_Ekko_Sup_Shaco(Ratings):
pass
class NA_Ekko_Sup_Shen(Ratings):
pass
class NA_Ekko_Sup_Shyvana(Ratings):
pass
class NA_Ekko_Sup_Singed(Ratings):
pass
class NA_Ekko_Sup_Sion(Ratings):
pass
class NA_Ekko_Sup_Sivir(Ratings):
pass
class NA_Ekko_Sup_Skarner(Ratings):
pass
class NA_Ekko_Sup_Sona(Ratings):
pass
class NA_Ekko_Sup_Soraka(Ratings):
pass
class NA_Ekko_Sup_Swain(Ratings):
pass
class NA_Ekko_Sup_Syndra(Ratings):
pass
class NA_Ekko_Sup_TahmKench(Ratings):
pass
class NA_Ekko_Sup_Taliyah(Ratings):
pass
class NA_Ekko_Sup_Talon(Ratings):
pass
class NA_Ekko_Sup_Taric(Ratings):
pass
class NA_Ekko_Sup_Teemo(Ratings):
pass
class NA_Ekko_Sup_Thresh(Ratings):
pass
class NA_Ekko_Sup_Tristana(Ratings):
pass
class NA_Ekko_Sup_Trundle(Ratings):
pass
class NA_Ekko_Sup_Tryndamere(Ratings):
pass
class NA_Ekko_Sup_TwistedFate(Ratings):
pass
class NA_Ekko_Sup_Twitch(Ratings):
pass
class NA_Ekko_Sup_Udyr(Ratings):
pass
class NA_Ekko_Sup_Urgot(Ratings):
pass
class NA_Ekko_Sup_Varus(Ratings):
pass
class NA_Ekko_Sup_Vayne(Ratings):
pass
class NA_Ekko_Sup_Veigar(Ratings):
pass
class NA_Ekko_Sup_Velkoz(Ratings):
pass
class NA_Ekko_Sup_Vi(Ratings):
pass
class NA_Ekko_Sup_Viktor(Ratings):
pass
class NA_Ekko_Sup_Vladimir(Ratings):
pass
class NA_Ekko_Sup_Volibear(Ratings):
pass
class NA_Ekko_Sup_Warwick(Ratings):
pass
class NA_Ekko_Sup_Xayah(Ratings):
pass
class NA_Ekko_Sup_Xerath(Ratings):
pass
class NA_Ekko_Sup_XinZhao(Ratings):
pass
class NA_Ekko_Sup_Yasuo(Ratings):
pass
class NA_Ekko_Sup_Yorick(Ratings):
pass
class NA_Ekko_Sup_Zac(Ratings):
pass
class NA_Ekko_Sup_Zed(Ratings):
pass
class NA_Ekko_Sup_Ziggs(Ratings):
pass
class NA_Ekko_Sup_Zilean(Ratings):
pass
class NA_Ekko_Sup_Zyra(Ratings):
pass
| 15.033573
| 46
| 0.75642
|
78f3244d0e103c22a49cfd4f4edda02c4848f4d9
| 1,255
|
py
|
Python
|
ubelt/util_func.py
|
russelldj/ubelt
|
c82ae8c8ead66f0c406d28d680430f7df00bb32b
|
[
"Apache-2.0"
] | null | null | null |
ubelt/util_func.py
|
russelldj/ubelt
|
c82ae8c8ead66f0c406d28d680430f7df00bb32b
|
[
"Apache-2.0"
] | null | null | null |
ubelt/util_func.py
|
russelldj/ubelt
|
c82ae8c8ead66f0c406d28d680430f7df00bb32b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Helpers for functional programming
"""
def identity(arg):
"""
The identity function. Simply returns its inputs.
Example:
>>> assert identity(42) == 42
"""
return arg
def inject_method(self, func, name=None):
"""
Injects a function into an object instance as a bound method
Args:
self (object): instance to inject a function into
func (func): the function to inject (must contain an arg for self)
name (str): name of the method. optional. If not specified the name
of the function is used.
Example:
>>> class Foo(object):
>>> def bar(self):
>>> return 'bar'
>>> def baz(self):
>>> return 'baz'
>>> self = Foo()
>>> assert self.bar() == 'bar'
>>> assert not hasattr(self, 'baz')
>>> inject_method(self, baz)
>>> assert not hasattr(Foo, 'baz'), 'should only change one instance'
>>> assert self.baz() == 'baz'
>>> inject_method(self, baz, 'bar')
>>> assert self.bar() == 'baz'
"""
new_method = func.__get__(self, self.__class__)
if name is None:
name = func.__name__
setattr(self, name, new_method)
| 27.282609
| 77
| 0.553785
|
26fd690cf91c1d2a5b40382a82e506fe20d3420d
| 161
|
py
|
Python
|
backend/quiz/quizapp/admin.py
|
Qwizi/Quiz-App
|
4eee0320363cd268724826c08bc902dea8cc557a
|
[
"MIT"
] | 1
|
2019-09-20T17:41:45.000Z
|
2019-09-20T17:41:45.000Z
|
backend/quiz/quizapp/admin.py
|
Qwizi/Quiz-App
|
4eee0320363cd268724826c08bc902dea8cc557a
|
[
"MIT"
] | null | null | null |
backend/quiz/quizapp/admin.py
|
Qwizi/Quiz-App
|
4eee0320363cd268724826c08bc902dea8cc557a
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Quiz, Answer, Question
admin.site.register(Answer)
admin.site.register(Question)
admin.site.register(Quiz)
| 23
| 42
| 0.813665
|
d99dfca7d2c06e2f9f5527b57af58f023586d846
| 2,184
|
py
|
Python
|
test/unit/reflectivity_ui/interfaces/event_handlers/test_main_handler.py
|
mdoucet/reflectivity_ui
|
aa646e6f8ad91eaedf70ec7b9230e79970e3cdf0
|
[
"Apache-2.0"
] | 1
|
2017-11-19T22:23:13.000Z
|
2017-11-19T22:23:13.000Z
|
test/unit/reflectivity_ui/interfaces/event_handlers/test_main_handler.py
|
mdoucet/reflectivity_ui
|
aa646e6f8ad91eaedf70ec7b9230e79970e3cdf0
|
[
"Apache-2.0"
] | null | null | null |
test/unit/reflectivity_ui/interfaces/event_handlers/test_main_handler.py
|
mdoucet/reflectivity_ui
|
aa646e6f8ad91eaedf70ec7b9230e79970e3cdf0
|
[
"Apache-2.0"
] | null | null | null |
# package imports
from reflectivity_ui.interfaces.main_window import MainWindow
from reflectivity_ui.interfaces.event_handlers.main_handler import MainHandler
# 3rd-party imports
from PyQt5.QtWidgets import QApplication
import pytest
# standard imports
import os
import sys
this_module_path = sys.modules[__name__].__file__
class DataManagerMock(object):
current_directory = os.path.dirname(this_module_path)
class MainWindowMock(object):
ui = None
main_window = None
data_manager = DataManagerMock()
class TestMainHandler(object):
app = QApplication(sys.argv)
application = MainWindow()
handler = MainHandler(application)
def test_congruency_fail_report(self, data_server):
# Selected subset of log names with an invalid one
message = self.handler._congruency_fail_report([data_server.path_to('REF_M_24945_event.nxs'),
data_server.path_to('REF_M_24949_event.nxs')],
log_names=['LambdaRequest', 'NoLog'])
assert message == 'NoLog is not a valid Log for comparison'
# Valid subset of log name
message = self.handler._congruency_fail_report([data_server.path_to('REF_M_24945_event.nxs'),
data_server.path_to('REF_M_24949_event.nxs')],
log_names=['LambdaRequest', 'frequency'])
assert message == ''
# Old files
message = self.handler._congruency_fail_report([data_server.path_to('REF_M_24945_event.nxs'),
data_server.path_to('REF_M_24949_event.nxs')])
assert 'values for log S3Vheight that differ above tolerance 0.01' in message
# New files
message = self.handler._congruency_fail_report([data_server.path_to('REF_M_38198.nxs.h5'),
data_server.path_to('REF_M_38199.nxs.h5')])
assert 'values for log DANGLE that differ above tolerance 0.01' in message
if __name__ == '__main__':
pytest.main([__file__])
| 36.4
| 102
| 0.630952
|
2230a5392223add5d5d7548eda8b1146b046b1bb
| 378
|
py
|
Python
|
week 2/isomorphic.py
|
Dawit-Getachew/A2SV_Practice
|
2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61
|
[
"MIT"
] | null | null | null |
week 2/isomorphic.py
|
Dawit-Getachew/A2SV_Practice
|
2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61
|
[
"MIT"
] | null | null | null |
week 2/isomorphic.py
|
Dawit-Getachew/A2SV_Practice
|
2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61
|
[
"MIT"
] | null | null | null |
class Solution:
def isIsomorphic(self, s: str, t: str) -> bool:
isomorphicS = dict()
isomorphicT = dict()
for a, b in zip(s, t):
if ((a in isomorphicS and isomorphicS[a] != b) or (b in isomorphicT and isomorphicT[b] != a)):
return False
isomorphicS[a] = b
isomorphicT[b] = a
return True
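# Illustrative usage sketch (editorial addition, not part of the original file): the two
# dictionaries above enforce a one-to-one character mapping in both directions.
if __name__ == "__main__":
    solver = Solution()
    assert solver.isIsomorphic("egg", "add")        # e->a, g->d is consistent both ways
    assert not solver.isIsomorphic("foo", "bar")    # o cannot map to both a and r
    assert not solver.isIsomorphic("badc", "baba")  # d cannot reuse b, already mapped from b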
| 37.8
| 106
| 0.521164
|
d0b6e57f182873d133b02e6c08ecaaed2b73fb21
| 646
|
py
|
Python
|
custom_components/auto_backup/const.py
|
kcleong/homeassistant-config
|
15b7bc75f5d1055d8620ced87eed9d563475296d
|
[
"MIT"
] | 138
|
2019-12-09T09:18:42.000Z
|
2022-03-30T20:17:30.000Z
|
custom_components/auto_backup/const.py
|
kcleong/homeassistant-config
|
15b7bc75f5d1055d8620ced87eed9d563475296d
|
[
"MIT"
] | 46
|
2019-12-09T09:59:18.000Z
|
2022-03-25T22:52:00.000Z
|
custom_components/auto_backup/const.py
|
kcleong/homeassistant-config
|
15b7bc75f5d1055d8620ced87eed9d563475296d
|
[
"MIT"
] | 12
|
2020-03-17T18:14:14.000Z
|
2022-01-31T21:55:29.000Z
|
DOMAIN = "auto_backup"
DATA_AUTO_BACKUP = "auto_backup"
UNSUB_LISTENER = "unsub_listener"
CONF_AUTO_PURGE = "auto_purge"
CONF_BACKUP_TIMEOUT = "backup_timeout"
DEFAULT_BACKUP_TIMEOUT_SECONDS = 1200
DEFAULT_BACKUP_TIMEOUT = 20
EVENT_BACKUP_SUCCESSFUL = f"{DOMAIN}.backup_successful"
EVENT_BACKUP_START = f"{DOMAIN}.backup_start"
EVENT_BACKUP_FAILED = f"{DOMAIN}.backup_failed"
EVENT_BACKUPS_PURGED = f"{DOMAIN}.purged_backups"
EVENT_SNAPSHOT_SUCCESSFUL = f"{DOMAIN}.snapshot_successful"
EVENT_SNAPSHOT_START = f"{DOMAIN}.snapshot_start"
EVENT_SNAPSHOT_FAILED = f"{DOMAIN}.snapshot_failed"
EVENT_SNAPSHOTS_PURGED = f"{DOMAIN}.purged_snapshots"
| 32.3
| 59
| 0.823529
|
5bbe02e83e0265bcc2561ee00f963a77d6f557cd
| 25,035
|
py
|
Python
|
venv/Lib/site-packages/supervisor/tests/test_http.py
|
hanxt6/BKframeworkObj
|
91a4869db3d48fd4bb10507acbddd94c7c921091
|
[
"Apache-2.0"
] | 39
|
2016-12-05T14:36:37.000Z
|
2021-07-29T18:22:34.000Z
|
venv/Lib/site-packages/supervisor/tests/test_http.py
|
hanxt6/BKframeworkObj
|
91a4869db3d48fd4bb10507acbddd94c7c921091
|
[
"Apache-2.0"
] | 68
|
2016-12-12T20:38:47.000Z
|
2020-07-26T18:28:49.000Z
|
venv/Lib/site-packages/supervisor/tests/test_http.py
|
wenyueFan/devopsOfBk
|
ab5f53f2296101ecb40f8f1b3eead7aa736d12fa
|
[
"Apache-2.0"
] | 120
|
2016-08-18T14:53:03.000Z
|
2020-06-16T13:27:20.000Z
|
import base64
import os
import socket
import stat
import sys
import tempfile
import unittest
try:
from hashlib import sha1
except ImportError:
from sha import new as sha1
from supervisor.tests.base import DummySupervisor
from supervisor.tests.base import PopulatedDummySupervisor
from supervisor.tests.base import DummyRPCInterfaceFactory
from supervisor.tests.base import DummyPConfig
from supervisor.tests.base import DummyOptions
from supervisor.tests.base import DummyRequest
from supervisor.tests.base import DummyLogger
from supervisor.http import NOT_DONE_YET
class HandlerTests:
def _makeOne(self, supervisord):
return self._getTargetClass()(supervisord)
def test_match(self):
class FakeRequest:
def __init__(self, uri):
self.uri = uri
supervisor = DummySupervisor()
handler = self._makeOne(supervisor)
self.assertEqual(handler.match(FakeRequest(handler.path)), True)
class LogtailHandlerTests(HandlerTests, unittest.TestCase):
def _getTargetClass(self):
from supervisor.http import logtail_handler
return logtail_handler
def test_handle_request_stdout_logfile_none(self):
options = DummyOptions()
pconfig = DummyPConfig(options, 'process1', '/bin/process1', priority=1,
stdout_logfile='/tmp/process1.log')
supervisord = PopulatedDummySupervisor(options, 'process1', pconfig)
handler = self._makeOne(supervisord)
request = DummyRequest('/logtail/process1', None, None, None)
handler.handle_request(request)
self.assertEqual(request._error, 410)
def test_handle_request_stdout_logfile_missing(self):
options = DummyOptions()
pconfig = DummyPConfig(options, 'foo', 'foo', 'it/is/missing')
supervisord = PopulatedDummySupervisor(options, 'foo', pconfig)
handler = self._makeOne(supervisord)
request = DummyRequest('/logtail/foo', None, None, None)
handler.handle_request(request)
self.assertEqual(request._error, 410)
def test_handle_request(self):
f = tempfile.NamedTemporaryFile()
t = f.name
options = DummyOptions()
pconfig = DummyPConfig(options, 'foo', 'foo', stdout_logfile=t)
supervisord = PopulatedDummySupervisor(options, 'foo', pconfig)
handler = self._makeOne(supervisord)
request = DummyRequest('/logtail/foo', None, None, None)
handler.handle_request(request)
self.assertEqual(request._error, None)
from supervisor.medusa import http_date
self.assertEqual(request.headers['Last-Modified'],
http_date.build_http_date(os.stat(t)[stat.ST_MTIME]))
self.assertEqual(request.headers['Content-Type'], 'text/plain')
self.assertEqual(len(request.producers), 1)
self.assertEqual(request._done, True)
class MainLogTailHandlerTests(HandlerTests, unittest.TestCase):
def _getTargetClass(self):
from supervisor.http import mainlogtail_handler
return mainlogtail_handler
def test_handle_request_stdout_logfile_none(self):
supervisor = DummySupervisor()
handler = self._makeOne(supervisor)
request = DummyRequest('/mainlogtail', None, None, None)
handler.handle_request(request)
self.assertEqual(request._error, 410)
def test_handle_request_stdout_logfile_missing(self):
supervisor = DummySupervisor()
supervisor.options.logfile = '/not/there'
request = DummyRequest('/mainlogtail', None, None, None)
handler = self._makeOne(supervisor)
handler.handle_request(request)
self.assertEqual(request._error, 410)
def test_handle_request(self):
supervisor = DummySupervisor()
f = tempfile.NamedTemporaryFile()
t = f.name
supervisor.options.logfile = t
handler = self._makeOne(supervisor)
request = DummyRequest('/mainlogtail', None, None, None)
handler.handle_request(request)
self.assertEqual(request._error, None)
from supervisor.medusa import http_date
self.assertEqual(request.headers['Last-Modified'],
http_date.build_http_date(os.stat(t)[stat.ST_MTIME]))
self.assertEqual(request.headers['Content-Type'], 'text/plain')
self.assertEqual(len(request.producers), 1)
self.assertEqual(request._done, True)
class TailFProducerTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.http import tail_f_producer
return tail_f_producer
def _makeOne(self, request, filename, head):
return self._getTargetClass()(request, filename, head)
def test_handle_more(self):
request = DummyRequest('/logtail/foo', None, None, None)
from supervisor import http
f = tempfile.NamedTemporaryFile()
f.write('a' * 80)
f.flush()
producer = self._makeOne(request, f.name, 80)
result = producer.more()
self.assertEqual(result, 'a' * 80)
f.write('w' * 100)
f.flush()
result = producer.more()
self.assertEqual(result, 'w' * 100)
result = producer.more()
self.assertEqual(result, http.NOT_DONE_YET)
f.truncate(0)
f.flush()
result = producer.more()
self.assertEqual(result, '==> File truncated <==\n')
def test_handle_more_fd_closed(self):
request = DummyRequest('/logtail/foo', None, None, None)
f = tempfile.NamedTemporaryFile()
f.write('a' * 80)
f.flush()
producer = self._makeOne(request, f.name, 80)
producer.file.close()
result = producer.more()
self.assertEqual(result, producer.more())
def test_handle_more_follow_file_recreated(self):
request = DummyRequest('/logtail/foo', None, None, None)
f = tempfile.NamedTemporaryFile()
f.write('a' * 80)
f.flush()
producer = self._makeOne(request, f.name, 80)
result = producer.more()
self.assertEqual(result, 'a' * 80)
f.close()
f2 = open(f.name, 'w')
try:
f2.write('b' * 80)
f2.close()
result = producer.more()
finally:
os.unlink(f2.name)
self.assertEqual(result, 'b' * 80)
def test_handle_more_follow_file_gone(self):
request = DummyRequest('/logtail/foo', None, None, None)
filename = tempfile.mktemp()
f = open(filename, 'wb')
f.write('a' * 80)
f.close()
try:
producer = self._makeOne(request, f.name, 80)
finally:
os.unlink(f.name)
result = producer.more()
self.assertEqual(result, 'a' * 80)
f = open(filename, 'wb')
f.write('b' * 80)
f.close()
try:
result = producer.more() # should open in new file
self.assertEqual(result, 'b' * 80)
finally:
os.unlink(f.name)
class DeferringChunkedProducerTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.http import deferring_chunked_producer
return deferring_chunked_producer
def _makeOne(self, producer, footers=None):
return self._getTargetClass()(producer, footers)
def test_more_not_done_yet(self):
wrapped = DummyProducer(NOT_DONE_YET)
producer = self._makeOne(wrapped)
self.assertEqual(producer.more(), NOT_DONE_YET)
def test_more_string(self):
wrapped = DummyProducer('hello')
producer = self._makeOne(wrapped)
self.assertEqual(producer.more(), '5\r\nhello\r\n')
def test_more_nodata(self):
wrapped = DummyProducer()
producer = self._makeOne(wrapped, footers=['a', 'b'])
self.assertEqual(producer.more(), '0\r\na\r\nb\r\n\r\n')
def test_more_nodata_footers(self):
wrapped = DummyProducer('')
producer = self._makeOne(wrapped, footers=['a', 'b'])
self.assertEqual(producer.more(), '0\r\na\r\nb\r\n\r\n')
def test_more_nodata_nofooters(self):
wrapped = DummyProducer('')
producer = self._makeOne(wrapped)
self.assertEqual(producer.more(), '0\r\n\r\n')
def test_more_noproducer(self):
producer = self._makeOne(None)
self.assertEqual(producer.more(), '')
class DeferringCompositeProducerTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.http import deferring_composite_producer
return deferring_composite_producer
def _makeOne(self, producers):
return self._getTargetClass()(producers)
def test_more_not_done_yet(self):
wrapped = DummyProducer(NOT_DONE_YET)
producer = self._makeOne([wrapped])
self.assertEqual(producer.more(), NOT_DONE_YET)
def test_more_string(self):
wrapped1 = DummyProducer('hello')
wrapped2 = DummyProducer('goodbye')
producer = self._makeOne([wrapped1, wrapped2])
self.assertEqual(producer.more(), 'hello')
self.assertEqual(producer.more(), 'goodbye')
self.assertEqual(producer.more(), '')
def test_more_nodata(self):
wrapped = DummyProducer()
producer = self._makeOne([wrapped])
self.assertEqual(producer.more(), '')
class DeferringGlobbingProducerTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.http import deferring_globbing_producer
return deferring_globbing_producer
def _makeOne(self, producer, buffer_size=1<<16):
return self._getTargetClass()(producer, buffer_size)
def test_more_not_done_yet(self):
wrapped = DummyProducer(NOT_DONE_YET)
producer = self._makeOne(wrapped)
self.assertEqual(producer.more(), NOT_DONE_YET)
def test_more_string(self):
wrapped = DummyProducer('hello', 'there', 'guy')
producer = self._makeOne(wrapped, buffer_size=1)
self.assertEqual(producer.more(), 'hello')
wrapped = DummyProducer('hello', 'there', 'guy')
producer = self._makeOne(wrapped, buffer_size=50)
self.assertEqual(producer.more(), 'hellothereguy')
def test_more_nodata(self):
wrapped = DummyProducer()
producer = self._makeOne(wrapped)
self.assertEqual(producer.more(), '')
class DeferringHookedProducerTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.http import deferring_hooked_producer
return deferring_hooked_producer
def _makeOne(self, producer, function):
return self._getTargetClass()(producer, function)
def test_more_not_done_yet(self):
wrapped = DummyProducer(NOT_DONE_YET)
producer = self._makeOne(wrapped, None)
self.assertEqual(producer.more(), NOT_DONE_YET)
def test_more_string(self):
wrapped = DummyProducer('hello')
L = []
def callback(bytes):
L.append(bytes)
producer = self._makeOne(wrapped, callback)
self.assertEqual(producer.more(), 'hello')
self.assertEqual(L, [])
producer.more()
self.assertEqual(L, [5])
def test_more_nodata(self):
wrapped = DummyProducer()
L = []
def callback(bytes):
L.append(bytes)
producer = self._makeOne(wrapped, callback)
self.assertEqual(producer.more(), '')
self.assertEqual(L, [0])
def test_more_noproducer(self):
producer = self._makeOne(None, None)
self.assertEqual(producer.more(), '')
class DeferringHttpRequestTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.http import deferring_http_request
return deferring_http_request
def _makeOne(
self,
channel=None,
req='GET / HTTP/1.0',
command='GET',
uri='/',
version='1.0',
header=(),
):
return self._getTargetClass()(
channel, req, command, uri, version, header
)
def _makeChannel(self):
class Channel:
closed = False
def close_when_done(self):
self.closed = True
def push_with_producer(self, producer):
self.producer = producer
return Channel()
def test_done_http_10_nokeepalive(self):
channel = self._makeChannel()
inst = self._makeOne(channel=channel, version='1.0')
inst.done()
self.assertTrue(channel.closed)
def test_done_http_10_keepalive_no_content_length(self):
channel = self._makeChannel()
inst = self._makeOne(
channel=channel,
version='1.0',
header=['Connection: Keep-Alive'],
)
inst.done()
self.assertTrue(channel.closed)
def test_done_http_10_keepalive_and_content_length(self):
channel = self._makeChannel()
inst = self._makeOne(
channel=channel,
version='1.0',
header=['Connection: Keep-Alive'],
)
inst.reply_headers['Content-Length'] = 1
inst.done()
self.assertEqual(inst['Connection'], 'Keep-Alive')
self.assertFalse(channel.closed)
def test_done_http_11_connection_close(self):
channel = self._makeChannel()
inst = self._makeOne(
channel=channel,
version='1.1',
header=['Connection: close']
)
inst.done()
self.assertTrue(channel.closed)
def test_done_http_11_unknown_transfer_encoding(self):
channel = self._makeChannel()
inst = self._makeOne(
channel=channel,
version='1.1',
)
inst.reply_headers['Transfer-Encoding'] = 'notchunked'
inst.done()
self.assertTrue(channel.closed)
def test_done_http_11_chunked_transfer_encoding(self):
channel = self._makeChannel()
inst = self._makeOne(
channel=channel,
version='1.1',
)
inst.reply_headers['Transfer-Encoding'] = 'chunked'
inst.done()
self.assertFalse(channel.closed)
def test_done_http_11_use_chunked(self):
channel = self._makeChannel()
inst = self._makeOne(
channel=channel,
version='1.1',
)
inst.use_chunked = True
inst.done()
self.assertTrue('Transfer-Encoding' in inst)
self.assertFalse(channel.closed)
def test_done_http_11_wo_content_length_no_te_no_use_chunked_close(self):
channel = self._makeChannel()
inst = self._makeOne(
channel=channel,
version='1.1',
)
inst.use_chunked = False
inst.done()
self.assertTrue(channel.closed)
def test_done_http_09(self):
channel = self._makeChannel()
inst = self._makeOne(
channel=channel,
version=None,
)
inst.done()
self.assertTrue(channel.closed)
class DeferringHttpChannelTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.http import deferring_http_channel
return deferring_http_channel
def _makeOne(self):
return self._getTargetClass()(
server=None,
conn=None,
addr=None
)
def test_defaults_delay_and_last_writable_check_time(self):
channel = self._makeOne()
self.assertEqual(channel.delay, 0)
self.assertEqual(channel.last_writable_check, 0)
def test_writable_with_delay_is_False_if_elapsed_lt_delay(self):
channel = self._makeOne()
channel.delay = 2
channel.last_writable_check = _NOW
later = _NOW + 1
self.assertFalse(channel.writable(now=later))
self.assertEqual(channel.last_writable_check, _NOW)
def test_writable_with_delay_is_False_if_elapsed_eq_delay(self):
channel = self._makeOne()
channel.delay = 2
channel.last_writable_check = _NOW
later = _NOW + channel.delay
self.assertFalse(channel.writable(now=later))
self.assertEqual(channel.last_writable_check, _NOW)
def test_writable_with_delay_is_True_if_elapsed_gt_delay(self):
channel = self._makeOne()
channel.delay = 2
channel.last_writable_check = _NOW
later = _NOW + channel.delay + 0.1
self.assertTrue(channel.writable(now=later))
self.assertEqual(channel.last_writable_check, later)
def test_writable_with_delay_is_True_if_system_time_goes_backwards(self):
channel = self._makeOne()
channel.delay = 2
channel.last_writable_check = _NOW
later = _NOW - 3600 # last check was in the future
self.assertTrue(channel.writable(now=later))
self.assertEqual(channel.last_writable_check, later)
_NOW = 1470085990
class EncryptedDictionaryAuthorizedTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.http import encrypted_dictionary_authorizer
return encrypted_dictionary_authorizer
def _makeOne(self, dict):
return self._getTargetClass()(dict)
def test_authorize_baduser(self):
authorizer = self._makeOne({})
self.assertFalse(authorizer.authorize(('foo', 'bar')))
def test_authorize_gooduser_badpassword(self):
authorizer = self._makeOne({'foo':'password'})
self.assertFalse(authorizer.authorize(('foo', 'bar')))
def test_authorize_gooduser_goodpassword(self):
authorizer = self._makeOne({'foo':'password'})
self.assertTrue(authorizer.authorize(('foo', 'password')))
def test_authorize_gooduser_goodpassword_with_colon(self):
authorizer = self._makeOne({'foo':'pass:word'})
self.assertTrue(authorizer.authorize(('foo', 'pass:word')))
def test_authorize_gooduser_badpassword_sha(self):
password = '{SHA}' + sha1('password').hexdigest()
authorizer = self._makeOne({'foo':password})
self.assertFalse(authorizer.authorize(('foo', 'bar')))
def test_authorize_gooduser_goodpassword_sha(self):
password = '{SHA}' + sha1('password').hexdigest()
authorizer = self._makeOne({'foo':password})
self.assertTrue(authorizer.authorize(('foo', 'password')))
class SupervisorAuthHandlerTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.http import supervisor_auth_handler
return supervisor_auth_handler
def _makeOne(self, dict, handler):
return self._getTargetClass()(dict, handler)
def test_ctor(self):
handler = self._makeOne({'a':1}, None)
from supervisor.http import encrypted_dictionary_authorizer
self.assertEqual(handler.authorizer.__class__,
encrypted_dictionary_authorizer)
def test_handle_request_authorizes_good_credentials(self):
request = DummyRequest('/logtail/process1', None, None, None)
encoded = base64.b64encode("user:password")
request.header = ["Authorization: Basic %s" % encoded]
handler = DummyHandler()
auth_handler = self._makeOne({'user':'password'}, handler)
auth_handler.handle_request(request)
self.assertTrue(handler.handled_request)
def test_handle_request_authorizes_good_password_with_colon(self):
request = DummyRequest('/logtail/process1', None, None, None)
encoded = base64.b64encode("user:pass:word") # password contains colon
request.header = ["Authorization: Basic %s" % encoded]
handler = DummyHandler()
auth_handler = self._makeOne({'user':'pass:word'}, handler)
auth_handler.handle_request(request)
self.assertTrue(handler.handled_request)
def test_handle_request_does_not_authorize_bad_credentials(self):
request = DummyRequest('/logtail/process1', None, None, None)
encoded = base64.b64encode("wrong:wrong")
request.header = ["Authorization: Basic %s" % encoded]
handler = DummyHandler()
auth_handler = self._makeOne({'user':'password'}, handler)
auth_handler.handle_request(request)
self.assertFalse(handler.handled_request)
class LogWrapperTests(unittest.TestCase):
def _getTargetClass(self):
from supervisor.http import LogWrapper
return LogWrapper
def _makeOne(self, logger):
return self._getTargetClass()(logger)
def test_strips_trailing_newlines_from_msgs(self):
logger = DummyLogger()
log_wrapper = self._makeOne(logger)
log_wrapper.log("foo\n")
logdata = logger.data
self.assertEqual(len(logdata), 1)
self.assertEqual(logdata[0], "foo")
def test_logs_msgs_with_error_at_error_level(self):
logger = DummyLogger()
log_wrapper = self._makeOne(logger)
errors = []
logger.error = errors.append
log_wrapper.log("Server Error")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0], "Server Error")
def test_logs_other_messages_at_trace_level(self):
logger = DummyLogger()
log_wrapper = self._makeOne(logger)
traces = []
logger.trace = traces.append
log_wrapper.log("GET /")
self.assertEqual(len(traces), 1)
self.assertEqual(traces[0], "GET /")
class TopLevelFunctionTests(unittest.TestCase):
def _make_http_servers(self, sconfigs):
options = DummyOptions()
options.server_configs = sconfigs
options.rpcinterface_factories = [('dummy',DummyRPCInterfaceFactory,{})]
supervisord = DummySupervisor()
from supervisor.http import make_http_servers
servers = make_http_servers(options, supervisord)
try:
for config, s in servers:
s.close()
socketfile = config.get('file')
if socketfile is not None:
os.unlink(socketfile)
finally:
from asyncore import socket_map
socket_map.clear()
return servers
def test_make_http_servers_socket_type_error(self):
config = {'family':999, 'host':'localhost', 'port':17735,
'username':None, 'password':None,
'section':'inet_http_server'}
try:
servers = self._make_http_servers([config])
self.fail('nothing raised')
except ValueError as exc:
self.assertEqual(exc.args[0], 'Cannot determine socket type 999')
def test_make_http_servers_noauth(self):
socketfile = tempfile.mktemp()
inet = {'family':socket.AF_INET, 'host':'localhost', 'port':17735,
'username':None, 'password':None, 'section':'inet_http_server'}
unix = {'family':socket.AF_UNIX, 'file':socketfile, 'chmod':int('700', 8),
'chown':(-1, -1), 'username':None, 'password':None,
'section':'unix_http_server'}
servers = self._make_http_servers([inet, unix])
self.assertEqual(len(servers), 2)
inetdata = servers[0]
self.assertEqual(inetdata[0], inet)
server = inetdata[1]
idents = [
'Supervisor XML-RPC Handler',
'Logtail HTTP Request Handler',
'Main Logtail HTTP Request Handler',
'Supervisor Web UI HTTP Request Handler',
'Default HTTP Request Handler'
]
self.assertEqual([x.IDENT for x in server.handlers], idents)
unixdata = servers[1]
self.assertEqual(unixdata[0], unix)
server = unixdata[1]
self.assertEqual([x.IDENT for x in server.handlers], idents)
def test_make_http_servers_withauth(self):
socketfile = tempfile.mktemp()
inet = {'family':socket.AF_INET, 'host':'localhost', 'port':17736,
'username':'username', 'password':'password',
'section':'inet_http_server'}
unix = {'family':socket.AF_UNIX, 'file':socketfile, 'chmod':int('700', 8),
'chown':(-1, -1), 'username':'username', 'password':'password',
'section':'unix_http_server'}
servers = self._make_http_servers([inet, unix])
self.assertEqual(len(servers), 2)
from supervisor.http import supervisor_auth_handler
for config, server in servers:
for handler in server.handlers:
self.assertTrue(isinstance(handler, supervisor_auth_handler),
handler)
class DummyHandler:
def __init__(self):
self.handled_request = False
def handle_request(self, request):
self.handled_request = True
class DummyProducer:
def __init__(self, *data):
self.data = list(data)
def more(self):
if self.data:
return self.data.pop(0)
else:
return ''
def test_suite():
return unittest.findTestCases(sys.modules[__name__])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| 36.388081
| 82
| 0.64258
|
612b42b77321e3f7dde87294b85fa339848bc7b5
| 599
|
py
|
Python
|
src/modules/time/time_parsing.py
|
mhaberler/gribmagic
|
f7a8c3f0653e421ca57f4200ec2e0ba0ce4064e0
|
[
"MIT"
] | 1
|
2021-11-14T06:25:44.000Z
|
2021-11-14T06:25:44.000Z
|
src/modules/time/time_parsing.py
|
mhaberler/gribmagic
|
f7a8c3f0653e421ca57f4200ec2e0ba0ce4064e0
|
[
"MIT"
] | null | null | null |
src/modules/time/time_parsing.py
|
mhaberler/gribmagic
|
f7a8c3f0653e421ca57f4200ec2e0ba0ce4064e0
|
[
"MIT"
] | null | null | null |
""" generic time parsing function """
from datetime import datetime
from dateutil.parser import parse
def convert_iso_timestamp_to_date_time(value: str) -> datetime:
"""
Convert a string in ISO8601/RFC3339 format to a timezone-aware datetime object
Args:
value: timestamp string in ISO8601/RFC3339 format
Returns: timezone-aware datetime object
"""
timestamp = parse(value)
if timestamp.tzinfo is None or \
timestamp.tzinfo.utcoffset(timestamp) is None:
raise ValueError("Timestamp is not timezone aware")
else:
return timestamp
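# Illustrative usage sketch (editorial addition, not part of the original module): an
# explicit UTC offset parses to an aware datetime, while a naive timestamp is rejected.
if __name__ == "__main__":
    aware = convert_iso_timestamp_to_date_time("2021-06-01T12:00:00+02:00")
    print(aware.isoformat())  # 2021-06-01T12:00:00+02:00
    try:
        convert_iso_timestamp_to_date_time("2021-06-01T12:00:00")  # no offset -> naive
    except ValueError as exc:
        print(exc)  # Timestamp is not timezone aware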
| 29.95
| 78
| 0.707846
|
040f919d4ed1b7f36e21a1566655eef283c7b8d4
| 1,214
|
py
|
Python
|
dvc/dependency/repo.py
|
jaipradeesh/dvc
|
365f70c00d68d3957a7a81b83782fd9f566bbf62
|
[
"Apache-2.0"
] | 2
|
2019-06-23T14:24:48.000Z
|
2019-07-08T12:22:53.000Z
|
dvc/dependency/repo.py
|
jaipradeesh/dvc
|
365f70c00d68d3957a7a81b83782fd9f566bbf62
|
[
"Apache-2.0"
] | null | null | null |
dvc/dependency/repo.py
|
jaipradeesh/dvc
|
365f70c00d68d3957a7a81b83782fd9f566bbf62
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import os
import copy
from dvc.utils.compat import urlparse
from dvc.external_repo import ExternalRepo
from .local import DependencyLOCAL
class DependencyREPO(DependencyLOCAL):
PARAM_REPO = "repo"
def __init__(self, erepo, stage, *args, **kwargs):
self.erepo = ExternalRepo(stage.repo.dvc_dir, **erepo)
super(DependencyLOCAL, self).__init__(stage, *args, **kwargs)
def _parse_path(self, remote, path):
self.erepo.install(self.repo.cache.local.cache_dir)
out_path = os.path.join(
self.erepo.repo.root_dir, urlparse(path).path.lstrip("/")
)
out, = self.erepo.repo.find_outs_by_path(out_path)
self.info = copy.copy(out.info)
self._erepo_stage = copy.copy(out.stage.path)
return self.REMOTE.path_cls(out.cache_path)
@property
def is_in_repo(self):
return False
def dumpd(self):
ret = super(DependencyLOCAL, self).dumpd()
ret[self.PARAM_REPO] = self.erepo.dumpd()
return ret
def download(self, to, resume=False):
self.erepo.repo.fetch(self._erepo_stage)
to.info = copy.copy(self.info)
to.checkout()
| 27.590909
| 69
| 0.666392
|
e86571ef1be70336c5dea12ccba6017475b7da91
| 398
|
py
|
Python
|
fundraising/migrations/0002_djangohero_approved_booleanfield.py
|
Venus9023/django_for_beginer
|
c12edb9d347f444f9817d0c6e13716284476820c
|
[
"BSD-3-Clause"
] | 1,440
|
2015-01-05T13:06:12.000Z
|
2022-03-30T23:09:24.000Z
|
fundraising/migrations/0002_djangohero_approved_booleanfield.py
|
Venus9023/django_for_beginer
|
c12edb9d347f444f9817d0c6e13716284476820c
|
[
"BSD-3-Clause"
] | 711
|
2015-01-01T19:42:33.000Z
|
2022-03-29T08:36:29.000Z
|
fundraising/migrations/0002_djangohero_approved_booleanfield.py
|
Venus9023/django_for_beginer
|
c12edb9d347f444f9817d0c6e13716284476820c
|
[
"BSD-3-Clause"
] | 887
|
2015-01-01T03:17:20.000Z
|
2022-03-23T09:15:26.000Z
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fundraising', '0001_squashed_0007_inkinddonor'),
]
operations = [
migrations.AlterField(
model_name='djangohero',
name='approved',
field=models.BooleanField(null=True, verbose_name='Name, URL, and Logo approved?'),
),
]
| 24.875
| 95
| 0.620603
|
f6f3a5b7e617adc52ca1228ece282130f88861bc
| 7,654
|
py
|
Python
|
metrics/sacrebleu/sacrebleu.py
|
leondz/datasets
|
4110fb6034f79c5fb470cf1043ff52180e9c63b7
|
[
"Apache-2.0"
] | 3,395
|
2020-05-13T21:16:50.000Z
|
2020-09-10T14:36:50.000Z
|
metrics/sacrebleu/sacrebleu.py
|
leondz/datasets
|
4110fb6034f79c5fb470cf1043ff52180e9c63b7
|
[
"Apache-2.0"
] | 370
|
2020-05-13T21:28:57.000Z
|
2020-09-10T11:03:38.000Z
|
metrics/sacrebleu/sacrebleu.py
|
leondz/datasets
|
4110fb6034f79c5fb470cf1043ff52180e9c63b7
|
[
"Apache-2.0"
] | 258
|
2020-05-15T01:17:09.000Z
|
2020-09-10T12:41:43.000Z
|
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" SACREBLEU metric. """
import sacrebleu as scb
from packaging import version
import datasets
_CITATION = """\
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
"""
_DESCRIPTION = """\
SacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores.
Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text.
It also knows all the standard test sets and handles downloading, processing, and tokenization for you.
See the [README.md] file at https://github.com/mjpost/sacreBLEU for more information.
"""
_KWARGS_DESCRIPTION = """
Produces BLEU scores along with their sufficient statistics
from a source against one or more references.
Args:
predictions (`list` of `str`): list of translations to score. Each translation should be tokenized into a list of tokens.
references (`list` of `list` of `str`): A list of lists of references. The contents of the first sub-list are the references for the first prediction, the contents of the second sub-list are for the second prediction, etc. Note that there must be the same number of references for each prediction (i.e. all sub-lists must be of the same length).
smooth_method (`str`): The smoothing method to use, defaults to `'exp'`. Possible values are:
- `'none'`: no smoothing
- `'floor'`: increment zero counts
- `'add-k'`: increment num/denom by k for n>1
- `'exp'`: exponential decay
smooth_value (`float`): The smoothing value. Only valid when `smooth_method='floor'` (in which case `smooth_value` defaults to `0.1`) or `smooth_method='add-k'` (in which case `smooth_value` defaults to `1`).
tokenize (`str`): Tokenization method to use for BLEU. If not provided, defaults to `'zh'` for Chinese, `'ja-mecab'` for Japanese and `'13a'` (mteval) otherwise. Possible values are:
- `'none'`: No tokenization.
- `'zh'`: Chinese tokenization.
- `'13a'`: mimics the `mteval-v13a` script from Moses.
- `'intl'`: International tokenization, mimics the `mteval-v14` script from Moses
- `'char'`: Language-agnostic character-level tokenization.
- `'ja-mecab'`: Japanese tokenization. Uses the [MeCab tokenizer](https://pypi.org/project/mecab-python3).
lowercase (`bool`): If `True`, lowercases the input, enabling case-insensitivity. Defaults to `False`.
force (`bool`): If `True`, insists that your tokenized input is actually detokenized. Defaults to `False`.
use_effective_order (`bool`): If `True`, stops including n-gram orders for which precision is 0. This should be `True`, if sentence-level BLEU will be computed. Defaults to `False`.
Returns:
'score': BLEU score,
'counts': Counts,
'totals': Totals,
'precisions': Precisions,
'bp': Brevity penalty,
'sys_len': predictions length,
'ref_len': reference length,
Examples:
Example 1:
>>> predictions = ["hello there general kenobi", "foo bar foobar"]
>>> references = [["hello there general kenobi", "hello there !"], ["foo bar foobar", "foo bar foobar"]]
>>> sacrebleu = datasets.load_metric("sacrebleu")
>>> results = sacrebleu.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['score', 'counts', 'totals', 'precisions', 'bp', 'sys_len', 'ref_len']
>>> print(round(results["score"], 1))
100.0
Example 2:
>>> predictions = ["hello there general kenobi",
... "on our way to ankh morpork"]
>>> references = [["hello there general kenobi", "hello there !"],
... ["goodbye ankh morpork", "ankh morpork"]]
>>> sacrebleu = datasets.load_metric("sacrebleu")
>>> results = sacrebleu.compute(predictions=predictions,
... references=references)
>>> print(list(results.keys()))
['score', 'counts', 'totals', 'precisions', 'bp', 'sys_len', 'ref_len']
>>> print(round(results["score"], 1))
39.8
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Sacrebleu(datasets.Metric):
def _info(self):
if version.parse(scb.__version__) < version.parse("1.4.12"):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
'You can install it with `pip install "sacrebleu>=1.4.12"`.'
)
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
homepage="https://github.com/mjpost/sacreBLEU",
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
}
),
codebase_urls=["https://github.com/mjpost/sacreBLEU"],
reference_urls=[
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
],
)
def _compute(
self,
predictions,
references,
smooth_method="exp",
smooth_value=None,
force=False,
lowercase=False,
tokenize=None,
use_effective_order=False,
):
references_per_prediction = len(references[0])
if any(len(refs) != references_per_prediction for refs in references):
raise ValueError("Sacrebleu requires the same number of references for each prediction")
transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
output = scb.corpus_bleu(
predictions,
transformed_references,
smooth_method=smooth_method,
smooth_value=smooth_value,
force=force,
lowercase=lowercase,
use_effective_order=use_effective_order,
**(dict(tokenize=tokenize) if tokenize else {}),
)
output_dict = {
"score": output.score,
"counts": output.counts,
"totals": output.totals,
"precisions": output.precisions,
"bp": output.bp,
"sys_len": output.sys_len,
"ref_len": output.ref_len,
}
return output_dict
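# Editorial usage sketch (not part of the original metric script): it follows the
# load_metric pattern from the docstring above and exercises use_effective_order=True,
# which the docstring recommends for sentence-level BLEU. Fetching the canonical
# "sacrebleu" metric script (and hence network access) is assumed.
if __name__ == "__main__":
    sacrebleu_metric = datasets.load_metric("sacrebleu")
    result = sacrebleu_metric.compute(
        predictions=["the cat sat on the mat"],
        references=[["the cat sat on the mat"]],
        use_effective_order=True,  # skip n-gram orders whose precision would be zero
    )
    print(round(result["score"], 1))  # 100.0 for an exact match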
| 46.108434
| 349
| 0.644108
|
09cb55c64d55729dadc4eff4a652c8655e6cf402
| 585
|
py
|
Python
|
cart/migrations/0004_cartitem_buyer.py
|
hpanwar08/greatkart
|
834ff9fabdbb9493f54bcfd5d23505831b4a66d2
|
[
"MIT"
] | null | null | null |
cart/migrations/0004_cartitem_buyer.py
|
hpanwar08/greatkart
|
834ff9fabdbb9493f54bcfd5d23505831b4a66d2
|
[
"MIT"
] | null | null | null |
cart/migrations/0004_cartitem_buyer.py
|
hpanwar08/greatkart
|
834ff9fabdbb9493f54bcfd5d23505831b4a66d2
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2022-01-27 12:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('cart', '0003_alter_cartitem_cart'),
]
operations = [
migrations.AddField(
model_name='cartitem',
name='buyer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| 26.590909
| 121
| 0.676923
|
3a3e6dc43a0dd023a4dd101df4d942c6fb41eebe
| 1,644
|
py
|
Python
|
orbit.py
|
ReedOnly/planetOrbit
|
a04bb06518e57053a9929eecdbddbd226b04df91
|
[
"MIT"
] | null | null | null |
orbit.py
|
ReedOnly/planetOrbit
|
a04bb06518e57053a9929eecdbddbd226b04df91
|
[
"MIT"
] | null | null | null |
orbit.py
|
ReedOnly/planetOrbit
|
a04bb06518e57053a9929eecdbddbd226b04df91
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import datetime as dt
import time as ti
from astropy import time
from astropy import units as u
from poliastro.neos import neows
from poliastro.examples import molniya
from poliastro.plotting import plot, OrbitPlotter, BODY_COLORS
from poliastro.bodies import Sun, Earth, Mars
from poliastro.twobody import Orbit
date = time.Time("2018-02-07 12:00", scale='utc')
start=dt.datetime(2018, 2, 1, 12, 0)
length=1
days_dt=[dt.datetime(2018, 2, 1, 12, 0)+dt.timedelta(days=1*n) for n in range(length)]
days_as=[time.Time(day, scale='tdb') for day in days_dt]
op = OrbitPlotter(num_points=1000)
r_p = Sun.R + 165 * u.km
r_a = Sun.R + 215 * u.km
a = (r_p + r_a) / 2
roadster=Orbit.from_classical(attractor=Sun,
a=0.9860407221838553 * u.AU,
ecc=0.2799145376150214*u.one,
inc=1.194199764898942*u.deg,
raan=49*u.deg,
argp=286*u.deg,
nu=23*u.deg,
epoch=date)
for date in days_as:
apophis_orbit = neows.orbit_from_name('99942')
spacex = neows.orbit_from_name('-143205')
op.orbits.clear()
earth = Orbit.from_body_ephem(Earth, date)
mars = Orbit.from_body_ephem(Mars, date)
op.plot(earth, label=Earth)
op.plot(mars, label=Mars)
op.plot(roadster, label='Roadster')
op.plot(apophis_orbit, label='Apophis')
op._redraw()
plt.pause(0.01)
input('type to exit')
op.plot(Orbit.from_body_ephem(Mars, time.Time("2018-07-28 12:00", scale='utc')), label=Mars)
| 29.890909
| 92
| 0.623479
|
961145aae08845d9651a3b8f9341c71874140d7f
| 7,924
|
py
|
Python
|
plugins/simpleweapon/simpleweapon.py
|
joeyhome/FPS-kit
|
ff369369059133b937035780214c7dc285806a1b
|
[
"Apache-2.0"
] | 1
|
2019-01-19T22:19:14.000Z
|
2019-01-19T22:19:14.000Z
|
plugins/simpleweapon/simpleweapon.py
|
joeyhome/FPS-kit
|
ff369369059133b937035780214c7dc285806a1b
|
[
"Apache-2.0"
] | null | null | null |
plugins/simpleweapon/simpleweapon.py
|
joeyhome/FPS-kit
|
ff369369059133b937035780214c7dc285806a1b
|
[
"Apache-2.0"
] | 2
|
2018-11-16T23:01:00.000Z
|
2021-10-21T21:54:25.000Z
|
# -*- coding: utf-8 -*-
# Copyright Tom SF Haines
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import posixpath
import random
import math
from bin.shared import ray_cast
from bin.shared import csp
from direct.actor.Actor import Actor
from direct.interval.IntervalGlobal import *
from direct.interval.ActorInterval import ActorInterval
from pandac.PandaModules import *
class SimpleWeapon:
"""Provides a simple weapon system - not very sophisticaed,
but good enough to test shooting things."""
def __init__(self,manager,xml):
self.gunView = render.attachNewNode('gun-view')
self.ray = None
self.reload(manager,xml)
def destroy(self):
self.gunView.removeNode()
if self.ray!=None:
self.ray.destroy()
def reload(self,manager,xml):
# Get the path to load weapons from...
basePath = manager.get('paths').getConfig().find('weapons').get('path')
# Variables to manage the firing state (Used G36 as reference for defaults.)...
bullet = xml.find('bullet')
if bullet!=None:
self.bulletRate = float(bullet.get('rate',1.0/12.5))
self.bulletSpeed = float(bullet.get('speed',920.0))
self.bulletWeight = float(bullet.get('mass',0.004))
else:
self.bulletRate = 1.0/12.5
self.bulletSpeed = 920.0
self.bulletWeight = 0.004
# Determine the weapon meshes path...
self.meshPath = posixpath.join(basePath, xml.find('egg').get('file'))
# Get the camera interface, so we can zoom in when the player aims...
self.camera = manager.get(xml.find('camera').get('plugin'))
# Create our gun node - both the gun and the ray used for shooting track this - allows for gun jitter, kick back etc...
parent = xml.find('parent')
self.gunView.reparentTo(manager.get(parent.get('plugin')).getNode(parent.get('node')))
# Create a ray cast to detect what the player is looking at... and what will be shot...
self.space = manager.get('ode').getSpace()
if self.ray!=None:
self.ray.destroy()
self.ray = OdeRayGeom(100.0)
self.ray.setCategoryBits(BitMask32(0xfffffffe))
self.ray.setCollideBits(BitMask32(0xfffffffe))
# Get all the stuff we need to do the muzzle flash particle effect...
flash = xml.find('muzzle_flash')
self.flashManager = manager.get(flash.get('plugin'))
self.flashEffect = flash.get('effect')
self.flashBone = flash.get('bone') # Will be swapped out for the actual node later.
self.flashPos = csp.getPos(flash.get('pos'))
# Get all the stuff we need to do the bullet hit sparks effect...
sparks = xml.find('sparks')
self.sparksManager = manager.get(sparks.get('plugin'))
self.sparksEffect = sparks.get('effect')
# Create a quaternion that rotates +ve z to +ve y - used to point it in the weapon direction rather than up...
self.zToY = Quat()
self.zToY.setFromAxisAngle(-90.0,Vec3(1.0,0.0,0.0))
# State for the animation...
self.state = False # False==casual, True==aim.
self.nextState = False
# Firing state...
self.firing = False # True if the trigger is being held.
self.triggerTime = 0.0 # How long the trigger has been held for, so we know when to eject ammo.
# For bullet holes
bh = xml.find('bullet_holes')
if bh != None:
self.bulletHoles = manager.get(bh.get('plugin'))
else:
self.bulletHoles = None
def postInit(self):
for i in self.postReload():
yield i
def postReload(self):
# Load the actor...
self.mesh = Actor(self.meshPath)
yield
# Shader generator makes it shiny, plus we need it in the right places in the render graph...
self.mesh.setShaderAuto()
self.mesh.reparentTo(self.gunView)
self.mesh.hide()
yield
# Set its animation going... except we pause it until needed...
self.nextAni()
self.interval.pause()
# Gun flash requires an exposed bone...
self.flashBone = self.mesh.exposeJoint(None,"modelRoot",self.flashBone)
yield
def gunControl(self,task):
# Update the gun direction ray to follow the players view...
self.ray.setPosition(self.gunView.getPos(render))
self.ray.setQuaternion(self.zToY.multiply(self.gunView.getQuat(render)))
# If the gun is firing update the trigger time, if a bullet is ejected do the maths...
if self.firing:
dt = globalClock.getDt()
self.triggerTime += dt
while self.triggerTime>self.bulletRate:
self.triggerTime -= self.bulletRate
hit,pos,norm = ray_cast.nearestHit(self.space,self.ray)
# Create a muzzle flash effect...
self.flashManager.doEffect(self.flashEffect, self.flashBone, True, self.flashPos)
if hit:
# Create an impact sparks effect...
# Calculate the reflection direction...
rd = self.ray.getDirection()
sparkDir = (norm * (2.0*norm.dot(rd))) - rd
# Convert the reflection direction into a quaternion that will rotate +ve z to the required direction...
try:
ang = -math.acos(sparkDir[2])
except:
print 'Angle problem', sparkDir
ang = 0.0
axis = Vec3(0.0,0.0,1.0).cross(sparkDir)
axis.normalize()
sparkQuat = Quat()
sparkQuat.setFromAxisAngleRad(ang,axis)
# Set it going...
self.sparksManager.doEffect(self.sparksEffect, render, False, pos, sparkQuat)
          # Make a bullet hole, but only if a bullet hole plugin was provided...
          if self.bulletHoles!=None:
            if hit.hasBody() and isinstance(hit.getBody().getData(), NodePath):
              self.bulletHoles.makeNew(pos, norm, hit.getBody().getData())
            else:
              self.bulletHoles.makeNew(pos, norm, None)
# Impart some energy on the object...
if hit and hit.hasBody():
body = hit.getBody()
          # Calculate the force required to transfer the bullet's momentum to the body over a short impulse...
force = self.bulletWeight*self.bulletSpeed/0.05
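          # (With the defaults above: 0.004kg * 920m/s / 0.05s = 73.6N, treating the hit as a 0.05s impulse.)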
# Get the direction of travel of the bullet, multiply by force...
d = self.ray.getDirection()
d *= force
# If the object is asleep awaken it...
if not body.isEnabled():
body.enable()
# Add the force to the object...
body.addForceAtPos(d,pos)
return task.cont
def start(self):
# Make the gun visible...
self.mesh.show()
# Set the gun animation going...
self.interval.finish()
# Weapon task - this primarily makes it shoot...
self.task = taskMgr.add(self.gunControl,'GunControl')
def stop(self):
self.interval.pause()
self.mesh.hide()
taskMgr.remove(self.task)
def nextAni(self):
self.state = self.nextState
if self.state:
ani = random.choice(('aim_wiggle_a','aim_wiggle_b','aim_wiggle_c'))
else:
ani = random.choice(('casual_wiggle_a','casual_wiggle_b','casual_wiggle_c'))
self.mesh.pose(ani,0)
self.interval = Sequence(self.mesh.actorInterval(ani),Func(self.nextAni))
self.interval.start()
def setAiming(self,s):
if self.nextState!=s:
self.interval.pause()
self.nextState = s
self.camera.setZoomed(s)
def wib():
self.interval.finish()
if s: ani = 'casual_aim'
else: ani = 'aim_casual'
transition = Sequence(self.mesh.actorInterval(ani),Func(wib))
transition.start()
def setFiring(self,s):
self.firing = s
if self.firing:
self.triggerTime = 0.0
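The firing loop in gunControl above decouples shot rate from frame rate: frame time is accumulated into triggerTime, and one ray cast is performed per bulletRate seconds of held trigger. Below is a minimal, self-contained sketch of that accumulator pattern; the function and constant names are illustrative stand-ins, not part of the plugin API above.

# Minimal sketch of the trigger-time accumulator used in gunControl (illustrative names).
BULLET_RATE = 1.0 / 12.5  # seconds between shots, matching the weapon defaults above

def update_trigger(trigger_time, dt, fire_bullet):
    """Advance the accumulator by dt and fire once per BULLET_RATE of held trigger."""
    trigger_time += dt
    while trigger_time > BULLET_RATE:
        trigger_time -= BULLET_RATE
        fire_bullet()
    return trigger_time

if __name__ == '__main__':
    shots = []
    t = 0.0
    for _ in range(30):  # simulate ~0.5s of held trigger at 60fps
        t = update_trigger(t, 1.0 / 60.0, lambda: shots.append(1))
    print(len(shots))    # roughly 6 shots at 12.5 rounds per second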
avg_line_length: 31.951613 | max_line_length: 123 | alphanum_fraction: 0.656108
hexsha: 924f377099891230b656c5704b89a82ebbcc0b44 | size: 2,158 | ext: py | lang: Python
max_stars_repo_path: bookwyrm/views/list/curate.py | max_stars_repo_name: mouse-reeve/fedireads | max_stars_repo_head_hexsha: e3471fcc3500747a1b1deaaca662021aae5b08d4 | max_stars_repo_licenses: ["CC0-1.0"] | max_stars_count: 270 | max_stars_repo_stars_event_min_datetime: 2020-01-27T06:06:07.000Z | max_stars_repo_stars_event_max_datetime: 2020-06-21T00:28:18.000Z
max_issues_repo_path: bookwyrm/views/list/curate.py | max_issues_repo_name: mouse-reeve/fedireads | max_issues_repo_head_hexsha: e3471fcc3500747a1b1deaaca662021aae5b08d4 | max_issues_repo_licenses: ["CC0-1.0"] | max_issues_count: 158 | max_issues_repo_issues_event_min_datetime: 2020-02-10T20:36:54.000Z | max_issues_repo_issues_event_max_datetime: 2020-06-26T17:12:54.000Z
max_forks_repo_path: bookwyrm/views/list/curate.py | max_forks_repo_name: mouse-reeve/fedireads | max_forks_repo_head_hexsha: e3471fcc3500747a1b1deaaca662021aae5b08d4 | max_forks_repo_licenses: ["CC0-1.0"] | max_forks_count: 15 | max_forks_repo_forks_event_min_datetime: 2020-02-13T21:53:33.000Z | max_forks_repo_forks_event_max_datetime: 2020-06-17T16:52:46.000Z
content:
""" book list views"""
from django.contrib.auth.decorators import login_required
from django.db.models import Max
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.views import View
from bookwyrm import forms, models
from bookwyrm.views.list.list import increment_order_in_reverse
from bookwyrm.views.list.list import normalize_book_list_ordering
# pylint: disable=no-self-use
@method_decorator(login_required, name="dispatch")
class Curate(View):
"""approve or discard list suggestsions"""
def get(self, request, list_id):
"""display a pending list"""
book_list = get_object_or_404(models.List, id=list_id)
book_list.raise_not_editable(request.user)
data = {
"list": book_list,
"pending": book_list.listitem_set.filter(approved=False),
"list_form": forms.ListForm(instance=book_list),
}
return TemplateResponse(request, "lists/curate.html", data)
def post(self, request, list_id):
"""edit a book_list"""
book_list = get_object_or_404(models.List, id=list_id)
book_list.raise_not_editable(request.user)
suggestion = get_object_or_404(models.ListItem, id=request.POST.get("item"))
approved = request.POST.get("approved") == "true"
if approved:
# update the book and set it to be the last in the order of approved books,
# before any pending books
suggestion.approved = True
order_max = (
book_list.listitem_set.filter(approved=True).aggregate(Max("order"))[
"order__max"
]
or 0
) + 1
suggestion.order = order_max
increment_order_in_reverse(book_list.id, order_max)
suggestion.save()
else:
deleted_order = suggestion.order
suggestion.delete(broadcast=False)
normalize_book_list_ordering(book_list.id, start=deleted_order)
return redirect("list-curate", book_list.id)
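The post handler above keeps the list's order column dense: approving a suggestion slots it in immediately after the last approved item (shifting anything already at or past that position), while discarding deletes the row and closes the gap it leaves. A minimal in-memory sketch of that ordering rule follows; plain dicts stand in for ListItem rows, and the two helpers are simplified stand-ins for increment_order_in_reverse and normalize_book_list_ordering, not the real implementations.

# In-memory sketch of the curate ordering rule; dicts stand in for ListItem rows.
def approve(items, item):
    """Approve item and place it just after the last approved item."""
    order_max = max((i["order"] for i in items if i["approved"]), default=0) + 1
    for other in items:  # simplified stand-in for increment_order_in_reverse
        if other is not item and other["order"] >= order_max:
            other["order"] += 1
    item["approved"] = True
    item["order"] = order_max

def discard(items, item):
    """Drop item and close the gap (simplified normalize_book_list_ordering)."""
    items.remove(item)
    for other in items:
        if other["order"] > item["order"]:
            other["order"] -= 1

if __name__ == "__main__":
    rows = [
        {"title": "a", "approved": True, "order": 1},
        {"title": "b", "approved": True, "order": 2},
        {"title": "c", "approved": False, "order": 3},
        {"title": "d", "approved": False, "order": 4},
    ]
    approve(rows, rows[3])  # "d" is approved and takes order 3, pushing "c" to 4
    discard(rows, rows[2])  # "c" is discarded, remaining orders stay dense
    print(sorted((r["title"], r["order"]) for r in rows))  # [('a', 1), ('b', 2), ('d', 3)]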
avg_line_length: 38.535714 | max_line_length: 87 | alphanum_fraction: 0.666358