Dataset schema (each row describes one source file plus repository metadata):

| Column | Type | Range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
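A minimal sketch of how rows with this schema could be inspected programmatically, assuming the Hugging Face datasets library; the dataset identifier below is a hypothetical placeholder, since the actual dataset name is not given in this dump:

# Minimal sketch: stream a dataset with the schema above and print a few columns per row.
# "org/code-dataset" is a placeholder identifier, not something specified by this dump.
from datasets import load_dataset

ds = load_dataset("org/code-dataset", split="train", streaming=True)

for i, row in enumerate(ds):
    print(row["hexsha"], row["size"], row["ext"], row["lang"])
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["max_stars_count"])
    print(row["avg_line_length"], row["max_line_length"], row["alphanum_fraction"])
    if i == 2:  # stop after the first few rows
        break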
hexsha: 0bd8eba2e360867ae5e0e40a00c03d3eecb7c920 | size: 170 | ext: py | lang: Python
repo (stars/issues/forks): caow2/ML-Project @ 1d4bc2ed26780c787db0a64c2677a1ececc37e1d | path: main.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
import sqlite3
db_file = 'data/ATLDataBase.db'
#db_file = 'ATL&PHL_Data_9_17_18_to_9_22_18/PHLDataBase.db'
conn = sqlite3.connect(db_file)
c = conn.cursor()
c.close()
avg_line_length: 17 | max_line_length: 59 | alphanum_fraction: 0.758824
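The three trailing statistics attached to each row (avg_line_length, max_line_length, alphanum_fraction) can plausibly be derived from the content field alone. The sketch below is one possible definition, assumed for illustration rather than taken from the dataset's actual build code:

# One plausible way to derive the per-file statistics shown after each row.
# These definitions are assumptions; the exact formulas used by the dataset are not given here.
def file_stats(content: str):
    lines = content.splitlines()
    avg_line_length = len(content) / max(len(lines), 1)
    max_line_length = max((len(line) for line in lines), default=0)
    alphanum_fraction = sum(ch.isalnum() for ch in content) / max(len(content), 1)
    return avg_line_length, max_line_length, alphanum_fraction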
hexsha: 42a45b4c1beb53a4a6ebb96bcdf2bead5d09f5e2 | size: 413 | ext: py | lang: Python
repo (stars/issues/forks): glycerine/numba @ 4cb9e4f0b3cabd9e6a144fa4f3f7e5d6bee45635 | path: numba/tests/builtins/test_builtin_int.py | licenses: ["BSD-2-Clause"]
max_stars_count: 1 (2019-04-17T10:03:53.000Z to 2019-04-17T10:03:53.000Z) | max_issues_count: null | max_forks_count: null
content:
"""
>>> int() == 0
True
>>> convert_int(2.5) == 2
True
>>> convert_to_int('FF', 16) == 255
True
"""
import sys
from numba import *
@autojit(backend='ast')
def empty_int():
x = float()
return x
@autojit(backend='ast')
def convert_int(x):
return int(x)
@autojit(backend='ast')
def convert_to_int(s, base):
return int(s, base)
if __name__ == '__main__':
import doctest
doctest.testmod()
avg_line_length: 14.241379 | max_line_length: 35 | alphanum_fraction: 0.622276
hexsha: b78ab0b133acea51380a04e9ed295fc4edf0fdc0 | size: 3,107 | ext: py | lang: Python
repo (stars/issues/forks): thinkitdata/ucsmsdk @ da6599e1dbc1207a30eabe548a7e5791af5f476b | path: ucsmsdk/mometa/equipment/EquipmentPsuFsmTask.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
"""This module contains the general information for EquipmentPsuFsmTask ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class EquipmentPsuFsmTaskConsts:
COMPLETION_CANCELLED = "cancelled"
COMPLETION_COMPLETED = "completed"
COMPLETION_PROCESSING = "processing"
COMPLETION_SCHEDULED = "scheduled"
ITEM_UPDATE_PSU = "UpdatePSU"
ITEM_NOP = "nop"
class EquipmentPsuFsmTask(ManagedObject):
"""This is EquipmentPsuFsmTask class."""
consts = EquipmentPsuFsmTaskConsts()
naming_props = set([u'item'])
mo_meta = MoMeta("EquipmentPsuFsmTask", "equipmentPsuFsmTask", "task-[item]", VersionMeta.Version302c, "OutputOnly", 0xf, [], [""], [u'equipmentPsu'], [], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version302c, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"completion": MoPropertyMeta("completion", "completion", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, ["cancelled", "completed", "processing", "scheduled"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"flags": MoPropertyMeta("flags", "flags", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""(defaultValue){0,1}""", [], []),
"item": MoPropertyMeta("item", "item", "string", VersionMeta.Version302c, MoPropertyMeta.NAMING, None, None, None, None, ["UpdatePSU", "nop"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"seq_id": MoPropertyMeta("seq_id", "seqId", "uint", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version302c, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"completion": "completion",
"dn": "dn",
"flags": "flags",
"item": "item",
"rn": "rn",
"sacl": "sacl",
"seqId": "seq_id",
"status": "status",
}
def __init__(self, parent_mo_or_dn, item, **kwargs):
self._dirty_mask = 0
self.item = item
self.child_action = None
self.completion = None
self.flags = None
self.sacl = None
self.seq_id = None
self.status = None
ManagedObject.__init__(self, "EquipmentPsuFsmTask", parent_mo_or_dn, **kwargs)
avg_line_length: 51.783333 | max_line_length: 249 | alphanum_fraction: 0.650467
hexsha: 363c892f811052bf7c191734764d53c8ca22f178 | size: 6,942 | ext: py | lang: Python
repo (stars/issues/forks): b1nslashsh/soda-sql @ 927d19561d6315b6cbd84ada87d4953ac2e2717d | path: tests/local/warehouse/metrics/test_missing_and_invalid_customizations.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: 1 (2021-02-23T20:47:40.000Z to 2021-03-06T09:03:48.000Z) | max_forks_count: 1 (2021-02-23T20:41:24.000Z to 2021-02-23T20:41:24.000Z)
content:
# Copyright 2020 Soda
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sodasql.scan.metric import Metric
from sodasql.scan.scan_yml_parser import KEY_COLUMNS, KEY_METRICS, COLUMN_KEY_VALID_FORMAT, KEY_METRIC_GROUPS
from tests.common.sql_test_case import SqlTestCase
class TestMissingAndInvalidCustomizations(SqlTestCase):
def test_scan_customized_missing_values(self):
self.sql_recreate_table(
[f"name {self.dialect.data_type_varchar_255}"],
["('one')",
"('')",
"(' ')",
"('no value')",
"(null)"])
scan_result = self.scan({
KEY_COLUMNS: {
'name': {
KEY_METRIC_GROUPS: [
Metric.METRIC_GROUP_MISSING
],
'missing_values': [
'no value'
]
}
}
})
self.assertEqual(scan_result.get(Metric.MISSING_COUNT, 'name'), 2)
self.assertEqual(scan_result.get(Metric.MISSING_PERCENTAGE, 'name'), 40.0)
self.assertEqual(scan_result.get(Metric.VALUES_COUNT, 'name'), 3)
self.assertEqual(scan_result.get(Metric.VALUES_PERCENTAGE, 'name'), 60.0)
def test_scan_customized_missing_format_empty(self):
self.sql_recreate_table(
[f"name {self.dialect.data_type_varchar_255}"],
["('one')",
"('two')",
"('three')",
"('four')",
"('')",
"(' ')",
"(' ')",
"(null)",
"(null)",
"(null)"])
scan_result = self.scan({
KEY_COLUMNS: {
'name': {
KEY_METRIC_GROUPS: [
Metric.METRIC_GROUP_MISSING
],
'missing_format': 'empty'
}
}
})
self.assertEqual(scan_result.get(Metric.MISSING_COUNT, 'name'), 4)
self.assertEqual(scan_result.get(Metric.MISSING_PERCENTAGE, 'name'), 40.0)
self.assertEqual(scan_result.get(Metric.VALUES_COUNT, 'name'), 6)
self.assertEqual(scan_result.get(Metric.VALUES_PERCENTAGE, 'name'), 60.0)
def test_scan_customized_missing_format_whitespace(self):
self.sql_recreate_table(
[f"name {self.dialect.data_type_varchar_255}"],
["('one')",
"('two')",
"('three')",
"('four')",
"('')",
"(' ')",
"(' ')",
"(null)",
"(null)",
"(null)"])
scan_result = self.scan({
KEY_COLUMNS: {
'name': {
KEY_METRIC_GROUPS: [
Metric.METRIC_GROUP_MISSING
],
'missing_format': 'whitespace'
}
}
})
self.assertEqual(scan_result.get(Metric.MISSING_COUNT, 'name'), 6)
self.assertEqual(scan_result.get(Metric.MISSING_PERCENTAGE, 'name'), 60.0)
self.assertEqual(scan_result.get(Metric.VALUES_COUNT, 'name'), 4)
self.assertEqual(scan_result.get(Metric.VALUES_PERCENTAGE, 'name'), 40.0)
def test_scan_missing_customized_and_validity(self):
self.sql_recreate_table(
[f"name {self.dialect.data_type_varchar_255}"],
["('one')",
"('')",
"(' ')",
"('no value')",
"(null)"])
scan_result = self.scan({
KEY_COLUMNS: {
'name': {
KEY_METRICS: [
Metric.INVALID_COUNT
],
'missing_values': [
'no value'
],
'valid_regex': 'one'
}
}
})
self.assertEqual(scan_result.get(Metric.INVALID_COUNT, 'name'), 2)
self.assertEqual(scan_result.get(Metric.VALID_COUNT, 'name'), 1)
self.assertEqual(scan_result.get(Metric.MISSING_COUNT, 'name'), 2)
def test_scan_valid_regex(self):
self.sql_recreate_table(
[f"name {self.dialect.data_type_varchar_255}"],
["('one')",
"('')",
"(' ')",
"('no value')",
"(null)"])
scan_result = self.scan({
KEY_COLUMNS: {
'name': {
KEY_METRIC_GROUPS: [
Metric.METRIC_GROUP_VALIDITY
],
'valid_regex': 'one'
}
}
})
self.assertEqual(scan_result.get(Metric.MISSING_COUNT, 'name'), 1)
self.assertEqual(scan_result.get(Metric.MISSING_PERCENTAGE, 'name'), 20.0)
self.assertEqual(scan_result.get(Metric.VALUES_COUNT, 'name'), 4)
self.assertEqual(scan_result.get(Metric.VALUES_PERCENTAGE, 'name'), 80)
self.assertEqual(scan_result.get(Metric.INVALID_COUNT, 'name'), 3)
self.assertEqual(scan_result.get(Metric.INVALID_PERCENTAGE, 'name'), 60.0)
self.assertEqual(scan_result.get(Metric.VALID_COUNT, 'name'), 1)
self.assertEqual(scan_result.get(Metric.VALID_PERCENTAGE, 'name'), 20.0)
def test_scan_valid_format(self):
self.sql_recreate_table(
[f"col {self.dialect.data_type_varchar_255}"],
["('1')",
"('2')",
"('3')",
"('4')",
"('4')",
"('4')",
"('xxx') ",
"('yyy') ",
"(null)",
"('10')"])
scan_result = self.scan({
KEY_COLUMNS: {
'col': {
KEY_METRIC_GROUPS: [
Metric.METRIC_GROUP_VALIDITY
],
COLUMN_KEY_VALID_FORMAT: 'number_whole'
}
}
})
self.assertEqual(scan_result.get(Metric.MISSING_COUNT, 'col'), 1)
self.assertEqual(scan_result.get(Metric.MISSING_PERCENTAGE, 'col'), 10)
self.assertEqual(scan_result.get(Metric.VALUES_COUNT, 'col'), 9)
self.assertEqual(scan_result.get(Metric.VALUES_PERCENTAGE, 'col'), 90)
self.assertEqual(scan_result.get(Metric.INVALID_COUNT, 'col'), 2)
self.assertEqual(scan_result.get(Metric.INVALID_PERCENTAGE, 'col'), 20.0)
self.assertEqual(scan_result.get(Metric.VALID_COUNT, 'col'), 7)
self.assertEqual(scan_result.get(Metric.VALID_PERCENTAGE, 'col'), 70.0)
avg_line_length: 35.968912 | max_line_length: 109 | alphanum_fraction: 0.530539
hexsha: f4f7eeab1ae46c26e791d1a6b640fa43bde23cda | size: 4,414 | ext: py | lang: Python
repo (stars/issues/forks): johnktims/pulumi-aws @ c838bc79043f5376c66fc66275a1e012edd3ab7d | path: sdk/python/pulumi_aws/get_ami_ids.py | licenses: ["ECL-2.0", "Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class GetAmiIdsResult:
"""
A collection of values returned by getAmiIds.
"""
def __init__(__self__, executable_users=None, filters=None, id=None, ids=None, name_regex=None, owners=None, sort_ascending=None):
if executable_users and not isinstance(executable_users, list):
raise TypeError("Expected argument 'executable_users' to be a list")
__self__.executable_users = executable_users
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
__self__.filters = filters
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
if ids and not isinstance(ids, list):
raise TypeError("Expected argument 'ids' to be a list")
__self__.ids = ids
if name_regex and not isinstance(name_regex, str):
raise TypeError("Expected argument 'name_regex' to be a str")
__self__.name_regex = name_regex
if owners and not isinstance(owners, list):
raise TypeError("Expected argument 'owners' to be a list")
__self__.owners = owners
if sort_ascending and not isinstance(sort_ascending, bool):
raise TypeError("Expected argument 'sort_ascending' to be a bool")
__self__.sort_ascending = sort_ascending
class AwaitableGetAmiIdsResult(GetAmiIdsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAmiIdsResult(
executable_users=self.executable_users,
filters=self.filters,
id=self.id,
ids=self.ids,
name_regex=self.name_regex,
owners=self.owners,
sort_ascending=self.sort_ascending)
def get_ami_ids(executable_users=None,filters=None,name_regex=None,owners=None,sort_ascending=None,opts=None):
"""
Use this data source to get a list of AMI IDs matching the specified criteria.
:param list executable_users: Limit search to users with *explicit* launch
permission on the image. Valid items are the numeric account ID or `self`.
:param list filters: One or more name/value pairs to filter off of. There
are several valid keys, for a full reference, check out
[describe-images in the AWS CLI reference][1].
:param str name_regex: A regex string to apply to the AMI list returned
by AWS. This allows more advanced filtering not supported from the AWS API.
This filtering is done locally on what AWS returns, and could have a performance
impact if the result is large. It is recommended to combine this with other
options to narrow down the list AWS returns.
:param list owners: List of AMI owners to limit search. At least 1 value must be specified. Valid values: an AWS account ID, `self` (the current account), or an AWS owner alias (e.g. `amazon`, `aws-marketplace`, `microsoft`).
:param bool sort_ascending: Used to sort AMIs by creation time.
The **filters** object supports the following:
* `name` (`str`)
* `values` (`list`)
"""
__args__ = dict()
__args__['executableUsers'] = executable_users
__args__['filters'] = filters
__args__['nameRegex'] = name_regex
__args__['owners'] = owners
__args__['sortAscending'] = sort_ascending
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:index/getAmiIds:getAmiIds', __args__, opts=opts).value
return AwaitableGetAmiIdsResult(
executable_users=__ret__.get('executableUsers'),
filters=__ret__.get('filters'),
id=__ret__.get('id'),
ids=__ret__.get('ids'),
name_regex=__ret__.get('nameRegex'),
owners=__ret__.get('owners'),
sort_ascending=__ret__.get('sortAscending'))
avg_line_length: 43.27451 | max_line_length: 229 | alphanum_fraction: 0.673539
hexsha: dc07fbc1d21cb1b3e30baeb8db6441de044ced39 | size: 3,387 | ext: py | lang: Python
repo (stars/issues/forks): L-Net-1992/Paddle @ 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | path: python/paddle/distributed/launch/utils/nvsmi.py | licenses: ["Apache-2.0"]
max_stars_count: 11 (2016-08-29T07:43:26.000Z to 2016-08-29T07:51:24.000Z) | max_issues_count: null | max_forks_count: 1 (2021-09-24T11:23:36.000Z to 2021-09-24T11:23:36.000Z)
content:
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import shlex
import os
import json
import shutil
class Info(object):
def __repr__(self):
return str(self.__dict__)
def json(self):
return json.dumps(self.__dict__)
def dict(self):
return self.__dict__
def str(self, keys=None):
if keys is None:
keys = self.__dict__.keys()
if isinstance(keys, str):
keys = keys.split(',')
values = [str(self.__dict__.get(k, '')) for k in keys]
return ",".join(values)
def query_smi(query=None, query_type="gpu", index=None, dtype=None):
"""
query_type: gpu/compute
"""
if not has_nvidia_smi():
return []
cmd = ["nvidia-smi", "--format=csv,noheader,nounits"]
if isinstance(query, list) and query_type == "gpu":
cmd.extend(["--query-gpu={}".format(",".join(query))])
elif isinstance(query, list) and query_type.startswith("compute"):
cmd.extend(["--query-compute-apps={}".format(",".join(query))])
else:
return
if isinstance(index, list) and len(index) > 0:
cmd.extend(["--id={}".format(",".join(index))])
if not isinstance(dtype, list) or len(dtype) != len(query):
dtype = [str] * len(query)
output = subprocess.check_output(cmd, timeout=3)
lines = output.decode("utf-8").split(os.linesep)
ret = []
for line in lines:
if not line:
continue
info = Info()
for k, v, d in zip(query, line.split(", "), dtype):
setattr(info, k.replace(".", "_"), d(v))
ret.append(info)
return ret
def get_gpu_info(index=None):
q = "index,uuid,driver_version,name,gpu_serial,display_active,display_mode".split(
",")
d = [int, str, str, str, str, str, str]
index = index if index is None or isinstance(
index, list) else str(index).split(",")
return query_smi(q, index=index, dtype=d)
def get_gpu_util(index=None):
q = "index,utilization.gpu,memory.total,memory.used,memory.free,timestamp".split(
",")
d = [int, int, int, int, int, str]
index = index if index is None or isinstance(
index, list) else str(index).split(",")
return query_smi(q, index=index, dtype=d)
def get_gpu_process(index=None):
q = "pid,process_name,gpu_uuid,gpu_name,used_memory".split(",")
d = [int, str, str, str, int]
index = index if index is None or isinstance(
index, list) else str(index).split(",")
return query_smi(q, index=index, query_type="compute", dtype=d)
def has_nvidia_smi():
return shutil.which("nvidia-smi")
if __name__ == '__main__':
print(get_gpu_info(0))
print(get_gpu_util(0))
print(get_gpu_process(0))
u = get_gpu_util()
for i in u:
print(i.str())
avg_line_length: 28.462185 | max_line_length: 86 | alphanum_fraction: 0.632713
hexsha: 4a36d3f26f3bd4d60cdfb02db4f31f9ee87695a4 | size: 62,377 | ext: py | lang: Python
repo (stars/issues/forks): vimalkumarvelayudhan/galaxy @ ea89dd8f149778b6c2f0f3f4a34c8b21f7033af7 | path: lib/galaxy/visualization/data_providers/genome.py | licenses: ["CC-BY-3.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
"""
Data providers for genome visualizations.
"""
import itertools
import math
import os
import random
import re
import sys
from galaxy import eggs
eggs.require('numpy') # noqa
eggs.require('bx-python') # noqa
from bx.interval_index_file import Indexes
from bx.bbi.bigbed_file import BigBedFile
from bx.bbi.bigwig_file import BigWigFile
eggs.require('pysam') # noqa
from pysam import csamtools, ctabix
from galaxy.datatypes.interval import Bed, Gff, Gtf
from galaxy.datatypes.util.gff_util import convert_gff_coords_to_bed, GFFFeature, GFFInterval, GFFReaderWrapper, parse_gff_attributes
from galaxy.util.json import loads
from galaxy.visualization.data_providers.basic import BaseDataProvider
from galaxy.visualization.data_providers.cigar import get_ref_based_read_seq_and_cigar
#
# Utility functions.
#
def float_nan(n):
'''
Return None instead of NaN to pass jQuery 1.4's strict JSON
'''
if n != n: # NaN != NaN
return None
else:
return float(n)
def get_bounds( reads, start_pos_index, end_pos_index ):
'''
Returns the minimum and maximum position for a set of reads.
'''
max_low = sys.maxint
max_high = -sys.maxint
for read in reads:
if read[ start_pos_index ] < max_low:
max_low = read[ start_pos_index ]
if read[ end_pos_index ] > max_high:
max_high = read[ end_pos_index ]
return max_low, max_high
def _convert_between_ucsc_and_ensemble_naming( chrom ):
'''
Convert between UCSC chromosome ('chr1') naming conventions and Ensembl
naming conventions ('1')
'''
if chrom.startswith( 'chr' ):
# Convert from UCSC to Ensembl
return chrom[ 3: ]
else:
# Convert from Ensembl to UCSC
return 'chr' + chrom
def _chrom_naming_matches( chrom1, chrom2 ):
return ( chrom1.startswith( 'chr' ) and chrom2.startswith( 'chr' ) ) or ( not chrom1.startswith( 'chr' ) and not chrom2.startswith( 'chr' ) )
class FeatureLocationIndexDataProvider( BaseDataProvider ):
"""
Reads/writes/queries feature location index (FLI) datasets.
"""
def __init__( self, converted_dataset ):
self.converted_dataset = converted_dataset
def get_data( self, query ):
# Init.
textloc_file = open( self.converted_dataset.file_name, 'r' )
line_len = int( textloc_file.readline() )
file_len = os.path.getsize( self.converted_dataset.file_name )
query = query.lower()
# Find query in file using binary search.
low = 0
high = file_len / line_len
while low < high:
mid = ( low + high ) // 2
position = mid * line_len
textloc_file.seek( position )
# Compare line with query and update low, high.
line = textloc_file.readline()
if line < query:
low = mid + 1
else:
high = mid
# Need to move back one line because last line read may be included in
# results.
position = low * line_len
textloc_file.seek( position )
# At right point in file, generate hits.
result = []
while True:
line = textloc_file.readline()
if not line.startswith( query ):
break
if line[ -1: ] == '\n':
line = line[ :-1 ]
result.append( line.split()[1:] )
textloc_file.close()
return result
class GenomeDataProvider( BaseDataProvider ):
"""
Base class for genome data providers. All genome providers use BED coordinate
format (0-based, half-open coordinates) for both queries and returned data.
"""
dataset_type = None
"""
Mapping from column name to payload data; this mapping is used to create
filters. Key is column name, value is a dict with mandatory key 'index' and
optional key 'name'. E.g. this defines column 4
col_name_data_attr_mapping = {4 : { index: 5, name: 'Score' } }
"""
col_name_data_attr_mapping = {}
def __init__( self, converted_dataset=None, original_dataset=None, dependencies=None,
error_max_vals="Only the first %i %s in this region are displayed." ):
super( GenomeDataProvider, self ).__init__( converted_dataset=converted_dataset,
original_dataset=original_dataset,
dependencies=dependencies,
error_max_vals=error_max_vals )
# File/pointer where data is obtained from. It is useful to set this for repeated
# queries, such as is necessary for genome-wide data.
# TODO: add functions to (a) create data_file and (b) clean up data_file.
self.data_file = None
def write_data_to_file( self, regions, filename ):
"""
Write data in region defined by chrom, start, and end to a file.
"""
raise Exception( "Unimplemented Function" )
def valid_chroms( self ):
"""
Returns chroms/contigs that the dataset contains
"""
return None # by default
def has_data( self, chrom, start, end, **kwargs ):
"""
Returns true if dataset has data in the specified genome window, false
otherwise.
"""
raise Exception( "Unimplemented Function" )
def get_iterator( self, chrom, start, end, **kwargs ):
"""
Returns an iterator that provides data in the region chrom:start-end
"""
raise Exception( "Unimplemented Function" )
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Process data from an iterator to a format that can be provided to client.
"""
raise Exception( "Unimplemented Function" )
def get_data( self, chrom=None, low=None, high=None, start_val=0, max_vals=sys.maxint, **kwargs ):
"""
Returns data in region defined by chrom, start, and end. start_val and
max_vals are used to denote the data to return: start_val is the first element to
return and max_vals indicates the number of values to return.
Return value must be a dictionary with the following attributes:
dataset_type, data
"""
start, end = int( low ), int( high )
iterator = self.get_iterator( chrom, start, end, **kwargs )
return self.process_data( iterator, start_val, max_vals, start=start, end=end, **kwargs )
def get_genome_data( self, chroms_info, **kwargs ):
"""
Returns data for complete genome.
"""
genome_data = []
for chrom_info in chroms_info[ 'chrom_info' ]:
chrom = chrom_info[ 'chrom' ]
chrom_len = chrom_info[ 'len' ]
chrom_data = self.get_data( chrom, 0, chrom_len, **kwargs )
# FIXME: data providers probably should never return None.
# Some data providers return None when there's no data, so
# create a dummy dict if necessary.
if not chrom_data:
chrom_data = {
'data': None
}
chrom_data[ 'region' ] = "%s:%i-%i" % ( chrom, 0, chrom_len )
genome_data.append( chrom_data )
return {
'data': genome_data,
'dataset_type': self.dataset_type
}
def get_filters( self ):
"""
Returns filters for provider's data. Return value is a list of
filters; each filter is a dictionary with the keys 'name', 'index', 'type'.
NOTE: This method uses the original dataset's datatype and metadata to
create the filters.
"""
# Get column names.
try:
column_names = self.original_dataset.datatype.column_names
except AttributeError:
try:
column_names = range( self.original_dataset.metadata.columns )
except: # Give up
return []
# Dataset must have column types; if not, cannot create filters.
try:
column_types = self.original_dataset.metadata.column_types
except AttributeError:
return []
# Create and return filters.
filters = []
if self.original_dataset.metadata.viz_filter_cols:
for viz_col_index in self.original_dataset.metadata.viz_filter_cols:
# Some columns are optional, so can't assume that a filter
# column is in dataset.
if viz_col_index >= len( column_names ):
continue
col_name = column_names[ viz_col_index ]
# Make sure that column has a mapped index. If not, do not add filter.
try:
attrs = self.col_name_data_attr_mapping[ col_name ]
except KeyError:
continue
filters.append(
{ 'name' : attrs[ 'name' ], 'type' : column_types[viz_col_index],
'index' : attrs[ 'index' ] } )
return filters
def get_default_max_vals( self ):
return 5000
#
# -- Base mixins and providers --
#
class FilterableMixin:
def get_filters( self ):
""" Returns a dataset's filters. """
# is_ functions taken from Tabular.set_meta
def is_int( column_text ):
try:
int( column_text )
return True
except:
return False
def is_float( column_text ):
try:
float( column_text )
return True
except:
if column_text.strip().lower() == 'na':
return True # na is special cased to be a float
return False
#
# Get filters.
# TODOs:
# (a) might be useful to move this into each datatype's set_meta method;
# (b) could look at first N lines to ensure GTF attribute types are consistent.
#
filters = []
# HACK: first 8 fields are for drawing, so start filter column index at 9.
filter_col = 8
if isinstance( self.original_dataset.datatype, Gff ):
# Can filter by score and GTF attributes.
filters = [ { 'name': 'Score',
'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c6' } ]
filter_col += 1
if isinstance( self.original_dataset.datatype, Gtf ):
# Create filters based on dataset metadata.
for name, a_type in self.original_dataset.metadata.attribute_types.items():
if a_type in [ 'int', 'float' ]:
filters.append(
{ 'name': name,
'type': 'number',
'index': filter_col,
'tool_id': 'gff_filter_by_attribute',
'tool_exp_name': name } )
filter_col += 1
elif isinstance( self.original_dataset.datatype, Bed ):
# Can filter by score column only.
filters = [ { 'name': 'Score',
'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c5'
} ]
return filters
class TabixDataProvider( FilterableMixin, GenomeDataProvider ):
dataset_type = 'tabix'
"""
Tabix index data provider for the Galaxy track browser.
"""
col_name_data_attr_mapping = { 4 : { 'index': 4 , 'name' : 'Score' } }
def get_iterator( self, chrom, start, end, **kwargs ):
start, end = int(start), int(end)
if end >= (2 << 29):
end = (2 << 29 - 1) # Tabix-enforced maximum
bgzip_fname = self.dependencies['bgzip'].file_name
if not self.data_file:
self.data_file = ctabix.Tabixfile(bgzip_fname, index_filename=self.converted_dataset.file_name)
# Get iterator using either naming scheme.
iterator = iter( [] )
if chrom in self.data_file.contigs:
iterator = self.data_file.fetch(reference=chrom, start=start, end=end)
else:
# Try alternative naming scheme.
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
if chrom in self.data_file.contigs:
iterator = self.data_file.fetch(reference=chrom, start=start, end=end)
return iterator
def write_data_to_file( self, regions, filename ):
out = open( filename, "w" )
for region in regions:
# Write data in region.
chrom = region.chrom
start = region.start
end = region.end
iterator = self.get_iterator( chrom, start, end )
for line in iterator:
out.write( "%s\n" % line )
out.close()
#
# -- Interval data providers --
#
class IntervalDataProvider( GenomeDataProvider ):
dataset_type = 'interval_index'
"""
Processes interval data from native format to payload format.
Payload format: [ uid (offset), start, end, name, strand, thick_start, thick_end, blocks ]
"""
def get_iterator( self, chrom, start, end, **kwargs ):
raise Exception( "Unimplemented Function" )
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Provides
"""
# Build data to return. Payload format is:
# [ <guid/offset>, <start>, <end>, <name>, <strand> ]
#
# First three entries are mandatory, others are optional.
#
filter_cols = loads( kwargs.get( "filter_cols", "[]" ) )
no_detail = ( "no_detail" in kwargs )
rval = []
message = None
# Subtract one b/c columns are 1-based but indices are 0-based.
def col_fn(col):
return None if col is None else col - 1
start_col = self.original_dataset.metadata.startCol - 1
end_col = self.original_dataset.metadata.endCol - 1
strand_col = col_fn( self.original_dataset.metadata.strandCol )
name_col = col_fn( self.original_dataset.metadata.nameCol )
for count, line in enumerate( iterator ):
if count < start_val:
continue
if max_vals and count - start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "features" )
break
feature = line.split()
length = len(feature)
# Unique id is just a hash of the line
payload = [ hash(line), int( feature[start_col] ), int( feature[end_col] ) ]
if no_detail:
rval.append( payload )
continue
# Name, strand.
if name_col:
payload.append( feature[name_col] )
if strand_col:
# Put empty name as placeholder.
if not name_col:
payload.append( "" )
payload.append( feature[strand_col] )
# Score (filter data)
if length >= 5 and filter_cols and filter_cols[0] == "Score":
try:
payload.append( float( feature[4] ) )
except:
payload.append( feature[4] )
rval.append( payload )
return { 'data': rval, 'message': message }
def write_data_to_file( self, regions, filename ):
raise Exception( "Unimplemented Function" )
class IntervalTabixDataProvider( TabixDataProvider, IntervalDataProvider ):
"""
Provides data from a BED file indexed via tabix.
"""
pass
#
# -- BED data providers --
#
class BedDataProvider( GenomeDataProvider ):
"""
Processes BED data from native format to payload format.
Payload format: [ uid (offset), start, end, name, strand, thick_start, thick_end, blocks ]
"""
dataset_type = 'interval_index'
def get_iterator( self, chrom, start, end, **kwargs ):
raise Exception( "Unimplemented Method" )
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Provides
"""
# Build data to return. Payload format is:
# [ <guid/offset>, <start>, <end>, <name>, <strand>, <thick_start>,
# <thick_end>, <blocks> ]
#
# First three entries are mandatory, others are optional.
#
filter_cols = loads( kwargs.get( "filter_cols", "[]" ) )
no_detail = ( "no_detail" in kwargs )
rval = []
message = None
for count, line in enumerate( iterator ):
if count < start_val:
continue
if max_vals and count - start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "features" )
break
# TODO: can we use column metadata to fill out payload?
# TODO: use function to set payload data
feature = line.split()
length = len(feature)
# Unique id is just a hash of the line
payload = [ hash(line), int(feature[1]), int(feature[2]) ]
if no_detail:
rval.append( payload )
continue
# Name, strand, thick start, thick end.
if length >= 4:
payload.append(feature[3])
if length >= 6:
payload.append(feature[5])
if length >= 8:
payload.append(int(feature[6]))
payload.append(int(feature[7]))
# Blocks.
if length >= 12:
block_sizes = [ int(n) for n in feature[10].split(',') if n != '']
block_starts = [ int(n) for n in feature[11].split(',') if n != '' ]
blocks = zip( block_sizes, block_starts )
payload.append( [ ( int(feature[1]) + block[1], int(feature[1]) + block[1] + block[0] ) for block in blocks ] )
# Score (filter data)
if length >= 5 and filter_cols and filter_cols[0] == "Score":
# If dataset doesn't have name/strand/thick start/thick end/blocks,
# add placeholders. There should be 8 entries if all attributes
# are present.
payload.extend( [ None for i in range( 8 - len( payload ) ) ] )
try:
payload.append( float( feature[4] ) )
except:
payload.append( feature[4] )
rval.append( payload )
return { 'data': rval, 'dataset_type': self.dataset_type, 'message': message }
def write_data_to_file( self, regions, filename ):
out = open( filename, "w" )
for region in regions:
# Write data in region.
chrom = region.chrom
start = region.start
end = region.end
iterator = self.get_iterator( chrom, start, end )
for line in iterator:
out.write( "%s\n" % line )
out.close()
class BedTabixDataProvider( TabixDataProvider, BedDataProvider ):
"""
Provides data from a BED file indexed via tabix.
"""
pass
class RawBedDataProvider( BedDataProvider ):
"""
Provide data from BED file.
NOTE: this data provider does not use indices, and hence will be very slow
for large datasets.
"""
def get_iterator( self, source, chrom=None, start=None, end=None, **kwargs ):
# Read first line in order to match chrom naming format.
line = source.readline()
dataset_chrom = line.split()[0]
if not _chrom_naming_matches( chrom, dataset_chrom ):
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
# Undo read.
source.seek( 0 )
def line_filter_iter():
for line in open( self.original_dataset.file_name ):
if line.startswith( "track" ) or line.startswith( "browser" ):
continue
feature = line.split()
feature_chrom = feature[0]
feature_start = int( feature[1] )
feature_end = int( feature[2] )
if ( chrom is not None and feature_chrom != chrom ) \
or ( start is not None and feature_start > end ) \
or ( end is not None and feature_end < start ):
continue
yield line
return line_filter_iter()
#
# -- VCF data providers --
#
class VcfDataProvider( GenomeDataProvider ):
"""
Abstract class that processes VCF data from native format to payload format.
Payload format: An array of entries for each locus in the file. Each array
has the following entries:
1. GUID (unused)
2. location (0-based)
3. reference base(s)
4. alternative base(s)
5. quality score
6. whether variant passed filter
7. sample genotypes -- a single string with samples separated by commas; empty string
denotes the reference genotype
8-end: allele counts for each alternative
"""
col_name_data_attr_mapping = { 'Qual' : { 'index': 6 , 'name' : 'Qual' } }
dataset_type = 'variant'
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Returns a dict with the following attributes::
data - a list of variants with the format
.. raw:: text
[<guid>, <start>, <end>, <name>, cigar, seq]
message - error/informative message
"""
data = []
message = None
def get_mapping( ref, alt ):
"""
Returns ( offset, new_seq, cigar ) tuple that defines mapping of
alt to ref. Cigar format is an array of [ op_index, length ] pairs
where op_index is the 0-based index into the string "MIDNSHP=X"
"""
cig_ops = "MIDNSHP=X"
ref_len = len( ref )
alt_len = len( alt )
# Substitutions?
if ref_len == alt_len:
return 0, alt, [ [ cig_ops.find( "M" ), ref_len ] ]
# Deletions?
alt_in_ref_index = ref.find( alt )
if alt_in_ref_index != -1:
return alt_in_ref_index, ref[ alt_in_ref_index + 1: ], [ [ cig_ops.find( "D" ), ref_len - alt_len ] ]
# Insertions?
ref_in_alt_index = alt.find( ref )
if ref_in_alt_index != -1:
return ref_in_alt_index, alt[ ref_in_alt_index + 1: ], [ [ cig_ops.find( "I" ), alt_len - ref_len ] ]
# Pack data.
genotype_re = re.compile( '/|\|' )
for count, line in enumerate( iterator ):
if count < start_val:
continue
if max_vals and count - start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "features" )
break
# Split line and aggregate data.
feature = line.split()
pos, c_id, ref, alt, qual, c_filter, info = feature[ 1:8 ]
# Format and samples data are optional.
samples_data = []
if len( feature ) > 8:
samples_data = feature[ 9: ]
# VCF is 1-based but provided position is 0-based.
pos = int( pos ) - 1
# FIXME: OK to skip?
if alt == '.':
count -= 1
continue
# Set up array to track allele counts.
allele_counts = [ 0 for i in range( alt.count( ',' ) + 1 ) ]
sample_gts = []
if samples_data:
# Process and pack samples' genotype and count alleles across samples.
alleles_seen = {}
has_alleles = False
for i, sample in enumerate( samples_data ):
# Parse and count alleles.
genotype = sample.split( ':' )[ 0 ]
has_alleles = False
alleles_seen.clear()
for allele in genotype_re.split( genotype ):
try:
# This may throw a ValueError if allele is missing.
allele = int( allele )
# Only count allele if it hasn't been seen yet.
if allele != 0 and allele not in alleles_seen:
allele_counts[ allele - 1 ] += 1
alleles_seen[ allele ] = True
has_alleles = True
except ValueError:
pass
# If no alleles, use empty string as proxy.
if not has_alleles:
genotype = ''
sample_gts.append( genotype )
else:
# No samples, so set allele count and sample genotype manually.
allele_counts = [ 1 ]
sample_gts = [ '1/1' ]
# Add locus data.
locus_data = [
-1,
pos,
c_id,
ref,
alt,
qual,
c_filter,
','.join( sample_gts )
]
locus_data.extend( allele_counts )
data.append( locus_data )
return { 'data': data, 'message': message }
def write_data_to_file( self, regions, filename ):
out = open( filename, "w" )
for region in regions:
# Write data in region.
chrom = region.chrom
start = region.start
end = region.end
iterator = self.get_iterator( chrom, start, end )
for line in iterator:
out.write( "%s\n" % line )
out.close()
class VcfTabixDataProvider( TabixDataProvider, VcfDataProvider ):
"""
Provides data from a VCF file indexed via tabix.
"""
dataset_type = 'variant'
class RawVcfDataProvider( VcfDataProvider ):
"""
Provide data from VCF file.
NOTE: this data provider does not use indices, and hence will be very slow
for large datasets.
"""
def get_iterator( self, chrom, start, end, **kwargs ):
source = open( self.original_dataset.file_name )
# Skip comments.
line = None
for line in source:
if not line.startswith("#"):
break
# If last line is a comment, there are no data lines.
if line.startswith( "#" ):
return []
# Match chrom naming format.
if line:
dataset_chrom = line.split()[0]
if not _chrom_naming_matches( chrom, dataset_chrom ):
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
def line_in_region( vcf_line, chrom, start, end ):
""" Returns true if line is in region. """
variant_chrom, variant_start = vcf_line.split()[ 0:2 ]
# VCF format is 1-based.
variant_start = int( variant_start ) - 1
return variant_chrom == chrom and variant_start >= start and variant_start <= end
def line_filter_iter():
""" Yields lines in source that are in region chrom:start-end """
# Yield data line read above.
if line_in_region( line, chrom, start, end ):
yield line
# Search for and yield other data lines.
for data_line in source:
if line_in_region( data_line, chrom, start, end ):
yield data_line
return line_filter_iter()
class BamDataProvider( GenomeDataProvider, FilterableMixin ):
"""
Provides access to intervals from a sorted indexed BAM file. Coordinate
data is reported in BED format: 0-based, half-open.
"""
dataset_type = 'bai'
def get_filters( self ):
"""
Returns filters for dataset.
"""
# HACK: first 7 fields are for drawing, so start filter column index at 7.
filter_col = 7
filters = []
filters.append( { 'name': 'Mapping Quality',
'type': 'number',
'index': filter_col }
)
return filters
def write_data_to_file( self, regions, filename ):
"""
Write reads in regions to file.
"""
# Open current BAM file using index.
bamfile = csamtools.Samfile( filename=self.original_dataset.file_name, mode='rb',
index_filename=self.converted_dataset.file_name )
# TODO: write headers as well?
new_bamfile = csamtools.Samfile( template=bamfile, filename=filename, mode='wb' )
for region in regions:
# Write data from region.
chrom = region.chrom
start = region.start
end = region.end
try:
data = bamfile.fetch(start=start, end=end, reference=chrom)
except ValueError:
# Try alternative chrom naming.
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
try:
data = bamfile.fetch( start=start, end=end, reference=chrom )
except ValueError:
return None
# Write reads in region.
for i, read in enumerate( data ):
new_bamfile.write( read )
# Cleanup.
new_bamfile.close()
bamfile.close()
def get_iterator( self, chrom, start, end, **kwargs ):
"""
Returns an iterator that provides data in the region chrom:start-end
"""
start, end = int( start ), int( end )
orig_data_filename = self.original_dataset.file_name
index_filename = self.converted_dataset.file_name
# Attempt to open the BAM file with index
bamfile = csamtools.Samfile( filename=orig_data_filename, mode='rb', index_filename=index_filename )
try:
data = bamfile.fetch( start=start, end=end, reference=chrom )
except ValueError:
# Try alternative chrom naming.
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
try:
data = bamfile.fetch( start=start, end=end, reference=chrom )
except ValueError:
return None
return data
def process_data( self, iterator, start_val=0, max_vals=None, ref_seq=None,
iterator_type='nth', mean_depth=None, start=0, end=0, **kwargs ):
"""
Returns a dict with the following attributes::
data - a list of reads with the format
[<guid>, <start>, <end>, <name>, <read_1>, <read_2>, [empty], <mapq_scores>]
where <read_1> has the format
[<start>, <end>, <cigar>, <strand>, <read_seq>]
and <read_2> has the format
[<start>, <end>, <cigar>, <strand>, <read_seq>]
Field 7 is empty so that mapq scores' location matches that in single-end reads.
For single-end reads, read has format:
[<guid>, <start>, <end>, <name>, <cigar>, <strand>, <seq>, <mapq_score>]
NOTE: read end and sequence data are not valid for reads outside of
requested region and should not be used.
max_low - lowest coordinate for the returned reads
max_high - highest coordinate for the returned reads
message - error/informative message
"""
# No iterator indicates no reads.
if iterator is None:
return { 'data': [], 'message': None }
#
# Helper functions.
#
def decode_strand( read_flag, mask ):
""" Decode strand from read flag. """
strand_flag = ( read_flag & mask == 0 )
if strand_flag:
return "+"
else:
return "-"
def _random_read_iterator( read_iterator, threshold ):
"""
An iterator that returns a random stream of reads from the read_iterator
as well as corresponding pairs for returned reads.
threshold is a value in [0,1] that denotes the percentage of reads
to return.
"""
for e in read_iterator:
if e.qname in paired_pending or random.uniform( 0, 1 ) <= threshold:
yield e
def _nth_read_iterator( read_iterator, threshold ):
"""
An iterator that returns every nth read.
"""
# Convert threshold to N for stepping through iterator.
n = int( 1 / threshold )
return itertools.islice( read_iterator, None, None, n )
# -- Choose iterator. --
# Calculate threshold for non-sequential iterators based on mean_depth and read length.
try:
first_read = next( iterator )
except StopIteration:
# no reads.
return { 'data': [], 'message': None, 'max_low': start, 'max_high': start }
read_len = len( first_read.seq )
num_reads = max( ( end - start ) * mean_depth / float( read_len ), 1 )
threshold = float( max_vals ) / num_reads
iterator = itertools.chain( iter( [ first_read ] ), iterator )
# Use specified iterator type, save for when threshold is >= 1.
# A threshold of >= 1 indicates all reads are to be returned, so no
# sampling needed and seqential iterator will be used.
if iterator_type == 'sequential' or threshold >= 1:
read_iterator = iterator
elif iterator_type == 'random':
read_iterator = _random_read_iterator( iterator, threshold )
elif iterator_type == 'nth':
read_iterator = _nth_read_iterator( iterator, threshold )
#
# Encode reads as list of lists.
#
results = []
paired_pending = {}
unmapped = 0
message = None
count = 0
for read in read_iterator:
if count < start_val:
continue
if ( count - start_val - unmapped ) >= max_vals:
message = self.error_max_vals % ( max_vals, "reads" )
break
# If not mapped, skip read.
is_mapped = ( read.flag & 0x0004 == 0 )
if not is_mapped:
unmapped += 1
continue
qname = read.qname
seq = read.seq
strand = decode_strand( read.flag, 0x0010 )
if read.cigar is not None:
read_len = sum( [cig[1] for cig in read.cigar] ) # Use cigar to determine length
else:
read_len = len(seq) # If no cigar, just use sequence length
if read.is_proper_pair:
if qname in paired_pending:
# Found pair.
pair = paired_pending[qname]
results.append( [ hash( "%i_%s" % ( pair['start'], qname ) ),
pair['start'],
read.pos + read_len,
qname,
[ pair['start'], pair['end'], pair['cigar'], pair['strand'], pair['seq'] ],
[ read.pos, read.pos + read_len, read.cigar, strand, seq ],
None, [ pair['mapq'], read.mapq ]
] )
del paired_pending[qname]
else:
# Insert first of pair.
paired_pending[qname] = { 'start': read.pos, 'end': read.pos + read_len, 'seq': seq, 'mate_start': read.mpos,
'rlen': read_len, 'strand': strand, 'cigar': read.cigar, 'mapq': read.mapq }
count += 1
else:
results.append( [ hash( "%i_%s" % ( read.pos, qname ) ),
read.pos, read.pos + read_len, qname,
read.cigar, strand, read.seq, read.mapq ] )
count += 1
# Take care of reads whose mates are out of range.
for qname, read in paired_pending.iteritems():
if read['mate_start'] < read['start']:
# Mate is before read.
read_start = read['mate_start']
read_end = read['end']
# Make read_1 start=end so that length is 0 b/c we don't know
# read length.
r1 = [ read['mate_start'], read['mate_start'] ]
r2 = [ read['start'], read['end'], read['cigar'], read['strand'], read['seq'] ]
else:
# Mate is after read.
read_start = read['start']
# Make read_2 start=end so that length is 0 b/c we don't know
# read length. Hence, end of read is start of read_2.
read_end = read['mate_start']
r1 = [ read['start'], read['end'], read['cigar'], read['strand'], read['seq'] ]
r2 = [ read['mate_start'], read['mate_start'] ]
results.append( [ hash( "%i_%s" % ( read_start, qname ) ), read_start, read_end, qname, r1, r2, [read[ 'mapq' ], 125] ] )
# Clean up. TODO: is this needed? If so, we'll need a cleanup function after processing the data.
# bamfile.close()
def compress_seq_and_cigar( read, start_field, cigar_field, seq_field ):
'''
Use reference-based compression to compress read sequence and cigar.
'''
read_seq, read_cigar = get_ref_based_read_seq_and_cigar( read[ seq_field ].upper(),
read[ start_field ],
ref_seq.sequence,
ref_seq.start,
read[ cigar_field ] )
read[ seq_field ] = read_seq
read[ cigar_field ] = read_cigar
def convert_cigar( read, start_field, cigar_field, seq_field ):
'''
Convert read cigar from pysam format to string format.
'''
cigar_ops = 'MIDNSHP=X'
read_cigar = ''
for op_tuple in read[ cigar_field ]:
read_cigar += '%i%s' % ( op_tuple[1], cigar_ops[ op_tuple[0] ] )
read[ cigar_field ] = read_cigar
# Choose method for processing reads. Use reference-based compression
# if possible. Otherwise, convert cigar.
if ref_seq:
# Uppercase for easy comparison.
ref_seq.sequence = ref_seq.sequence.upper()
process_read = compress_seq_and_cigar
else:
process_read = convert_cigar
# Process reads.
for read in results:
if isinstance( read[ 5 ], list ):
# Paired-end read.
if len( read[4] ) > 2:
process_read( read[4], 0, 2, 4 )
if len( read[5] ) > 2:
process_read( read[5], 0, 2, 4 )
else:
# Single-end read.
process_read( read, 1, 4, 6)
max_low, max_high = get_bounds( results, 1, 2 )
return { 'data': results, 'message': message, 'max_low': max_low, 'max_high': max_high }
class SamDataProvider( BamDataProvider ):
dataset_type = 'bai'
def __init__( self, converted_dataset=None, original_dataset=None, dependencies=None ):
""" Create SamDataProvider. """
super( SamDataProvider, self ).__init__( converted_dataset=converted_dataset,
original_dataset=original_dataset,
dependencies=dependencies )
# To use BamDataProvider, original dataset must be BAM and
# converted dataset must be BAI. Use BAI from BAM metadata.
if converted_dataset:
self.original_dataset = converted_dataset
self.converted_dataset = converted_dataset.metadata.bam_index
class BBIDataProvider( GenomeDataProvider ):
"""
BBI data provider for the Galaxy track browser.
"""
dataset_type = 'bigwig'
def valid_chroms( self ):
# No way to return this info as of now
return None
def has_data( self, chrom ):
f, bbi = self._get_dataset()
all_dat = bbi.query( chrom, 0, 2147483647, 1 ) or \
bbi.query( _convert_between_ucsc_and_ensemble_naming( chrom ), 0, 2147483647, 1 )
f.close()
return all_dat is not None
def get_data( self, chrom, start, end, start_val=0, max_vals=None, num_samples=1000, **kwargs ):
start = int( start )
end = int( end )
# Helper function for getting summary data regardless of chromosome
# naming convention.
def _summarize_bbi( bbi, chrom, start, end, num_points ):
return bbi.summarize( chrom, start, end, num_points ) or \
bbi.summarize( _convert_between_ucsc_and_ensemble_naming( chrom ) , start, end, num_points )
# Bigwig can be a standalone bigwig file, in which case we use
# original_dataset, or coming from wig->bigwig conversion in
# which we use converted_dataset
f, bbi = self._get_dataset()
# If stats requested, compute overall summary data for the range
# start:endbut no reduced data. This is currently used by client
# to determine the default range.
if 'stats' in kwargs:
summary = _summarize_bbi( bbi, chrom, start, end, 1 )
f.close()
min_val = 0
max_val = 0
mean = 0
sd = 0
if summary is not None:
# Does the summary contain any defined values?
valid_count = summary.valid_count[0]
if summary.valid_count > 0:
# Compute $\mu \pm 2\sigma$ to provide an estimate for upper and lower
# bounds that contain ~95% of the data.
mean = summary.sum_data[0] / valid_count
var = summary.sum_squares[0] - mean
if valid_count > 1:
var /= valid_count - 1
sd = math.sqrt( var )
min_val = summary.min_val[0]
max_val = summary.max_val[0]
return dict( data=dict( min=min_val, max=max_val, mean=mean, sd=sd ) )
def summarize_region( bbi, chrom, start, end, num_points ):
'''
Returns results from summarizing a region using num_points.
NOTE: num_points cannot be greater than end - start or BBI
will return None for all positions.
'''
result = []
# Get summary; this samples at intervals of length
# (end - start)/num_points -- i.e. drops any fractional component
# of interval length.
summary = _summarize_bbi( bbi, chrom, start, end, num_points )
if summary:
# mean = summary.sum_data / summary.valid_count
# Standard deviation by bin, not yet used
# var = summary.sum_squares - mean
# var /= minimum( valid_count - 1, 1 )
# sd = sqrt( var )
pos = start
step_size = (end - start) / num_points
for i in range( num_points ):
result.append( (pos, float_nan( summary.sum_data[i] / summary.valid_count[i] ) ) )
pos += step_size
return result
# Approach is different depending on region size.
num_samples = int( num_samples )
if end - start < num_samples:
# Get values for individual bases in region, including start and end.
# To do this, need to increase end to next base and request number of points.
num_points = end - start + 1
end += 1
else:
#
# The goal is to sample the region between start and end uniformly
# using ~N (num_samples) data points. The challenge is that the size of
# sampled intervals rarely is full bases, so sampling using N points
# will leave the end of the region unsampled due to remainders for
# each interval. To recitify this, a new N is calculated based on the
# step size that covers as much of the region as possible.
#
# However, this still leaves some of the region unsampled. This
# could be addressed by repeatedly sampling remainder using a
# smaller and smaller step_size, but that would require iteratively
# going to BBI, which could be time consuming.
#
# Start with N samples.
num_points = num_samples
step_size = ( end - start ) / num_points
# Add additional points to sample in the remainder not covered by
# the initial N samples.
remainder_start = start + step_size * num_points
additional_points = ( end - remainder_start ) / step_size
num_points += additional_points
result = summarize_region( bbi, chrom, start, end, num_points )
# Cleanup and return.
f.close()
return {
'data': result,
'dataset_type': self.dataset_type
}
class BigBedDataProvider( BBIDataProvider ):
def _get_dataset( self ):
# Nothing converts to bigBed so we don't consider converted dataset
f = open( self.original_dataset.file_name )
return f, BigBedFile(file=f)
class BigWigDataProvider ( BBIDataProvider ):
"""
Provides data from BigWig files; position data is reported in 1-based
coordinate system, i.e. wiggle format.
"""
def _get_dataset( self ):
if self.converted_dataset is not None:
f = open( self.converted_dataset.file_name )
else:
f = open( self.original_dataset.file_name )
return f, BigWigFile(file=f)
class IntervalIndexDataProvider( FilterableMixin, GenomeDataProvider ):
"""
Interval index files used for GFF, Pileup files.
"""
col_name_data_attr_mapping = { 4 : { 'index': 4 , 'name' : 'Score' } }
dataset_type = 'interval_index'
def write_data_to_file( self, regions, filename ):
source = open( self.original_dataset.file_name )
index = Indexes( self.converted_dataset.file_name )
out = open( filename, 'w' )
for region in regions:
# Write data from region.
chrom = region.chrom
start = region.start
end = region.end
for start, end, offset in index.find( chrom, start, end ):
source.seek( offset )
# HACK: write differently depending on original dataset format.
if self.original_dataset.ext not in [ 'gff', 'gff3', 'gtf' ]:
line = source.readline()
out.write( line )
else:
reader = GFFReaderWrapper( source, fix_strand=True )
feature = reader.next()
for interval in feature.intervals:
out.write( '\t'.join( interval.fields ) + '\n' )
source.close()
out.close()
def get_iterator( self, chrom, start, end, **kwargs ):
"""
Returns an array with values: (a) source file and (b) an iterator that
provides data in the region chrom:start-end
"""
start, end = int(start), int(end)
index = Indexes( self.converted_dataset.file_name )
if chrom not in index.indexes:
# Try alternative naming.
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
return index.find(chrom, start, end)
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
results = []
message = None
source = open( self.original_dataset.file_name )
#
# Build data to return. Payload format is:
# [ <guid/offset>, <start>, <end>, <name>, <score>, <strand>, <thick_start>,
# <thick_end>, <blocks> ]
#
# First three entries are mandatory, others are optional.
#
filter_cols = loads( kwargs.get( "filter_cols", "[]" ) )
no_detail = ( "no_detail" in kwargs )
for count, val in enumerate( iterator ):
offset = val[2]
if count < start_val:
continue
if count - start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "features" )
break
source.seek( offset )
# TODO: can we use column metadata to fill out payload?
# GFF dataset.
reader = GFFReaderWrapper( source, fix_strand=True )
feature = reader.next()
payload = package_gff_feature( feature, no_detail, filter_cols )
payload.insert( 0, offset )
results.append( payload )
return { 'data': results, 'message': message }
class RawGFFDataProvider( GenomeDataProvider ):
"""
Provide data from GFF file that has not been indexed.
NOTE: this data provider does not use indices, and hence will be very slow
for large datasets.
"""
dataset_type = 'interval_index'
def get_iterator( self, chrom, start, end, **kwargs ):
"""
Returns an iterator that provides data in the region chrom:start-end as well as
a file offset.
"""
source = open( self.original_dataset.file_name )
# Read first line in order to match chrom naming format.
line = source.readline()
# If line empty, assume file is empty and return empty iterator.
if len( line ) == 0:
return iter([])
# Determine chromosome naming format.
dataset_chrom = line.split()[0]
if not _chrom_naming_matches( chrom, dataset_chrom ):
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
# Undo read.
source.seek( 0 )
def features_in_region_iter():
offset = 0
for feature in GFFReaderWrapper( source, fix_strand=True ):
# Only provide features that are in region.
feature_start, feature_end = convert_gff_coords_to_bed( [ feature.start, feature.end ] )
if feature.chrom == chrom and feature_end > start and feature_start < end:
yield feature, offset
offset += feature.raw_size
return features_in_region_iter()
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Process data from an iterator to a format that can be provided to client.
"""
filter_cols = loads( kwargs.get( "filter_cols", "[]" ) )
no_detail = ( "no_detail" in kwargs )
results = []
message = None
for count, ( feature, offset ) in enumerate( iterator ):
if count < start_val:
continue
if count - start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "reads" )
break
payload = package_gff_feature( feature, no_detail=no_detail, filter_cols=filter_cols )
payload.insert( 0, offset )
results.append( payload )
return { 'data': results, 'dataset_type': self.dataset_type, 'message': message }
class GtfTabixDataProvider( TabixDataProvider ):
"""
Returns data from GTF datasets that are indexed via tabix.
"""
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
# Loop through lines and group by transcript_id; each group is a feature.
# TODO: extend this code or use code in gff_util to process GFF/3 as well
# and then create a generic GFFDataProvider that can be used with both
# raw and tabix datasets.
features = {}
for count, line in enumerate( iterator ):
line_attrs = parse_gff_attributes( line.split('\t')[8] )
transcript_id = line_attrs[ 'transcript_id' ]
if transcript_id in features:
feature = features[ transcript_id ]
else:
feature = []
features[ transcript_id ] = feature
feature.append( GFFInterval( None, line.split( '\t') ) )
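        # At this point, e.g., two GTF lines sharing transcript_id "t1" have been
        # grouped into a single features["t1"] entry holding two GFFInterval objects
        # (one feature, two blocks). The transcript id "t1" is only illustrative.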
# Process data.
filter_cols = loads( kwargs.get( "filter_cols", "[]" ) )
no_detail = ( "no_detail" in kwargs )
results = []
message = None
for count, intervals in enumerate( features.values() ):
if count < start_val:
continue
if count - start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "reads" )
break
feature = GFFFeature( None, intervals=intervals )
payload = package_gff_feature( feature, no_detail=no_detail, filter_cols=filter_cols )
payload.insert( 0, feature.intervals[ 0 ].attributes[ 'transcript_id' ] )
results.append( payload )
return { 'data': results, 'message': message }
#
# -- ENCODE Peak data providers.
#
class ENCODEPeakDataProvider( GenomeDataProvider ):
"""
Abstract class that processes ENCODEPeak data from native format to payload format.
Payload format: [ uid (offset), start, end, name, strand, thick_start, thick_end, blocks ]
"""
def get_iterator( self, chrom, start, end, **kwargs ):
        raise NotImplementedError( "Unimplemented Method" )
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
        Provides ENCODEPeak data in payload format.
"""
# FIXMEs:
# (1) should be able to unify some of this code with BedDataProvider.process_data
# (2) are optional number of parameters supported?
# Build data to return. Payload format is:
# [ <guid/offset>, <start>, <end>, <name>, <strand>, <thick_start>,
# <thick_end>, <blocks> ]
#
# First three entries are mandatory, others are optional.
#
no_detail = ( "no_detail" in kwargs )
rval = []
message = None
for count, line in enumerate( iterator ):
if count < start_val:
continue
if max_vals and count - start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "features" )
break
feature = line.split()
# Feature initialization.
payload = [
# GUID is just a hash of the line
hash( line ),
# Add start, end.
int( feature[1] ),
int( feature[2] )
]
if no_detail:
rval.append( payload )
continue
# Extend with additional data.
payload.extend( [
# Add name, strand.
feature[3],
feature[5],
# Thick start, end are feature start, end for now.
int( feature[1] ),
int( feature[2] ),
# No blocks.
None,
# Filtering data: Score, signalValue, pValue, qValue.
float( feature[4] ),
float( feature[6] ),
float( feature[7] ),
float( feature[8] )
] )
rval.append( payload )
return { 'data': rval, 'message': message }
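    # Sketch of the resulting payload for a narrowPeak-style line such as
    # "chr1 100 200 peak1 900 + 5.1 3.2 1.8 50" (values here are illustrative only):
    #   [ <hash of line>, 100, 200, 'peak1', '+', 100, 200, None, 900.0, 5.1, 3.2, 1.8 ]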
class ENCODEPeakTabixDataProvider( TabixDataProvider, ENCODEPeakDataProvider ):
"""
Provides data from an ENCODEPeak dataset indexed via tabix.
"""
def get_filters( self ):
"""
Returns filters for dataset.
"""
# HACK: first 8 fields are for drawing, so start filter column index at 9.
filter_col = 8
filters = []
filters.append( { 'name': 'Score',
'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c6' } )
filter_col += 1
filters.append( { 'name': 'Signal Value',
'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c7' } )
filter_col += 1
filters.append( { 'name': 'pValue',
'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c8' } )
filter_col += 1
filters.append( { 'name': 'qValue',
'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c9' } )
return filters
#
# -- ChromatinInteraction data providers --
#
class ChromatinInteractionsDataProvider( GenomeDataProvider ):
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
        Provides chromatin interaction data in payload format.
"""
rval = []
message = None
for count, line in enumerate( iterator ):
if count < start_val:
continue
if max_vals and count - start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "interactions" )
break
feature = line.split()
s1 = int( feature[1] )
e1 = int( feature[2] )
c = feature[3]
s2 = int( feature[4] )
e2 = int( feature[5] )
v = float( feature[6] )
# Feature initialization.
payload = [
# GUID is just a hash of the line
hash( line ),
# Add start1, end1, chr2, start2, end2, value.
s1, e1, c, s2, e2, v
]
rval.append( payload )
return { 'data': rval, 'message': message }
def get_default_max_vals( self ):
return 100000
class ChromatinInteractionsTabixDataProvider( TabixDataProvider, ChromatinInteractionsDataProvider ):
    def get_iterator( self, chrom, start=0, end=sys.maxint, interchromosomal=False, **kwargs ):
        """
        Returns an iterator over interactions in the region chrom:start-end,
        optionally including interchromosomal interactions.
        """
# Modify start as needed to get earlier interactions with start region.
span = int( end ) - int( start )
filter_start = max( 0, int( start ) - span - span / 2 )
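        # For example, start=1,000,000 and end=1,100,000 give span=100,000 and
        # filter_start=850,000, so interactions anchored up to 150kb upstream of
        # the requested start are also scanned.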
def filter( iter ):
for line in iter:
feature = line.split()
s1 = int( feature[1] )
e1 = int( feature[2] )
c = feature[3]
s2 = int( feature[4] )
e2 = int( feature[5] )
                # Check for intrachromosomal interactions.
if ( ( s1 + s2 ) / 2 <= end ) and ( ( e1 + e2 ) / 2 >= start ) and ( c == chrom ):
yield line
                # Check for interchromosomal interactions.
if interchromosomal and c != chrom:
yield line
return filter( TabixDataProvider.get_iterator( self, chrom, filter_start, end ) )
#
# -- Helper methods. --
#
def package_gff_feature( feature, no_detail=False, filter_cols=[] ):
""" Package a GFF feature in an array for data providers. """
feature = convert_gff_coords_to_bed( feature )
# No detail means only start, end.
if no_detail:
return [ feature.start, feature.end ]
# Return full feature.
payload = [ feature.start,
feature.end,
feature.name(),
feature.strand,
# No notion of thick start, end in GFF, so make everything
# thick.
feature.start,
feature.end
]
# HACK: ignore interval with name 'transcript' from feature.
# Cufflinks puts this interval in each of its transcripts,
# and they mess up trackster by covering the feature's blocks.
# This interval will always be a feature's first interval,
# and the GFF's third column is its feature name.
feature_intervals = feature.intervals
if feature.intervals[0].fields[2] == 'transcript':
feature_intervals = feature.intervals[1:]
# Add blocks.
block_sizes = [ (interval.end - interval.start ) for interval in feature_intervals ]
block_starts = [ ( interval.start - feature.start ) for interval in feature_intervals ]
blocks = zip( block_sizes, block_starts )
payload.append( [ ( feature.start + block[1], feature.start + block[1] + block[0] ) for block in blocks ] )
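    # E.g. a feature starting at 100 with intervals (100, 150) and (200, 260) yields
    # block_sizes [50, 60], block_starts [0, 100], and appends [(100, 150), (200, 260)].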
# Add filter data to payload.
for col in filter_cols:
if col == "Score":
if feature.score == 'nan':
payload.append( feature.score )
else:
try:
f = float( feature.score )
payload.append( f )
except:
payload.append( feature.score )
elif col in feature.attributes:
if feature.attributes[col] == 'nan':
payload.append( feature.attributes[col] )
else:
try:
f = float( feature.attributes[col] )
payload.append( f )
except:
payload.append( feature.attributes[col] )
else:
# Dummy value.
payload.append( 0 )
return payload
| 36.627716
| 145
| 0.545377
|
33e1e3cb4e234e17a744ebb65b79e1fb3b91f4f4
| 25,413
|
py
|
Python
|
clef15/text-alignment/pan15_text_alignment_evaluator_character_level.py
|
deltonvaz/pan-code
|
7a1d11eab81d9b4111d3ba0fb7d7038ff7ebfa0a
|
[
"MIT"
] | null | null | null |
clef15/text-alignment/pan15_text_alignment_evaluator_character_level.py
|
deltonvaz/pan-code
|
7a1d11eab81d9b4111d3ba0fb7d7038ff7ebfa0a
|
[
"MIT"
] | null | null | null |
clef15/text-alignment/pan15_text_alignment_evaluator_character_level.py
|
deltonvaz/pan-code
|
7a1d11eab81d9b4111d3ba0fb7d7038ff7ebfa0a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python2
# Copyright (C) 2009 webis.de. All rights reserved.
"""Plagiarism detection performance measures.
This module implements the measures recall, precision, and granularity
as described by the authors of [1]. The measures can be calculated
macro-averaged and micro-averaged with the respective functions; each
one's parameters are iterables of reference plagiarism cases and
plagiarism detections. The latter are compared with the former in order
to determine how accurate the cases have been detected with respect to
different ideas of detection accuracy. Moreover, the function
plagdet_score combines recall, precision and granularity values to a
single value which allows ranking plagiarism detection algorithms.
The parameters 'cases' and 'detections' both must contain instances of
the Annotation class, a 7-tuple consisting of the following exemplified
values:
>>> Annotation('suspicious-document00001.txt', 10000, 1000, \
... 'source-document00001.txt', 5000, 1100, True)
where the first three values reference a section of text in a
suspicious document by means of char offset and length, and likewise
the following three values reference a section of text in a source
document. The last value specifies whether the annotation is to be
treated as an external detection or as an intrinsic detection. In the
latter case, the preceding values should be set to '', 0, 0, respectively.
Finally, this module contains functions to extract plagiarism
annotations from XML documents which contain tags with a given name
attribute, and with values corresponding to those required by the
Annotation class, e.g., the XML format used in the PAN benchmarking
workshops [2,3].
[1] Martin Potthast, Benno Stein, Alberto Barron-Cedeno, and Paolo Rosso.
An Evaluation Framework for Plagiarism Detection.
In Proceedings of the 23rd International Conference on Computational
Linguistics (COLING 2010), Beijing, China. August 2010. ACL.
[2] Martin Potthast, Benno Stein, Andreas Eiselt, Alberto Barron-Cedeno,
and Paolo Rosso. Overview of the 1st International Competition on
Plagiarism Detection. In Benno Stein, Paolo Rosso, Efstathios
Stamatatos, Moshe Koppel, and Eneko Agirre, editors, SEPLN 2009
Workshop on Uncovering Plagiarism, Authorship, and Social Software
Misuse (PAN 09), pages 1-9, September 2009. CEUR-WS.org.
ISSN 1613-0073.
[3] Martin Potthast, Benno Stein, Andreas Eiselt, Alberto Barron-Cedeno,
and Paolo Rosso. Overview of the 2nd International Benchmarking
Workshop on Plagiarism Detection. In Benno Stein, Paolo Rosso,
Efstathios Stamatatos, and Moshe Koppel, editors, Proceedings of
PAN at CLEF 2010: Uncovering Plagiarism, Authorship, and Social
Software Misuse, September 2010.
"""
from __future__ import division, print_function
__author__ = "Martin Potthast"
__email__ = "martin.potthast at uni-weimar dot de"
__version__ = "1.3"
__all__ = ["macro_avg_recall_and_precision", "micro_avg_recall_and_precision",
"granularity", "plagdet_score", "Annotation"]
from collections import namedtuple
import getopt
import glob
import math
from numpy import int8 as npint8
from numpy.ma import zeros, sum as npsum
import os
import sys
import unittest
import xml.dom.minidom
import shutil
import json
TREF, TOFF, TLEN = 'this_reference', 'this_offset', 'this_length'
SREF, SOFF, SLEN = 'source_reference', 'source_offset', 'source_length'
EXT = 'is_external'
Annotation = namedtuple('Annotation', [TREF, TOFF, TLEN, SREF, SOFF, SLEN, EXT])
TREF, TOFF, TLEN, SREF, SOFF, SLEN, EXT = range(7)
def macro_avg_recall_and_precision(cases, detections):
"""Returns tuple (rec, prec); the macro-averaged recall and precision of the
detections in detecting the plagiarism cases"""
return macro_avg_recall(cases, detections), \
macro_avg_precision(cases, detections)
def micro_avg_recall_and_precision(cases, detections):
"""Returns tuple (rec, prec); the micro-averaged recall and precision of the
detections in detecting the plagiarism cases"""
if len(cases) == 0 and len(detections) == 0:
return 1, 1
if len(cases) == 0 or len(detections) == 0:
return 0, 0
num_plagiarized, num_detected, num_plagiarized_detected = 0, 0, 0 # chars
num_plagiarized += count_chars(cases)
num_detected += count_chars(detections)
print(len(detections))
detections = true_detections(cases, detections)
print(len(detections))
num_plagiarized_detected += count_chars(detections)
print(num_plagiarized, num_detected, num_plagiarized_detected)
rec, prec = 0, 0
if num_plagiarized > 0:
rec = num_plagiarized_detected / num_plagiarized
if num_detected > 0:
prec = num_plagiarized_detected / num_detected
return rec, prec
def granularity(cases, detections):
"""Granularity of the detections in detecting the plagiarism cases."""
if len(detections) == 0:
return 1
detections_per_case = list()
case_index = index_annotations(cases)
det_index = index_annotations(detections)
for tref in case_index:
cases, detections = case_index[tref], det_index.get(tref, False)
if not detections: # No detections for document tref.
continue
for case in cases:
num_dets = sum((is_overlapping(case, det) for det in detections))
detections_per_case.append(num_dets)
detected_cases = sum((num_dets > 0 for num_dets in detections_per_case))
if detected_cases == 0:
return 1
return sum(detections_per_case) / detected_cases
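# For example, a single case overlapped by two separate detections gives
# sum(detections_per_case) = 2 over detected_cases = 1, i.e. a granularity of 2
# (cf. test_granularity below).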
def plagdet_score(rec, prec, gran):
    """Combines recall, precision, and granularity to allow for ranking."""
if (rec == 0 and prec == 0) or prec < 0 or rec < 0 or gran < 1:
return 0
return ((2 * rec * prec) / (rec + prec)) / math.log(1 + gran, 2)
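# Worked example (mirroring test_plagdet_score below): rec=0.5, prec=1, gran=1
# gives ((2 * 0.5 * 1) / (0.5 + 1)) / log2(1 + 1) = (2/3) / 1 = 2/3.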
def macro_avg_recall(cases, detections):
"""Recall of the detections in detecting plagiarism cases."""
if len(cases) == 0 and len(detections) == 0:
return 1
elif len(cases) == 0 or len(detections) == 0:
return 0
num_cases, recall_per_case = len(cases), list()
case_index = index_annotations(cases)
det_index = index_annotations(detections)
for tref in case_index:
cases, detections = case_index[tref], det_index.get(tref, False)
if not detections: # No detections for document tref.
continue
for case in cases:
recall_per_case.append(case_recall(case, detections))
return sum(recall_per_case) / num_cases
def case_recall(case, detections):
"""Recall of the detections in detecting the plagiarism case."""
num_detected_plagiarized = overlapping_chars(case, detections)
num_plagiarized = case[TLEN] + case[SLEN]
return num_detected_plagiarized / num_plagiarized
def macro_avg_precision(cases, detections):
"""Precision of the detections in detecting the plagiarism cases."""
# Observe the difference to calling 'macro_avg_recall(cases, detections)'.
return macro_avg_recall(detections, cases)
def true_detections(cases, detections):
"""Recreates the detections so that only true detections remain and so that
the true detections are reduced to the passages that actually overlap
with the respective detected case."""
true_dets = list()
case_index = index_annotations(cases)
det_index = index_annotations(detections)
for tref in case_index:
cases, detections = case_index[tref], det_index.get(tref, False)
if not detections: # No detections for document tref.
continue
for case in cases:
case_dets = (det for det in detections if is_overlapping(case, det))
true_case_dets = (overlap_annotation(case, det) for det in case_dets)
true_dets.extend(true_case_dets)
return true_dets
def overlap_annotation(ann1, ann2):
"""Returns an Annotation that annotates overlaps between ann1 and ann2."""
tref, sref, ext = ann1[TREF], ann1[SREF], ann1[EXT] and ann2[EXT]
toff, tlen, soff, slen = 0, 0, 0, 0
if is_overlapping(ann1, ann2):
toff, tlen = overlap_chars(ann1, ann2, TOFF, TLEN)
if ext:
soff, slen = overlap_chars(ann1, ann2, SOFF, SLEN)
return Annotation(tref, toff, tlen, sref, soff, slen, ext)
def overlap_chars(ann1, ann2, xoff, xlen):
"""Returns the overlapping passage between ann1 and ann2, given the keys
xoff and xlen."""
overlap_start, overlap_length = 0, 0
max_ann = ann1 if ann1[xoff] >= ann2[xoff] else ann2
min_ann = ann1 if ann1[xoff] < ann2[xoff] else ann2
if min_ann[xoff] + min_ann[xlen] > max_ann[xoff]:
overlap_start = max_ann[xoff]
overlap_end = min(min_ann[xoff] + min_ann[xlen], \
max_ann[xoff] + max_ann[xlen])
overlap_length = overlap_end - overlap_start
return overlap_start, overlap_length
def count_chars(annotations):
"""Returns the number of chars covered by the annotations, while counting
overlapping chars only once."""
num_chars = count_chars2(annotations, TREF, TOFF, TLEN)
num_chars += count_chars2(annotations, SREF, SOFF, SLEN)
return num_chars
def count_chars2(annotations, xref, xoff, xlen):
    """Returns the number of chars covered by the annotations with regard to
the keys xref, xoff, and xlen."""
num_chars = 0
max_length = max((ann[xoff] + ann[xlen] for ann in annotations))
char_bits = zeros(max_length, dtype=bool)
xref_index = index_annotations(annotations, xref)
for xref in xref_index:
annotations = xref_index[xref]
char_bits[:] = False
for ann in annotations:
char_bits[ann[xoff]:ann[xoff] + ann[xlen]] = True
num_chars += npsum(char_bits)
return num_chars
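# E.g. two annotations covering chars [0, 100) and [50, 150) of the same reference
# contribute 150 chars, not 200: the boolean mask counts the [50, 100) overlap once.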
def overlapping_chars(ann1, annotations):
"""Returns the number of chars in ann1 that overlap with the annotations."""
annotations = [ann2 for ann2 in annotations if is_overlapping(ann1, ann2)]
if len(annotations) == 0 or not isinstance(ann1, Annotation):
return 0
this_overlaps = zeros(ann1[TLEN], dtype=bool)
source_overlaps = zeros(ann1[SLEN], dtype=bool)
for ann2 in annotations:
mark_overlapping_chars(this_overlaps, ann1, ann2, TOFF, TLEN)
mark_overlapping_chars(source_overlaps, ann1, ann2, SOFF, SLEN)
return npsum(this_overlaps) + npsum(source_overlaps)
def mark_overlapping_chars(char_bits, ann1, ann2, xoff, xlen):
"""Sets the i-th boolean in char_bits to true if ann2 overlaps with the i-th
char in ann1, respecting the given xoff and xlen index."""
offset_difference = ann2[xoff] - ann1[xoff]
overlap_start = min(max(0, offset_difference), ann1[xlen])
overlap_end = min(max(0, offset_difference + ann2[xlen]), ann1[xlen])
char_bits[overlap_start:overlap_end] = True
def is_overlapping(ann1, ann2):
"""Returns true iff the ann2 overlaps with ann1."""
detected = ann1[TREF] == ann2[TREF] and \
ann2[TOFF] + ann2[TLEN] > ann1[TOFF] and \
ann2[TOFF] < ann1[TOFF] + ann1[TLEN]
if ann1[EXT] == True and ann2[EXT] == True:
detected = detected and ann1[SREF] == ann2[SREF] and \
ann2[SOFF] + ann2[SLEN] > ann1[SOFF] and \
ann2[SOFF] < ann1[SOFF] + ann1[SLEN]
return detected
def index_annotations(annotations, xref=TREF):
"""Returns an inverted index that maps references to annotation lists."""
index = dict()
for ann in annotations:
index.setdefault(ann[xref], []).append(ann)
return index
def count_files(path):
"""Counts number of files in directory."""
if not os.path.exists(path):
print("Path not accessible:", path)
sys.exit(2)
xmlfiles = glob.glob(os.path.join(path, '*.xml'))
xmlfiles.extend(glob.glob(os.path.join(path, os.path.join('*', '*.xml'))))
return len(xmlfiles)
def extract_average_execution_time(path):
"""Extracts the execution time from each xml file and averages it."""
if not os.path.exists(path):
print("Path not accessible:", path)
sys.exit(2)
times = []
xmlfiles = glob.glob(os.path.join(path, '*.xml'))
xmlfiles.extend(glob.glob(os.path.join(path, os.path.join('*', '*.xml'))))
for xmlfile in xmlfiles:
doc = xml.dom.minidom.parse(xmlfile)
if doc.documentElement.hasAttribute('reference'):
for node in doc.documentElement.childNodes:
if node.nodeType == xml.dom.Node.ELEMENT_NODE and \
node.hasAttribute('name') and \
node.getAttribute('name').endswith("execution_time"):
times.append(float(node.getAttribute("seconds")))
return sum(times)/len(times)
def extract_annotations_from_files(path, tagname):
"""Returns a set of plagiarism annotations from XML files below path."""
if not os.path.exists(path):
print("Path not accessible:", path)
sys.exit(2)
annotations = set()
xmlfiles = glob.glob(os.path.join(path, '*.xml'))
xmlfiles.extend(glob.glob(os.path.join(path, os.path.join('*', '*.xml'))))
for xmlfile in xmlfiles:
annotations.update(extract_annotations_from_file(xmlfile, tagname))
return annotations
def extract_annotations_from_file(xmlfile, tagname):
"""Returns a set of plagiarism annotations from an XML file."""
doc = xml.dom.minidom.parse(xmlfile)
annotations = set()
if not doc.documentElement.hasAttribute('reference'):
return annotations
t_ref = doc.documentElement.getAttribute('reference')
for node in doc.documentElement.childNodes:
if node.nodeType == xml.dom.Node.ELEMENT_NODE and \
node.hasAttribute('name') and \
node.getAttribute('name').endswith(tagname):
ann = extract_annotation_from_node(node, t_ref)
if ann:
annotations.add(ann)
return annotations
def extract_annotation_from_node(xmlnode, t_ref):
"""Returns a plagiarism annotation from an XML feature tag node."""
if not (xmlnode.hasAttribute('this_offset') and \
xmlnode.hasAttribute('this_length')):
return False
t_off = int(xmlnode.getAttribute('this_offset'))
t_len = int(xmlnode.getAttribute('this_length'))
s_ref, s_off, s_len, ext = '', 0, 0, False
if xmlnode.hasAttribute('source_reference') and \
xmlnode.hasAttribute('source_offset') and \
xmlnode.hasAttribute('source_length'):
s_ref = xmlnode.getAttribute('source_reference')
s_off = int(xmlnode.getAttribute('source_offset'))
s_len = int(xmlnode.getAttribute('source_length'))
ext = True
return Annotation(t_ref, t_off, t_len, s_ref, s_off, s_len, ext)
class TestPerfMeasures(unittest.TestCase):
"""Unit tests for the plagiarism detection performance measures."""
ann1 = Annotation('tref1', 0, 100, 'sref1', 0, 100, True)
ann2 = Annotation('tref1', 0, 100, '', 0, 0, False)
ann3 = Annotation('tref1', 100, 100, 'sref1', 100, 100, True)
ann4 = Annotation('tref1', 0, 200, 'sref1', 0, 200, True)
ann5 = Annotation('tref1', 0, 1, 'sref1', 0, 1, True)
ann6 = Annotation('tref1', 99, 1, 'sref1', 99, 1, True)
ann7 = Annotation('tref2', 0, 100, 'sref2', 0, 100, True)
ann8 = Annotation('tref2', 0, 100, '', 0, 0, False)
ann9 = Annotation('tref2', 50, 100, 'sref2', 50, 100, True)
ann10 = Annotation('tref2', 25, 75, 'sref2', 25, 75, True)
def test_macro_averaged_recall(self):
self.assertEqual(1, macro_avg_recall([], []))
self.assertEqual(0, macro_avg_recall(['sth'], []))
self.assertEqual(0, macro_avg_recall([], ['sth']))
self.assertEqual(1, macro_avg_recall([self.ann1], [self.ann1]))
self.assertEqual(1, macro_avg_recall([self.ann2], [self.ann2]))
self.assertEqual(0.5, macro_avg_recall([self.ann1, self.ann7], \
[self.ann1]))
self.assertEqual(0.5, macro_avg_recall([self.ann2, self.ann8], \
[self.ann2]))
self.assertEqual(0, macro_avg_recall([self.ann1], [self.ann7]))
self.assertEqual(0, macro_avg_recall([self.ann2], [self.ann8]))
def test_case_recall(self):
self.assertEqual(0, case_recall(self.ann1, []))
self.assertEqual(1, case_recall(self.ann1, [self.ann1]))
self.assertEqual(0.5, case_recall(self.ann1, [self.ann2]))
self.assertEqual(0, case_recall(self.ann1, [self.ann3]))
self.assertEqual(1, case_recall(self.ann1, [self.ann4]))
self.assertEqual(1, case_recall(self.ann1, [self.ann4, self.ann7]))
self.assertEqual(0, case_recall(self.ann1, [self.ann7, self.ann9]))
self.assertEqual(0.5, case_recall(self.ann7, [self.ann9]))
self.assertEqual(0.75, case_recall(self.ann7, [self.ann10]))
self.assertEqual(0.75, case_recall(self.ann7, [self.ann9, self.ann10]))
def test_macro_averaged_precision(self):
self.assertEqual(1, macro_avg_precision([], []))
self.assertEqual(0, macro_avg_precision(['sth'], []))
self.assertEqual(0, macro_avg_precision([], ['sth']))
self.assertEqual(1, macro_avg_precision([self.ann1], [self.ann1]))
self.assertEqual(1, macro_avg_precision([self.ann2], [self.ann2]))
self.assertEqual(1, macro_avg_precision([self.ann1, self.ann7], \
[self.ann1]))
self.assertEqual(1, macro_avg_precision([self.ann2, self.ann8], \
[self.ann2]))
self.assertEqual(0.5, macro_avg_precision([self.ann1], [self.ann4]))
self.assertEqual(1, macro_avg_precision([self.ann7], [self.ann10]))
self.assertEqual(1, macro_avg_precision([self.ann7], [self.ann10]))
self.assertEqual(0.75, macro_avg_precision([self.ann7], \
[self.ann9, self.ann10]))
self.assertEqual(0.25, macro_avg_precision([self.ann1], \
[self.ann3, self.ann4]))
def test_granularity(self):
self.assertEqual(1, granularity([], []))
self.assertEqual(1, granularity([self.ann1], [self.ann2]))
self.assertEqual(1, granularity([self.ann1], [self.ann2, self.ann3]))
self.assertEqual(2, granularity([self.ann1],
[self.ann2, self.ann3, self.ann4]))
self.assertEqual(1.5, granularity([self.ann1, self.ann3],
[self.ann2, self.ann4]))
def test_plagdet_score(self):
self.assertEqual(0, plagdet_score(-1, 0, 0))
self.assertEqual(0, plagdet_score(0, -1, 0))
self.assertEqual(0, plagdet_score(0, 0, -1))
self.assertEqual(0, plagdet_score(0, 0, 1))
self.assertEqual(0, plagdet_score(0, 1, 1))
self.assertEqual(0, plagdet_score(1, 0, 1))
self.assertEqual(1, plagdet_score(1, 1, 1))
self.assertEqual(2 / 3, plagdet_score(0.5, 1, 1))
self.assertEqual(2 / 3, plagdet_score(1, 0.5, 1))
self.assertAlmostEqual(0.63092975, plagdet_score(1, 1, 2))
self.assertAlmostEqual(0.23659865, plagdet_score(0.25, 0.75, 2))
def test_is_overlapping(self):
self.assertTrue(is_overlapping(self.ann1, self.ann2))
self.assertFalse(is_overlapping(self.ann1, self.ann3))
self.assertTrue(is_overlapping(self.ann1, self.ann4))
self.assertFalse(is_overlapping(self.ann1, self.ann7))
self.assertFalse(is_overlapping(self.ann1, self.ann8))
self.assertFalse(is_overlapping(self.ann1, self.ann9))
self.assertFalse(is_overlapping(self.ann1, self.ann10))
self.assertTrue(is_overlapping(self.ann1, self.ann5))
self.assertTrue(is_overlapping(self.ann1, self.ann6))
def test_index_annotations(self):
index = index_annotations([self.ann1, self.ann7, self.ann2, self.ann8])
self.assertEqual([self.ann1, self.ann2], index.get('tref1'))
self.assertEqual([self.ann7, self.ann8], index.get('tref2'))
def usage():
"""Prints command line usage manual."""
print("""\
Usage: pan15_text_alignment_evaluator_character_level.py [options]
Options:
-h, --help Show this message
--micro Compute micro-averaged recall and precision,
default: macro-averaged recall and precision
-t, --truth-dir Path to the XML files with plagiarism annotations
--plag-tag Tag name suffix of plagiarism annotations,
default: 'plagiarism'
-i, --input-dir Path to the XML files with detection annotations
--det-tag Tag name of the detection annotations,
default: 'detected-plagiarism'
-o, --output-file Path to output file
""")
def parse_options():
"""Parses the command line options."""
try:
long_options = ["micro", "truth-dir=", "plag-tag=", "input-dir=",
"det-tag=", "output-file=", "help"]
opts, _ = getopt.getopt(sys.argv[1:], "t:i:o:h", long_options)
except Exception as err:
print(str(err))
usage()
sys.exit(2)
micro_averaged = False
plag_path, det_path = "undefined", "undefined"
output_filename="undefined"
plag_tag_name, det_tag_name = "plagiarism", "detected-plagiarism"
for opt, arg in opts:
if opt in ("--micro"):
micro_averaged = True
elif opt in ("-t", "--truth-dir"):
plag_path = arg
elif opt == "--plag-tag":
plag_tag_name = arg
elif opt in ("-i", "--input-dir"):
det_path = arg
elif opt in ("-o", "--output-file"):
output_filename = arg
elif opt == "--det-tag":
det_tag_name = arg
elif opt in ("-h", "--help"):
usage()
sys.exit()
else:
assert False, "Unknown option."
if plag_path == "undefined":
print("Truth directory undefined. Use option -t or --truth-dir.")
sys.exit()
if det_path == "undefined":
print("Input directory undefined. Use option -i or --input-dir.")
sys.exit()
if output_filename == "undefined":
        print("Output file undefined. Use option -o or --output-file.")
sys.exit()
return (micro_averaged, plag_path, plag_tag_name, det_path, det_tag_name, output_filename)
def reduce_detections(plag_path, detections):
pairs = [x.strip().split(' ') for x in open(os.path.join(plag_path, 'pairs'))]
reduced_detections = set()
for d in detections:
for p in pairs:
if d.this_reference == p[0] and d.source_reference == p[1]:
reduced_detections.add(d)
break
return reduced_detections
def character_level_performance(micro_averaged, plag_path, plag_tag_name, det_path, det_tag_name):
    cases = extract_annotations_from_files(plag_path, plag_tag_name)
    # print 'Reading', det_path
detections = extract_annotations_from_files(det_path, det_tag_name)
    detections = reduce_detections(plag_path, detections)
    # print 'Processing... (this may take a while)'
rec, prec = 0, 0
if micro_averaged:
rec, prec = micro_avg_recall_and_precision(cases, detections)
else:
rec, prec = macro_avg_recall_and_precision(cases, detections)
gran = granularity(cases, detections)
plag = plagdet_score(rec, prec, gran)
num_files = count_files(det_path)
return (plag, rec, prec, gran, num_files)
def main(micro_averaged, plag_path, plag_tag_name, det_path, det_tag_name, output_filename):
    """Main method of this module."""
    # print 'Reading', plag_path
plag, rec, prec, gran, num_files = character_level_performance(micro_averaged, plag_path, plag_tag_name, det_path, det_tag_name)
output_string = '{\n'+ \
'"plagdet":"%0.5f",\n' % plag + \
'"recall":"%0.5f",\n' % rec + \
'"precision":"%0.5f",\n' % prec + \
'"granularity":"%0.5f",\n' % gran + \
'"documents":"%d"\n' % num_files + '}'
print(plag)
print (output_string)
o=open(output_filename, "w")
o.write(output_string)
o.close()
# write prototext file
json_data = json.loads(output_string)
prototext_filename= output_filename[:output_filename.rindex(".")]+".prototext"
prototext_file=open(prototext_filename,"w")
for i in json_data:
text = '''measure{
key : "%s"
value: "%s"
}''' % (i, json_data[i])
prototext_file.write(text+"\n")
prototext_file.close()
if __name__ == '__main__':
main(*parse_options())
| 43.740103
| 133
| 0.645024
|
6b5a8408f103ecb1afc0a81d03d2b8dd1d2e3846
| 485
|
py
|
Python
|
test/silicon/3_wan/plot_bands.py
|
mir-group/BRAVE
|
45a870946661d7d76fcca273036b3004f21a49bd
|
[
"MIT"
] | 8
|
2020-11-03T03:28:35.000Z
|
2022-03-18T21:22:03.000Z
|
test/silicon/3_wan/plot_bands.py
|
mir-group/BRAVE
|
45a870946661d7d76fcca273036b3004f21a49bd
|
[
"MIT"
] | 1
|
2021-02-28T08:41:21.000Z
|
2021-02-28T08:41:21.000Z
|
test/silicon/3_wan/plot_bands.py
|
mir-group/BRAVE
|
45a870946661d7d76fcca273036b3004f21a49bd
|
[
"MIT"
] | 1
|
2022-03-20T19:27:33.000Z
|
2022-03-20T19:27:33.000Z
|
import brave
bnd = brave.Diagram()
bnd.read('internal', ['../../kpath/fcc.full.in'])
bnd.calc_kindex('density')
bnd.calc_kpoint()
bnd.read('wannier-out', ['silicon.win', 'silicon-bands.dat'])
bnd.nelec = 8.0
bnd.calc_efermi()
bnd.set_plot('energy')
bnd.plot.ylim = [[0, 12]]
bnd.plot.pagesize = [5.0, 3.75]
bnd.plot.note = [[[0.5, 1.02, 'center', 'bottom', 'fcc Si' + 20 * ' ' + 'LDA' + 20 * ' ' + 'Wannier90', 'black', 1.0]]]
bnd.plot.write('matplotlib', 'png', 'silicon_bands.png')
| 32.333333
| 119
| 0.626804
|
28abe137f2ac997f67b62b6e77b76020a7f2c4cb
| 4,450
|
py
|
Python
|
backend/app/core/regressor.py
|
Laende/anleggsmaskin-salgsprisprekisjon
|
d1e448d9ee081ba055c3ac05c1b1883294f19c99
|
[
"MIT"
] | null | null | null |
backend/app/core/regressor.py
|
Laende/anleggsmaskin-salgsprisprekisjon
|
d1e448d9ee081ba055c3ac05c1b1883294f19c99
|
[
"MIT"
] | null | null | null |
backend/app/core/regressor.py
|
Laende/anleggsmaskin-salgsprisprekisjon
|
d1e448d9ee081ba055c3ac05c1b1883294f19c99
|
[
"MIT"
] | null | null | null |
from logging import getLogger
from pathlib import Path
from typing import List
import joblib
import numpy as np
import pandas as pd
from app.core.config import (get_feature_codes, get_features_list,
get_state_list)
from app.schemas.prediction import SalePricePredictionResult
from app.schemas.sales import SalesCreate
log = getLogger(__name__)
class SalePriceRegressor(object):
def __init__(self, path):
self.path = path
self.feature_codes = get_feature_codes()
self.features_list = get_features_list()
self.state_list = get_state_list()
if Path(self.path).is_file():
self._load_local_model()
else:
            log.error("Model path doesn't exist.")
def _load_local_model(self):
self.model = joblib.load(self.path)
def _add_and_remove_df_columns(self, input_data) -> pd.DataFrame:
df = pd.DataFrame([input_data])
# Make sure saledate is of datetime type
df["saledate"] = pd.to_datetime(df["saledate"], format="%Y-%m-%d")
# Create new columns based on saledate
df["saledate_year"] = df["saledate"].dt.year
df["saledate_month"] = df["saledate"].dt.month
df["saledate_day"] = df["saledate"].dt.day
df["saledate_dayofweek"] = df["saledate"].dt.dayofweek
df["saledate_quarter"] = df["saledate"].dt.quarter
        # Drop saleprice and saledate as they aren't part of the feature set for the model
if "saleprice" in df.columns:
df.drop(["saleprice"], axis=1, inplace=True)
if "saledate" in df.columns:
df.drop(["saledate"], axis=1, inplace=True)
        # If the dataframe doesn't have all the required features, add them and fill with zero
for col in self.state_list:
if col not in df.columns:
df[col] = 0
return df
def _fill_state_in_df(self, df) -> pd.DataFrame:
        # One-hot encode the state column; this fills a 1 into the matching state column
if df.at[0, "state"] != "":
df.loc[:, df.columns.str.contains(df.at[0, "state"].lower())] = 1
df.drop(["state"], axis=1, inplace=True)
return df
def _fill_df_with_numbers(self, df) -> pd.DataFrame:
        # Fill string-typed columns with numeric codes: if the string exists in the feature dict, use its code.
for label, content in df.items():
if pd.api.types.is_string_dtype(content):
df[label] = df[label].str.lower()
val = df.at[0, label]
list_of_feature_values = list(self.feature_codes[label].values())
list_of_feature_codes = list(self.feature_codes[label].keys())
try:
result = list_of_feature_codes[list_of_feature_values.index(val)]
df[label] = result
except ValueError as e:
df[label] = 0
df = df.astype(float)
return df
def _pre_process(self, input_data: SalesCreate) -> List:
        # Create a dataframe from the input data (a dict)
df = self._add_and_remove_df_columns(input_data)
df = self._fill_state_in_df(df)
# All features should be in df now, order them as expected by model
df = df[self.features_list]
df["machine_hours_current_meter"] = df["machine_hours_current_meter"].astype("int")
df = self._fill_df_with_numbers(df)
features_to_dict = df.to_dict(orient="records")
out = np.array(list(features_to_dict[0].values())).reshape(1, -1)
return out
def _post_process(self, prediction: np.ndarray) -> SalePricePredictionResult:
log.debug("Post-processing prediction.")
return SalePricePredictionResult(price=prediction[0])
def _predict(self, features: List) -> np.ndarray:
log.debug("Predicting.")
prediction_result = self.model.predict(features)
return prediction_result
def predict(self, input_data: SalesCreate):
if input_data is None:
raise ValueError(f"{input_data} is not valid.")
pre_processed_payload = self._pre_process(input_data)
prediction = self._predict(pre_processed_payload)
log.info(f"predicted saleprice: {prediction[0]}")
post_processed_result = self._post_process(prediction)
return post_processed_result
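# Minimal usage sketch (the model path and request payload below are hypothetical):
#   regressor = SalePriceRegressor(path="models/sale_price_model.joblib")
#   result = regressor.predict(sale)  # sale: SalesCreate -> SalePricePredictionResult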
| 38.034188
| 116
| 0.63618
|
e1dc7008e8a796fb78f1594b1d0972fd14d057c4
| 7,152
|
py
|
Python
|
pycore/blocks.py
|
passlab/PlotNeuralNet
|
c8fff27fe2a7b839c01437d77363ac150d3c6d68
|
[
"MIT"
] | null | null | null |
pycore/blocks.py
|
passlab/PlotNeuralNet
|
c8fff27fe2a7b839c01437d77363ac150d3c6d68
|
[
"MIT"
] | null | null | null |
pycore/blocks.py
|
passlab/PlotNeuralNet
|
c8fff27fe2a7b839c01437d77363ac150d3c6d68
|
[
"MIT"
] | null | null | null |
from .tikzeng import *
#define new block
def block_2ConvPool( name, botton, top, s_filer=256, n_filer=64, offset="(1,0,0)", size=(32,32,3.5), opacity=0.5 ):
return [
to_ConvConvRelu(
name="ccr_{}".format( name ),
s_filer=str(s_filer),
n_filer=(n_filer,n_filer),
offset=offset,
to="({}-east)".format( botton ),
width=(size[2],size[2]),
height=size[0],
depth=size[1],
),
to_Pool(
name="{}".format( top ),
offset="(0,0,0)",
to="(ccr_{}-east)".format( name ),
width=1,
height=size[0] - int(size[0]/4),
depth=size[1] - int(size[0]/4),
opacity=opacity, ),
to_connection(
"{}".format( botton ),
"ccr_{}".format( name )
)
]
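# Usage sketch (names are illustrative): chain blocks by reusing the previous block's
# 'top' anchor as the next block's 'botton', e.g.
#   arch = block_2ConvPool( name='b1', botton='input', top='pool_b1', s_filer=256, n_filer=64 )
#   arch += block_2ConvPool( name='b2', botton='pool_b1', top='pool_b2', s_filer=128, n_filer=128 )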
def block_Unconv( name, botton, top, s_filer=256, n_filer=64, offset="(1,0,0)", size=(32,32,3.5), opacity=0.5 ):
return [
to_UnPool( name='unpool_{}'.format(name), offset=offset, to="({}-east)".format(botton), width=1, height=size[0], depth=size[1], opacity=opacity ),
to_ConvRes( name='ccr_res_{}'.format(name), offset="(0,0,0)", to="(unpool_{}-east)".format(name), s_filer=str(s_filer), n_filer=str(n_filer), width=size[2], height=size[0], depth=size[1], opacity=opacity ),
to_Conv( name='ccr_{}'.format(name), offset="(0,0,0)", to="(ccr_res_{}-east)".format(name), s_filer=str(s_filer), n_filer=str(n_filer), width=size[2], height=size[0], depth=size[1] ),
to_ConvRes( name='ccr_res_c_{}'.format(name), offset="(0,0,0)", to="(ccr_{}-east)".format(name), s_filer=str(s_filer), n_filer=str(n_filer), width=size[2], height=size[0], depth=size[1], opacity=opacity ),
to_Conv( name='{}'.format(top), offset="(0,0,0)", to="(ccr_res_c_{}-east)".format(name), s_filer=str(s_filer), n_filer=str(n_filer), width=size[2], height=size[0], depth=size[1] ),
to_connection(
"{}".format( botton ),
"unpool_{}".format( name )
)
]
def block_Res( num, name, botton, top, s_filer=256, n_filer=64, offset="(0,0,0)", size=(32,32,3.5), opacity=0.5 ):
lys = []
layers = [ *[ '{}_{}'.format(name,i) for i in range(num-1) ], top]
for name in layers:
ly = [ to_Conv(
name='{}'.format(name),
offset=offset,
to="({}-east)".format( botton ),
s_filer=str(s_filer),
n_filer=str(n_filer),
width=size[2],
height=size[0],
depth=size[1]
),
to_connection(
"{}".format( botton ),
"{}".format( name )
)
]
botton = name
lys+=ly
lys += [
to_skip( of=layers[1], to=layers[-2], pos=1.25),
]
return lys
def block_Encoder( name, botton, top, caption=256, n_filer=64, offset="(1,0,0)", size=(32,32,3.5), maxpool=False, opacity=0.5 ):
encoder = [];
if (botton == ""):
encoder.append(
to_Conv(
name="ccr1_{}".format( name ),
offset=offset,
s_filer="",
to="(0,0,0)",
n_filer=n_filer,
width=size[2],
height=size[0],
depth=size[1],
)
);
else:
encoder.append(
to_Conv(
name="ccr1_{}".format( name ),
offset=offset,
s_filer="",
n_filer=n_filer,
to="({}-east)".format( botton ),
width=size[2],
height=size[0],
depth=size[1],
)
);
encoder.append(
to_Conv(
name="ccr2_{}".format( name ),
offset="(0,0,0)",
s_filer="",
caption=caption,
to="(ccr1_{}-east)".format( name ),
n_filer=n_filer,
width=size[2],
height=size[0],
depth=size[1],
)
);
encoder.append(
to_ConvRelu(
name="ccr3_{}".format( name ),
offset="(0,0,0)",
s_filer="",
to="(ccr2_{}-east)".format( name ),
n_filer=n_filer,
width=size[2],
height=size[0],
depth=size[1],
)
);
encoder.append(
to_skip(
of="ccr1_{}".format( name ),
to="ccr3_{}".format( name ),
pos_of=1.25,
)
);
if (maxpool):
encoder.append(
to_Pool(
name="{}".format( top ),
offset="(0,0,0)",
caption="",
to="(ccr3_{}-east)".format( name ),
width=size[2],
height=size[0]//2,
depth=size[1]//2,
opacity=opacity
)
);
if (botton != ""):
encoder.append(
to_connection(
"{}".format( botton ),
"ccr1_{}".format( name )
)
);
return encoder;
def block_Decoder( name, botton, top, caption="", n_filer=64, offset="(1,0,0)", size=(32,32,3.5), opacity=0.5 ):
return [
to_TransposeConv(
name='ccr_res_{}'.format(name),
offset=offset,
to="({}-east)".format(botton),
n_filer="",
s_filer="",
width=1,
height=size[0],
depth=size[1]
),
to_Concat(
name='concate_ccr_res_{}'.format(name),
offset="(0,0,0)",
to="(ccr_res_{}-east)".format(name),
n_filer="",
s_filer="",
width=1,
height=size[0],
depth=size[1],
opacity=opacity
),
to_Conv(
name="ccr1_{}".format( name ),
offset="(0,0,0)",
s_filer="",
caption="",
to="(concate_ccr_res_{}-east)".format( name ),
n_filer=n_filer,
width=size[2],
height=size[0],
depth=size[1],
),
to_Conv(
name="ccr2_{}".format( name ),
offset="(0,0,0)",
s_filer="",
caption=caption,
to="(ccr1_{}-east)".format( name ),
n_filer=n_filer,
width=size[2],
height=size[0],
depth=size[1],
),
to_ConvRelu(
name='{}'.format(top),
offset="(0,0,0)",
to="(ccr2_{}-east)".format(name),
s_filer="",
caption="",
n_filer=n_filer,
width=size[2],
height=size[0],
depth=size[1],
),
to_skip(
of="ccr1_{}".format( name ),
to="{}".format( top ),
pos_of=1.25,
),
to_connection(
"{}".format( botton ),
"ccr_res_{}".format(name)
)
]
| 30.827586
| 226
| 0.435263
|
e4ecaec849c9162d4324dcfffa751c45a2c130b0
| 973
|
py
|
Python
|
LeetCode/running-sum-1d-array.py
|
amgad01/algorithms
|
53eecf06e907cde806d4b78dc78fcd70d0271e3e
|
[
"MIT"
] | 1
|
2021-03-05T18:13:02.000Z
|
2021-03-05T18:13:02.000Z
|
LeetCode/running-sum-1d-array.py
|
amgad01/algorithms
|
53eecf06e907cde806d4b78dc78fcd70d0271e3e
|
[
"MIT"
] | null | null | null |
LeetCode/running-sum-1d-array.py
|
amgad01/algorithms
|
53eecf06e907cde806d4b78dc78fcd70d0271e3e
|
[
"MIT"
] | 1
|
2021-07-25T01:55:12.000Z
|
2021-07-25T01:55:12.000Z
|
def running_sum(nums):
"""Given an array nums. We define a running sum of an array as runningSum[i] = sum(nums[0]…nums[i]).
Return the running sum of nums.
Example 1: Input: nums = [1,2,3,4] Output: [1,3,6,10]
Explanation: Running sum is obtained as follows: [1, 1+2, 1+2+3, 1+2+3+4].
Example 2: Input: nums = [1,1,1,1,1] Output: [1,2,3,4,5] Explanation: Running sum is obtained as follows: [1, 1+1, 1+1+1, 1+1+1+1, 1+1+1+1+1].
Example 3: Input: nums = [3,1,2,10,1] Output: [3,4,6,16,17]
:type nums: List[int]
:rtype: List[int]
"""
# https://leetcode.com/submissions/detail/466562475/
# runtime beats 80.92 % of python submissions.
# memory usage beats 98.99 % of python submissions.
r_sum = nums[0]
for index in range(1, len(nums)):
r_sum += nums[index]
nums[index] = r_sum
return nums
print(running_sum([1, 2, 3, 4]))
print(running_sum([1, 1, 1, 1, 1]))
print(running_sum([3, 1, 2, 10, 1]))
| 37.423077
| 146
| 0.612539
|
45a6e241a0a4f280e45f68722b7f3dfe4000139b
| 1,248
|
py
|
Python
|
CEGO/testFunctions/C3DTLZ4.py
|
napa-jmm/CEGO
|
172d511133a608ca5bf265d9ebd2937b8a171b3e
|
[
"MIT"
] | 6
|
2018-07-18T06:38:42.000Z
|
2021-11-17T21:01:40.000Z
|
CEGO/testFunctions/C3DTLZ4.py
|
napa-jmm/CEGO
|
172d511133a608ca5bf265d9ebd2937b8a171b3e
|
[
"MIT"
] | null | null | null |
CEGO/testFunctions/C3DTLZ4.py
|
napa-jmm/CEGO
|
172d511133a608ca5bf265d9ebd2937b8a171b3e
|
[
"MIT"
] | 6
|
2018-10-15T09:35:24.000Z
|
2021-05-08T13:40:19.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 21 16:50:49 2018
@author: r.dewinter
"""
import numpy as np
def C3DTLZ4(x):
x = np.array(x)
gx = np.sum((x[1:]-0.5)**2)
f1 = (1+(gx))*np.cos(x[0]*np.pi/2)
f2 = (1+(gx))*np.sin(x[0]*np.pi/2)
c1 = (f1**2)/4 + f2**2 - 1
c2 = (f2**2)/4 + f1**2 - 1
#-1* constr because of sacobra's constraint handling
return [ np.array([f1, f2]), -1*np.array([c1,c2]) ]
#import matplotlib.pyplot as plt
#results = np.empty((1000000,2))
#constraints= np.empty((1000000,2))
#results[:] = np.nan
#constraints[:] = np.nan
#ii = 0
#for i in range(10):
# for j in range(10):
# for k in range(10):
# for l in range(10):
# for m in range(10):
# for n in range(10):
# x = np.array([i/10, j/10, k/10, l/10, m/10, n/10])
# results[ii], constraints[ii] = C3DTLZ4(x)
# ii+=1
#
#constr = np.sum(constraints<0, axis=1)==2
#results2 = np.sum(results<1, axis=1)==2
#results2 = results[constr]
#plt.plot(results2[:,0], results2[:,1], 'ro')
#iteration time 193.98799991607666
#21351.97400021553
#21406.330000162125
| 26.553191
| 76
| 0.508013
|
8e8301e31b38aa9b4357fd4b86b20aedc11293ea
| 974
|
py
|
Python
|
src/robusta/core/model/services.py
|
robusta-dev/robusta
|
5dadeb48724c37882759bcdd785e11f59a0afb09
|
[
"MIT"
] | 273
|
2021-12-28T20:48:48.000Z
|
2022-03-31T16:03:13.000Z
|
src/robusta/core/model/services.py
|
robusta-dev/robusta
|
5dadeb48724c37882759bcdd785e11f59a0afb09
|
[
"MIT"
] | 103
|
2022-01-10T11:45:47.000Z
|
2022-03-31T16:31:11.000Z
|
src/robusta/core/model/services.py
|
robusta-dev/robusta
|
5dadeb48724c37882759bcdd785e11f59a0afb09
|
[
"MIT"
] | 35
|
2021-12-30T15:30:14.000Z
|
2022-03-28T11:43:57.000Z
|
from pydantic import BaseModel
from typing import List, Dict
class ServiceInfo(BaseModel):
name: str
service_type: str
namespace: str
classification: str = "None"
deleted: bool = False
images: List[str]
labels: Dict[str, str]
def get_service_key(self) -> str:
return f"{self.namespace}/{self.service_type}/{self.name}"
def __eq__(self, other):
if not isinstance(other, ServiceInfo):
return NotImplemented
return (
self.name == other.name
and self.service_type == other.service_type
and self.namespace == other.namespace
and self.classification == other.classification
and self.deleted == other.deleted
and sorted(self.images) == sorted(other.images)
and len(self.labels.keys()) == len(other.labels.keys())
and all(self.labels.get(key) == other.labels.get(key) for key in self.labels.keys())
)
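# Example: ServiceInfo(name="web", service_type="Deployment", namespace="default",
# images=[], labels={}).get_service_key() returns "default/Deployment/web".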
| 31.419355
| 96
| 0.620123
|
bd953357e594b719257ffd4f4a09625d8716e9fe
| 473
|
py
|
Python
|
django_resto/settings.py
|
innoteq/django-resto
|
2ea04dd116af882e62643d9f819c05d5a62389bd
|
[
"BSD-3-Clause"
] | 31
|
2015-01-14T14:01:37.000Z
|
2020-12-12T18:39:50.000Z
|
django_resto/settings.py
|
innoteq/django-resto
|
2ea04dd116af882e62643d9f819c05d5a62389bd
|
[
"BSD-3-Clause"
] | 4
|
2015-01-10T17:20:41.000Z
|
2018-01-02T15:31:43.000Z
|
django_resto/settings.py
|
innoteq/django-resto
|
2ea04dd116af882e62643d9f819c05d5a62389bd
|
[
"BSD-3-Clause"
] | 5
|
2015-01-10T04:39:38.000Z
|
2018-10-12T06:42:33.000Z
|
"""Default settings. Override them in the settings file of your project.
See the README for more information.
"""
from __future__ import unicode_literals
from django.conf import settings
def get_setting(name):
name = 'RESTO_%s' % name
# raise a KeyError if we have no such setting
default = globals()[name]
return getattr(settings, name, default)
RESTO_TIMEOUT = 2
RESTO_MEDIA_HOSTS = ()
RESTO_FATAL_EXCEPTIONS = True
RESTO_SHOW_TRACEBACK = False
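# Example: get_setting('TIMEOUT') returns settings.RESTO_TIMEOUT when the project
# overrides it, and falls back to the module default of 2 otherwise.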
| 18.92
| 72
| 0.742072
|
4da6735369d7ed529d11e1b16ec981856ad2ba96
| 29,541
|
py
|
Python
|
sdk/python/pulumi_azure_native/machinelearningservices/v20200501preview/aci_service.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/machinelearningservices/v20200501preview/aci_service.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/machinelearningservices/v20200501preview/aci_service.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ACIServiceArgs', 'ACIService']
@pulumi.input_type
class ACIServiceArgs:
def __init__(__self__, *,
compute_type: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
workspace_name: pulumi.Input[str],
app_insights_enabled: Optional[pulumi.Input[bool]] = None,
auth_enabled: Optional[pulumi.Input[bool]] = None,
cname: Optional[pulumi.Input[str]] = None,
container_resource_requirements: Optional[pulumi.Input['ContainerResourceRequirementsArgs']] = None,
data_collection: Optional[pulumi.Input['ACIServiceCreateRequestDataCollectionArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
dns_name_label: Optional[pulumi.Input[str]] = None,
encryption_properties: Optional[pulumi.Input['ACIServiceCreateRequestEncryptionPropertiesArgs']] = None,
environment_image_request: Optional[pulumi.Input['CreateServiceRequestEnvironmentImageRequestArgs']] = None,
keys: Optional[pulumi.Input['CreateServiceRequestKeysArgs']] = None,
kv_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
ssl_certificate: Optional[pulumi.Input[str]] = None,
ssl_enabled: Optional[pulumi.Input[bool]] = None,
ssl_key: Optional[pulumi.Input[str]] = None,
vnet_configuration: Optional[pulumi.Input['ACIServiceCreateRequestVnetConfigurationArgs']] = None):
"""
The set of arguments for constructing a ACIService resource.
:param pulumi.Input[str] compute_type: The compute environment type for the service.
Expected value is 'ACI'.
:param pulumi.Input[str] resource_group_name: Name of the resource group in which workspace is located.
:param pulumi.Input[str] workspace_name: Name of Azure Machine Learning workspace.
:param pulumi.Input[bool] app_insights_enabled: Whether or not Application Insights is enabled.
:param pulumi.Input[bool] auth_enabled: Whether or not authentication is enabled on the service.
:param pulumi.Input[str] cname: The CName for the service.
:param pulumi.Input['ContainerResourceRequirementsArgs'] container_resource_requirements: The container resource requirements.
:param pulumi.Input['ACIServiceCreateRequestDataCollectionArgs'] data_collection: Details of the data collection options specified.
:param pulumi.Input[str] description: The description of the service.
:param pulumi.Input[str] dns_name_label: The Dns label for the service.
:param pulumi.Input['ACIServiceCreateRequestEncryptionPropertiesArgs'] encryption_properties: The encryption properties.
:param pulumi.Input['CreateServiceRequestEnvironmentImageRequestArgs'] environment_image_request: The Environment, models and assets needed for inferencing.
:param pulumi.Input['CreateServiceRequestKeysArgs'] keys: The authentication keys.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] kv_tags: The service tag dictionary. Tags are mutable.
:param pulumi.Input[str] location: The name of the Azure location/region.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The service properties dictionary. Properties are immutable.
:param pulumi.Input[str] service_name: Name of the Azure Machine Learning service.
:param pulumi.Input[str] ssl_certificate: The public SSL certificate in PEM format to use if SSL is enabled.
:param pulumi.Input[bool] ssl_enabled: Whether or not SSL is enabled.
:param pulumi.Input[str] ssl_key: The public SSL key in PEM format for the certificate.
:param pulumi.Input['ACIServiceCreateRequestVnetConfigurationArgs'] vnet_configuration: The virtual network configuration.
"""
pulumi.set(__self__, "compute_type", 'ACI')
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "workspace_name", workspace_name)
if app_insights_enabled is None:
app_insights_enabled = False
if app_insights_enabled is not None:
pulumi.set(__self__, "app_insights_enabled", app_insights_enabled)
if auth_enabled is None:
auth_enabled = False
if auth_enabled is not None:
pulumi.set(__self__, "auth_enabled", auth_enabled)
if cname is not None:
pulumi.set(__self__, "cname", cname)
if container_resource_requirements is not None:
pulumi.set(__self__, "container_resource_requirements", container_resource_requirements)
if data_collection is not None:
pulumi.set(__self__, "data_collection", data_collection)
if description is not None:
pulumi.set(__self__, "description", description)
if dns_name_label is not None:
pulumi.set(__self__, "dns_name_label", dns_name_label)
if encryption_properties is not None:
pulumi.set(__self__, "encryption_properties", encryption_properties)
if environment_image_request is not None:
pulumi.set(__self__, "environment_image_request", environment_image_request)
if keys is not None:
pulumi.set(__self__, "keys", keys)
if kv_tags is not None:
pulumi.set(__self__, "kv_tags", kv_tags)
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if service_name is not None:
pulumi.set(__self__, "service_name", service_name)
if ssl_certificate is not None:
pulumi.set(__self__, "ssl_certificate", ssl_certificate)
if ssl_enabled is None:
ssl_enabled = False
if ssl_enabled is not None:
pulumi.set(__self__, "ssl_enabled", ssl_enabled)
if ssl_key is not None:
pulumi.set(__self__, "ssl_key", ssl_key)
if vnet_configuration is not None:
pulumi.set(__self__, "vnet_configuration", vnet_configuration)
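    # A minimal construction only needs the three required arguments (resource names
    # here are hypothetical); 'compute_type' is pinned to 'ACI' by __init__:
    #   ACIServiceArgs(compute_type='ACI', resource_group_name='my-rg', workspace_name='my-ws')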
@property
@pulumi.getter(name="computeType")
def compute_type(self) -> pulumi.Input[str]:
"""
The compute environment type for the service.
Expected value is 'ACI'.
"""
return pulumi.get(self, "compute_type")
@compute_type.setter
def compute_type(self, value: pulumi.Input[str]):
pulumi.set(self, "compute_type", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group in which workspace is located.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> pulumi.Input[str]:
"""
Name of Azure Machine Learning workspace.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter(name="appInsightsEnabled")
def app_insights_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not Application Insights is enabled.
"""
return pulumi.get(self, "app_insights_enabled")
@app_insights_enabled.setter
def app_insights_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "app_insights_enabled", value)
@property
@pulumi.getter(name="authEnabled")
def auth_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not authentication is enabled on the service.
"""
return pulumi.get(self, "auth_enabled")
@auth_enabled.setter
def auth_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auth_enabled", value)
@property
@pulumi.getter
def cname(self) -> Optional[pulumi.Input[str]]:
"""
The CName for the service.
"""
return pulumi.get(self, "cname")
@cname.setter
def cname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cname", value)
@property
@pulumi.getter(name="containerResourceRequirements")
def container_resource_requirements(self) -> Optional[pulumi.Input['ContainerResourceRequirementsArgs']]:
"""
The container resource requirements.
"""
return pulumi.get(self, "container_resource_requirements")
@container_resource_requirements.setter
def container_resource_requirements(self, value: Optional[pulumi.Input['ContainerResourceRequirementsArgs']]):
pulumi.set(self, "container_resource_requirements", value)
@property
@pulumi.getter(name="dataCollection")
def data_collection(self) -> Optional[pulumi.Input['ACIServiceCreateRequestDataCollectionArgs']]:
"""
Details of the data collection options specified.
"""
return pulumi.get(self, "data_collection")
@data_collection.setter
def data_collection(self, value: Optional[pulumi.Input['ACIServiceCreateRequestDataCollectionArgs']]):
pulumi.set(self, "data_collection", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the service.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="dnsNameLabel")
def dns_name_label(self) -> Optional[pulumi.Input[str]]:
"""
The Dns label for the service.
"""
return pulumi.get(self, "dns_name_label")
@dns_name_label.setter
def dns_name_label(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dns_name_label", value)
@property
@pulumi.getter(name="encryptionProperties")
def encryption_properties(self) -> Optional[pulumi.Input['ACIServiceCreateRequestEncryptionPropertiesArgs']]:
"""
The encryption properties.
"""
return pulumi.get(self, "encryption_properties")
@encryption_properties.setter
def encryption_properties(self, value: Optional[pulumi.Input['ACIServiceCreateRequestEncryptionPropertiesArgs']]):
pulumi.set(self, "encryption_properties", value)
@property
@pulumi.getter(name="environmentImageRequest")
def environment_image_request(self) -> Optional[pulumi.Input['CreateServiceRequestEnvironmentImageRequestArgs']]:
"""
The Environment, models and assets needed for inferencing.
"""
return pulumi.get(self, "environment_image_request")
@environment_image_request.setter
def environment_image_request(self, value: Optional[pulumi.Input['CreateServiceRequestEnvironmentImageRequestArgs']]):
pulumi.set(self, "environment_image_request", value)
@property
@pulumi.getter
def keys(self) -> Optional[pulumi.Input['CreateServiceRequestKeysArgs']]:
"""
The authentication keys.
"""
return pulumi.get(self, "keys")
@keys.setter
def keys(self, value: Optional[pulumi.Input['CreateServiceRequestKeysArgs']]):
pulumi.set(self, "keys", value)
@property
@pulumi.getter(name="kvTags")
def kv_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The service tag dictionary. Tags are mutable.
"""
return pulumi.get(self, "kv_tags")
@kv_tags.setter
def kv_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "kv_tags", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Azure location/region.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The service properties dictionary. Properties are immutable.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Azure Machine Learning service.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter(name="sslCertificate")
def ssl_certificate(self) -> Optional[pulumi.Input[str]]:
"""
The public SSL certificate in PEM format to use if SSL is enabled.
"""
return pulumi.get(self, "ssl_certificate")
@ssl_certificate.setter
def ssl_certificate(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ssl_certificate", value)
@property
@pulumi.getter(name="sslEnabled")
def ssl_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not SSL is enabled.
"""
return pulumi.get(self, "ssl_enabled")
@ssl_enabled.setter
def ssl_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ssl_enabled", value)
@property
@pulumi.getter(name="sslKey")
def ssl_key(self) -> Optional[pulumi.Input[str]]:
"""
The public SSL key in PEM format for the certificate.
"""
return pulumi.get(self, "ssl_key")
@ssl_key.setter
def ssl_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ssl_key", value)
@property
@pulumi.getter(name="vnetConfiguration")
def vnet_configuration(self) -> Optional[pulumi.Input['ACIServiceCreateRequestVnetConfigurationArgs']]:
"""
The virtual network configuration.
"""
return pulumi.get(self, "vnet_configuration")
@vnet_configuration.setter
def vnet_configuration(self, value: Optional[pulumi.Input['ACIServiceCreateRequestVnetConfigurationArgs']]):
pulumi.set(self, "vnet_configuration", value)
class ACIService(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_insights_enabled: Optional[pulumi.Input[bool]] = None,
auth_enabled: Optional[pulumi.Input[bool]] = None,
cname: Optional[pulumi.Input[str]] = None,
compute_type: Optional[pulumi.Input[str]] = None,
container_resource_requirements: Optional[pulumi.Input[pulumi.InputType['ContainerResourceRequirementsArgs']]] = None,
data_collection: Optional[pulumi.Input[pulumi.InputType['ACIServiceCreateRequestDataCollectionArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
dns_name_label: Optional[pulumi.Input[str]] = None,
encryption_properties: Optional[pulumi.Input[pulumi.InputType['ACIServiceCreateRequestEncryptionPropertiesArgs']]] = None,
environment_image_request: Optional[pulumi.Input[pulumi.InputType['CreateServiceRequestEnvironmentImageRequestArgs']]] = None,
keys: Optional[pulumi.Input[pulumi.InputType['CreateServiceRequestKeysArgs']]] = None,
kv_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
ssl_certificate: Optional[pulumi.Input[str]] = None,
ssl_enabled: Optional[pulumi.Input[bool]] = None,
ssl_key: Optional[pulumi.Input[str]] = None,
vnet_configuration: Optional[pulumi.Input[pulumi.InputType['ACIServiceCreateRequestVnetConfigurationArgs']]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Machine Learning service object wrapped into ARM resource envelope.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] app_insights_enabled: Whether or not Application Insights is enabled.
:param pulumi.Input[bool] auth_enabled: Whether or not authentication is enabled on the service.
:param pulumi.Input[str] cname: The CName for the service.
:param pulumi.Input[str] compute_type: The compute environment type for the service.
Expected value is 'ACI'.
:param pulumi.Input[pulumi.InputType['ContainerResourceRequirementsArgs']] container_resource_requirements: The container resource requirements.
:param pulumi.Input[pulumi.InputType['ACIServiceCreateRequestDataCollectionArgs']] data_collection: Details of the data collection options specified.
:param pulumi.Input[str] description: The description of the service.
:param pulumi.Input[str] dns_name_label: The Dns label for the service.
:param pulumi.Input[pulumi.InputType['ACIServiceCreateRequestEncryptionPropertiesArgs']] encryption_properties: The encryption properties.
:param pulumi.Input[pulumi.InputType['CreateServiceRequestEnvironmentImageRequestArgs']] environment_image_request: The Environment, models and assets needed for inferencing.
:param pulumi.Input[pulumi.InputType['CreateServiceRequestKeysArgs']] keys: The authentication keys.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] kv_tags: The service tag dictionary. Tags are mutable.
:param pulumi.Input[str] location: The name of the Azure location/region.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The service properties dictionary. Properties are immutable.
:param pulumi.Input[str] resource_group_name: Name of the resource group in which workspace is located.
:param pulumi.Input[str] service_name: Name of the Azure Machine Learning service.
:param pulumi.Input[str] ssl_certificate: The public SSL certificate in PEM format to use if SSL is enabled.
:param pulumi.Input[bool] ssl_enabled: Whether or not SSL is enabled.
:param pulumi.Input[str] ssl_key: The public SSL key in PEM format for the certificate.
:param pulumi.Input[pulumi.InputType['ACIServiceCreateRequestVnetConfigurationArgs']] vnet_configuration: The virtual network configuration.
:param pulumi.Input[str] workspace_name: Name of Azure Machine Learning workspace.
"""
...
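    # Hedged usage sketch (comments only, so the generated class body stays intact).
    # The import path and every resource/workspace name below are assumptions for
    # illustration; only the required inputs named in the docstring above are real:
    #
    #   import pulumi_azure_native as azure_native
    #
    #   service = azure_native.machinelearningservices.v20200501preview.ACIService(
    #       "scoring-endpoint",
    #       resource_group_name="my-rg",
    #       workspace_name="my-workspace",
    #       compute_type="ACI",
    #       ssl_enabled=False,
    #   )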
@overload
def __init__(__self__,
resource_name: str,
args: ACIServiceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Machine Learning service object wrapped into ARM resource envelope.
:param str resource_name: The name of the resource.
:param ACIServiceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ACIServiceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_insights_enabled: Optional[pulumi.Input[bool]] = None,
auth_enabled: Optional[pulumi.Input[bool]] = None,
cname: Optional[pulumi.Input[str]] = None,
compute_type: Optional[pulumi.Input[str]] = None,
container_resource_requirements: Optional[pulumi.Input[pulumi.InputType['ContainerResourceRequirementsArgs']]] = None,
data_collection: Optional[pulumi.Input[pulumi.InputType['ACIServiceCreateRequestDataCollectionArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
dns_name_label: Optional[pulumi.Input[str]] = None,
encryption_properties: Optional[pulumi.Input[pulumi.InputType['ACIServiceCreateRequestEncryptionPropertiesArgs']]] = None,
environment_image_request: Optional[pulumi.Input[pulumi.InputType['CreateServiceRequestEnvironmentImageRequestArgs']]] = None,
keys: Optional[pulumi.Input[pulumi.InputType['CreateServiceRequestKeysArgs']]] = None,
kv_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
ssl_certificate: Optional[pulumi.Input[str]] = None,
ssl_enabled: Optional[pulumi.Input[bool]] = None,
ssl_key: Optional[pulumi.Input[str]] = None,
vnet_configuration: Optional[pulumi.Input[pulumi.InputType['ACIServiceCreateRequestVnetConfigurationArgs']]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ACIServiceArgs.__new__(ACIServiceArgs)
if app_insights_enabled is None:
app_insights_enabled = False
__props__.__dict__["app_insights_enabled"] = app_insights_enabled
if auth_enabled is None:
auth_enabled = False
__props__.__dict__["auth_enabled"] = auth_enabled
__props__.__dict__["cname"] = cname
if compute_type is None and not opts.urn:
raise TypeError("Missing required property 'compute_type'")
__props__.__dict__["compute_type"] = 'ACI'
__props__.__dict__["container_resource_requirements"] = container_resource_requirements
__props__.__dict__["data_collection"] = data_collection
__props__.__dict__["description"] = description
__props__.__dict__["dns_name_label"] = dns_name_label
__props__.__dict__["encryption_properties"] = encryption_properties
__props__.__dict__["environment_image_request"] = environment_image_request
__props__.__dict__["keys"] = keys
__props__.__dict__["kv_tags"] = kv_tags
__props__.__dict__["location"] = location
__props__.__dict__["properties"] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["service_name"] = service_name
__props__.__dict__["ssl_certificate"] = ssl_certificate
if ssl_enabled is None:
ssl_enabled = False
__props__.__dict__["ssl_enabled"] = ssl_enabled
__props__.__dict__["ssl_key"] = ssl_key
__props__.__dict__["vnet_configuration"] = vnet_configuration
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["identity"] = None
__props__.__dict__["name"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200501preview:ACIService"), pulumi.Alias(type_="azure-native:machinelearningservices:ACIService"), pulumi.Alias(type_="azure-nextgen:machinelearningservices:ACIService"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200515preview:ACIService"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200515preview:ACIService"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200901preview:ACIService"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200901preview:ACIService"), pulumi.Alias(type_="azure-native:machinelearningservices/v20210101:ACIService"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20210101:ACIService"), pulumi.Alias(type_="azure-native:machinelearningservices/v20210401:ACIService"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20210401:ACIService")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ACIService, __self__).__init__(
'azure-native:machinelearningservices/v20200501preview:ACIService',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ACIService':
"""
Get an existing ACIService resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ACIServiceArgs.__new__(ACIServiceArgs)
__props__.__dict__["identity"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return ACIService(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Any]:
"""
Service properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
The sku of the workspace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
| 48.99005
| 965
| 0.670424
|
80de6ff7ed7006f770eb158437c8fe3ff6fe08d2
| 1,912
|
py
|
Python
|
delfin/alert_manager/alert_processor.py
|
noelmcloughlin/delfin
|
6dfa9bdb86d850410c82201f6fa621b4e5ea2917
|
[
"Apache-2.0"
] | null | null | null |
delfin/alert_manager/alert_processor.py
|
noelmcloughlin/delfin
|
6dfa9bdb86d850410c82201f6fa621b4e5ea2917
|
[
"Apache-2.0"
] | 3
|
2020-05-23T03:24:56.000Z
|
2020-06-06T02:24:27.000Z
|
delfin/alert_manager/alert_processor.py
|
noelmcloughlin/delfin
|
6dfa9bdb86d850410c82201f6fa621b4e5ea2917
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log
from delfin import context
from delfin import db
from delfin import exception
from delfin.common import alert_util
from delfin.drivers import api as driver_manager
from delfin.exporter import base_exporter
LOG = log.getLogger(__name__)
class AlertProcessor(object):
"""Alert model translation and export functions"""
def __init__(self):
self.driver_manager = driver_manager.API()
self.exporter_manager = base_exporter.AlertExporterManager()
def process_alert_info(self, alert):
"""Fills alert model using driver manager interface."""
ctxt = context.get_admin_context()
storage = db.storage_get(ctxt, alert['storage_id'])
try:
alert_model = self.driver_manager.parse_alert(ctxt,
alert['storage_id'],
alert)
# Fill storage specific info
alert_util.fill_storage_attributes(alert_model, storage)
except Exception as e:
LOG.error(e)
raise exception.InvalidResults(
"Failed to fill the alert model from driver.")
# Export to base exporter which handles dispatch for all exporters
self.exporter_manager.dispatch(ctxt, alert_model)
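# Hedged usage sketch (comments only). The alert payload fields other than
# 'storage_id' are assumptions; this module only reads 'storage_id' itself and
# forwards the rest to the driver's parse_alert():
#
#   processor = AlertProcessor()
#   trap = {'storage_id': '<storage uuid>', 'oid': '...', 'value': '...'}
#   processor.process_alert_info(trap)
#
# The filled alert model is then fanned out to all registered exporters through
# AlertExporterManager.dispatch().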
| 36.769231
| 78
| 0.679393
|
886aa77bed2b2a5401469ce977fbfa53c8887d45
| 3,622
|
py
|
Python
|
models/basic.py
|
wpfhtl/PSMNet
|
337f8d9cd3ecfffe26395450e4363428179d6f30
|
[
"MIT"
] | null | null | null |
models/basic.py
|
wpfhtl/PSMNet
|
337f8d9cd3ecfffe26395450e4363428179d6f30
|
[
"MIT"
] | null | null | null |
models/basic.py
|
wpfhtl/PSMNet
|
337f8d9cd3ecfffe26395450e4363428179d6f30
|
[
"MIT"
] | 1
|
2018-04-20T05:32:55.000Z
|
2018-04-20T05:32:55.000Z
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import math
from submodule import *
class PSMNet(nn.Module):
def __init__(self, maxdisp, colormode):
super(PSMNet, self).__init__()
self.maxdisp = maxdisp
self.feature_extraction = feature_extraction(colormode)
########
self.dres0 = nn.Sequential(convbn_3d(64, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True))
self.dres1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1))
self.dres2 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1))
self.dres3 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1))
self.dres4 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1))
self.classify = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.Conv3d):
n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, left, right):
refimg_fea = self.feature_extraction(left)
targetimg_fea = self.feature_extraction(right)
#matching
        cost = Variable(torch.FloatTensor(refimg_fea.size()[0], refimg_fea.size()[1]*2, self.maxdisp//4, refimg_fea.size()[2], refimg_fea.size()[3]).zero_(), volatile= not self.training).cuda()
        for i in range(self.maxdisp//4):
if i > 0 :
cost[:, :refimg_fea.size()[1], i, :,i:] = refimg_fea[:,:,:,i:]
cost[:, refimg_fea.size()[1]:, i, :,i:] = targetimg_fea[:,:,:,:-i]
else:
cost[:, :refimg_fea.size()[1], i, :,:] = refimg_fea
cost[:, refimg_fea.size()[1]:, i, :,:] = targetimg_fea
cost = cost.contiguous()
cost0 = self.dres0(cost)
cost0 = self.dres1(cost0) + cost0
cost0 = self.dres2(cost0) + cost0
cost0 = self.dres3(cost0) + cost0
cost0 = self.dres4(cost0) + cost0
cost = self.classify(cost0)
cost = F.upsample(cost, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear')
cost = torch.squeeze(cost,1)
pred = F.softmax(cost)
pred = disparityregression(self.maxdisp)(pred)
return pred
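# Hedged shape sketch (comments only; the 'colormode' value is an assumption, it is
# forwarded untouched to feature_extraction):
#
#   model = PSMNet(maxdisp=192, colormode=3).cuda()
#   left = torch.randn(1, 3, 256, 512).cuda()
#   right = torch.randn(1, 3, 256, 512).cuda()
#   disparity = model(left, right)   # B x H x W disparity after the soft-argmin
#
# The cost volume built in forward() is B x 2C x maxdisp//4 x H/4 x W/4: left and
# right features are concatenated at each candidate disparity before the stacked
# 3D convolutions and the trilinear upsampling back to full resolution.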
| 40.244444
| 194
| 0.509111
|
d05c9534affc297c7fa01543d6a14f5ec3bddcf6
| 3,571
|
py
|
Python
|
main/catdog_test.py
|
xiaonanQua/experiment
|
19925c9af5cffc73451dc7674bc3afce25abf772
|
[
"MIT"
] | null | null | null |
main/catdog_test.py
|
xiaonanQua/experiment
|
19925c9af5cffc73451dc7674bc3afce25abf772
|
[
"MIT"
] | null | null | null |
main/catdog_test.py
|
xiaonanQua/experiment
|
19925c9af5cffc73451dc7674bc3afce25abf772
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torchvision.models as model
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import torchvision.transforms as transforms
import time
from torch.utils.data import Dataset
from PIL import Image
import os
import torch.nn.functional as F
from sklearn.metrics import f1_score
import utils.tools as tool
# Training/test dataset paths
test_dataset_path = '/home/team/xiaonan/Dataset/cat_dog/test/'
# Number of classes
num_classes = 2
# Class names and device
class_name = ['cat', 'dog']  # class names
device = 'cuda:1' if torch.cuda.is_available() else 'cpu'
# Mean and standard deviation
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
# Paths of the saved model and the result file
model_path = '../checkpoints/catdog.pth'
result_file = '../result/catdog.txt'
# File list
file_list = os.listdir(test_dataset_path)
print(len(file_list))
# Preprocess the data
data_preprocess = transforms.Compose([
transforms.Resize(size=(112, 112)),
transforms.ToTensor(),
# transforms.Normalize(mean=mean, std=std)
])
class CatDogData(Dataset):
"""
    Dataset that inherits from the parent Dataset class
"""
def __init__(self, root, transform):
super(CatDogData, self).__init__()
        # List of file paths
self.images_path_list = [os.path.join(root, image_name) for image_name in file_list]
print(len(self.images_path_list))
self.image_preprocess = transform
def __getitem__(self, index):
        # Get the path of a single image
        image_path = self.images_path_list[index]
        print('Reading data path: {}'.format(image_path))
        # Read the image
        image = Image.open(image_path)
        # Preprocess the image
        image = self.image_preprocess(image)
return image
def __len__(self):
return len(self.images_path_list)
# Load the dataset
image_datasets = CatDogData(root=test_dataset_path, transform=data_preprocess)
# Data loader
test_data_loader = DataLoader(dataset=image_datasets)
# print(iter(test_data_loader).__next__())
# Define the model
# Get the ResNet50 network structure
net = model.resnet50(pretrained=False, progress=True)
# Rewrite the last layer of the network
fc_in_features = net.fc.in_features
net.fc = nn.Linear(fc_in_features, num_classes)
# Load the model parameters
net.load_state_dict(torch.load(model_path))
# Put the network structure on the GPU
# net.to(device)
# Start time of the test
since = time.time()
# Disable gradient computation via a context manager to reduce memory usage
with torch.no_grad():
j = 0
with open(result_file, mode='w+') as file:
        # Iterate over the whole dataset
        for images in test_data_loader:
            # Get the image and label data
            # images= data2
            # If a GPU is available, move the image and label data onto it
            # images = images.to(device)
            # print(images.size())
            # Stop looping once the whole dataset has been read
            if j > len(file_list) - 1:
                break
            # Prediction results
            outputs = net(images)
# outputs = F.softmax(outputs, dim=1)
# _, preds = torch.max(outputs, 1)
preds = torch.argmax(outputs, 1)
predict_result = preds.numpy().tolist()
# print(predict_result)
# print(preds.numpy().tolist())
# print(type(preds))
# print(j)
content = '{} {}\n'.format(file_list[j], class_name[predict_result[0]])
file.write(content)
j = j + 1
            tool.view_bar('Testing data:', j+1, len(file_list))
            # # Write the results into the result file
# with open(result_file, mode='a+') as file:
# for i in range(images.size(0)):
# content = '{} {}\n'.format(file_list[j], class_name[predict_result[i]])
# file.write(content)
# j = j+1
            # print('Results saved...')
# print()
# print('micro_f1_score:{}, macro_f1_score:{}'.format(micro_f1, macro_f1))
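# Expected layout of '../result/catdog.txt' (file names below are illustrative):
# one line per test image, "<image file name> <predicted class>", e.g.
#   101.jpg cat
#   102.jpg dog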
| 25.326241
| 92
| 0.628395
|
4481d4a34c212a8d376fcb4cd47f4f63acc160e8
| 4,514
|
py
|
Python
|
src/mac/generate_mapping.py
|
hanya/mozc
|
7e2420b6307ef2eb147fdf9b620cb8f1ebea0588
|
[
"BSD-3-Clause"
] | 1,144
|
2015-04-23T16:18:45.000Z
|
2022-03-29T19:37:33.000Z
|
src/mac/generate_mapping.py
|
kirameister/mozc
|
18b2b32b4d3fe585d38134606773239781b6be82
|
[
"BSD-3-Clause"
] | 291
|
2015-05-04T07:53:37.000Z
|
2022-03-22T00:09:05.000Z
|
src/mac/generate_mapping.py
|
kirameister/mozc
|
18b2b32b4d3fe585d38134606773239781b6be82
|
[
"BSD-3-Clause"
] | 301
|
2015-05-03T00:07:18.000Z
|
2022-03-21T10:48:29.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import optparse
import sys
class Mapping(object):
def __init__(self, options):
self._mapname = options.mapname
self._key_type = options.key_type
self._result_type = options.result_type
self._filename = options.filename
def PrintLine(self, line):
columns = line.strip().split('\t')
if len(columns) != 2:
return
(key, value) = columns
mapname = self._mapname
if key.startswith('Shift '):
mapname += 'Shift'
key = key[len('Shift '):len(key)]
if self._key_type == 'unsigned short' and not key.startswith('kVK_'):
key = 'kVK_ANSI_' + key
if self._result_type == 'const char *':
value = '"%s"' % ''.join(r'\x%02x' % b for b in value.encode('utf-8'))
elif self._result_type == 'KeyEvent::SpecialKey':
value = 'KeyEvent::' + value
print(' (*k%s)[%s] = %s;' % (mapname, key, value))
def PrintHeader(self):
print("""// Copyright 2009 Google Inc. All Rights Reserved.
// Author: mukai
//
// This file is automatically generated by
// generate_mapping.py.
// Do not edit directly and do not include this from any file other
// than KeyCodeMap.mm
namespace {
static std::map<%(key_type)s, %(result_type)s> *k%(mapname)s = nullptr;
static std::map<%(key_type)s, %(result_type)s> *k%(mapname)sShift = nullptr;
static once_t kOnceFor%(mapname)s = MOZC_ONCE_INIT;
void Init%(mapname)s() {
if (k%(mapname)s != nullptr || k%(mapname)sShift != nullptr) {
return;
}
k%(mapname)s = new(std::nothrow)std::map<%(key_type)s, %(result_type)s>;
if (k%(mapname)s == nullptr) {
return;
}
k%(mapname)sShift = new(std::nothrow)std::map<%(key_type)s, %(result_type)s>;
if (k%(mapname)sShift == nullptr) {
delete k%(mapname)s;
k%(mapname)s = nullptr;
return;
}
""" % {'key_type': self._key_type,
'mapname': self._mapname,
'result_type': self._result_type})
def PrintFooter(self):
print("""}
} // namespace
""")
def Print(self):
self.PrintHeader()
for line in open(self._filename, encoding='utf-8'):
self.PrintLine(line)
self.PrintFooter()
def ParseOption():
"""Parse command line options."""
parser = optparse.OptionParser()
parser.add_option('--mapname', dest='mapname')
parser.add_option('--key_type', dest='key_type')
parser.add_option('--result_type', dest='result_type')
parser.add_option('--filename', dest='filename')
(options, unused_args) = parser.parse_args()
if not options.mapname:
print('Error: the output map name should be specified.')
sys.exit(2)
if not options.key_type:
options.key_type = 'unsigned short'
if not options.result_type:
    print('Error: the result type of the output map should be specified.')
if not options.filename:
print('Error: the file name is not specified.')
return options
def main():
options = ParseOption()
Mapping(options).Print()
if __name__ == '__main__':
main()
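# Illustrative run (comments only; the flag values and TSV contents are made up,
# the real key tables live alongside the build rules):
#
#   python generate_mapping.py --mapname=SpecialKeyMap \
#       --result_type='KeyEvent::SpecialKey' --filename=special_keys.tsv
#
# With the default key type, an input line "Return<TAB>ENTER" is printed as
#    (*kSpecialKeyMap)[kVK_ANSI_Return] = KeyEvent::ENTER;
# and "Shift Tab<TAB>TAB" goes into the kSpecialKeyMapShift map instead.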
| 33.686567
| 79
| 0.696943
|
86e4621e9c79b5f83e2cf42b572d5292ff5f43ad
| 16,738
|
py
|
Python
|
app/engine/health_bar.py
|
ViolaBuddy/EscapeFromPlegia
|
5228b42e8525b445854d742dccf85ca65b320d70
|
[
"MIT"
] | null | null | null |
app/engine/health_bar.py
|
ViolaBuddy/EscapeFromPlegia
|
5228b42e8525b445854d742dccf85ca65b320d70
|
[
"MIT"
] | null | null | null |
app/engine/health_bar.py
|
ViolaBuddy/EscapeFromPlegia
|
5228b42e8525b445854d742dccf85ca65b320d70
|
[
"MIT"
] | null | null | null |
import random
import app.utilities as utils
from app.constants import WINWIDTH, WINHEIGHT, TILEWIDTH, TILEHEIGHT, TILEX, TILEY
from app.engine.sprites import SPRITES
from app.engine.fonts import FONT
from app.engine.sound import SOUNDTHREAD
from app.engine import engine, combat_calcs, icons, equations, skill_system, item_system
from app.engine.game_state import game
from app.engine.game_counters import ANIMATION_COUNTERS
class HealthBar():
time_for_change_min = 200
speed = utils.frames2ms(1) # 1 frame for each hp point
def __init__(self, unit):
self.unit = unit
self.displayed_hp = self.unit.get_hp()
self.old_hp = self.displayed_hp
self.total_hp = equations.parser.hitpoints(self.unit)
self.transition_flag = False
self.time_for_change = self.time_for_change_min
self.last_update = 0
def set_hp(self, val):
self.displayed_hp = val
def update(self):
# print(self.displayed_hp, self.unit.get_hp(), self.transition_flag)
# Check to see if we should begin showing transition
if self.displayed_hp != self.unit.get_hp() and not self.transition_flag:
self.transition_flag = True
self.time_for_change = max(self.time_for_change_min, abs(self.displayed_hp - self.unit.get_hp()) * self.speed)
self.last_update = engine.get_time()
if equations.parser.hitpoints(self.unit) != self.total_hp:
self.total_hp = equations.parser.hitpoints(self.unit)
# Check to see if we should update
if self.transition_flag:
time = (engine.get_time() - self.last_update) / self.time_for_change
new_val = int(utils.lerp(self.old_hp, self.unit.get_hp(), time))
self.set_hp(new_val)
if time >= 1:
self.set_hp(self.unit.get_hp())
self.old_hp = self.displayed_hp
self.transition_flag = False
class CombatHealthBar(HealthBar):
full_hp_blip = SPRITES.get('full_hp_blip')
empty_hp_blip = SPRITES.get('empty_hp_blip')
end_hp_blip = engine.subsurface(full_hp_blip, (0, 0, 1, full_hp_blip.get_height()))
colors = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1]
speed = utils.frames2ms(2)
time_for_change_min = 0
def __init__(self, unit):
super().__init__(unit)
self.color_tick = 0
self.heal_sound_update = 0
def update(self, skip=False):
if self.displayed_hp < self.unit.get_hp():
self.speed = utils.frames2ms(4) # Slower speed when increasing hp
else:
self.speed = utils.frames2ms(2)
super().update()
self.color_tick = int(engine.get_time() / 16.67) % len(self.colors)
def set_hp(self, val):
current_time = engine.get_time()
if self.displayed_hp < self.unit.get_hp() and current_time - self.heal_sound_update > self.speed:
self.heal_sound_update = current_time
SOUNDTHREAD.stop_sfx('HealBoop')
SOUNDTHREAD.play_sfx('HealBoop')
super().set_hp(val)
def big_number(self) -> bool:
return self.displayed_hp != self.unit.get_hp()
def done(self) -> bool:
return self.displayed_hp == self.unit.get_hp()
def draw(self, surf, left, top):
font = FONT['number-small2']
if self.big_number():
font = FONT['number-big2']
if self.displayed_hp <= 80:
font.blit_right(str(self.displayed_hp), surf, (left, top - 4))
else:
font.blit_right('??', surf, (left, top - 4))
full_hp_blip = engine.subsurface(self.full_hp_blip, (self.colors[self.color_tick] * 2, 0, 2, self.full_hp_blip.get_height()))
if self.unit.get_max_hp() <= 40:
for idx in range(self.displayed_hp):
surf.blit(full_hp_blip, (left + idx * 2 + 5, top + 1))
for idx in range(self.unit.get_max_hp() - self.displayed_hp):
surf.blit(self.empty_hp_blip, (left + (idx + self.displayed_hp) * 2 + 5, top + 1))
surf.blit(self.end_hp_blip, (left + self.unit.get_max_hp() * 2 + 5, top + 1))
else:
# Lower 40 hp
for idx in range(min(self.displayed_hp, 40)):
surf.blit(full_hp_blip, (left + idx * 2 + 5, top + 4))
for idx in range(max(40 - self.displayed_hp, 0)):
surf.blit(self.empty_hp_blip, (left + (idx + self.displayed_hp) * 2 + 5, top + 4))
surf.blit(self.end_hp_blip, (left + 40 * 2 + 5, top + 4))
# Upper 40 hp
for idx in range(utils.clamp(self.displayed_hp - 40, 0, 40)):
surf.blit(full_hp_blip, (left + idx * 2 + 5, top - 4))
right = utils.clamp(self.unit.get_max_hp(), 0, 80)
for idx in range(right - max(40, self.displayed_hp)):
surf.blit(self.empty_hp_blip, (left + (idx + max(self.displayed_hp - 40, 0)) * 2 + 5, top - 4))
surf.blit(self.end_hp_blip, (left + (right - 40) * 2 + 5, top - 4))
class MapHealthBar(HealthBar):
time_for_change_min = 200
speed = utils.frames2ms(1)
health_outline = SPRITES.get('map_health_outline')
health_bar = SPRITES.get('map_health_bar')
def draw(self, surf, left, top):
total = max(1, self.total_hp)
fraction_hp = utils.clamp(self.displayed_hp / total, 0, 1)
index_pixel = int(12 * fraction_hp) + 1
surf.blit(self.health_outline, (left, top + 13))
if fraction_hp > 0:
bar = engine.subsurface(self.health_bar, (0, 0, index_pixel, 1))
surf.blit(bar, (left + 1, top + 14))
return surf
class MapCombatHealthBar(HealthBar):
display_numbers = True
health_bar = SPRITES.get('health_bar')
def draw(self, surf):
total = max(1, self.total_hp)
fraction_hp = utils.clamp(self.displayed_hp / total, 0, 1)
index_pixel = int(50 * fraction_hp)
position = 25, 22
surf.blit(engine.subsurface(self.health_bar, (0, 0, index_pixel, 2)), position)
# Blit HP number
if self.display_numbers:
font = FONT['number-small2']
if self.transition_flag:
font = FONT['number-big2']
s = str(self.displayed_hp)
position = 22 - font.size(s)[0], 15
font.blit(s, surf, position)
return surf
class MapCombatInfo():
blind_speed = 1/8. # 8 frames to fade in
def __init__(self, draw_method, unit, item, target, stats):
self.skill_icons = []
self.ordering = None
self.reset()
self.change_unit(unit, item, target, stats, draw_method)
# self.fade_in()
def change_unit(self, unit, item, target=None, stats=None, draw_method=None):
if draw_method:
self.draw_method = draw_method
self.true_position = None
if unit != self.unit or target != self.target:
self.fade_in()
else:
self.blinds = 1
self.hp_bar = MapCombatHealthBar(unit)
self.unit = unit
self.item = item
if target:
self.target = target
if stats:
self.hit = stats[0]
self.mt = stats[1]
self.skill_icons.clear()
# Handle surfaces
team = unit.team
self.stats_surf = None
self.bg_surf = SPRITES.get('health_' + utils.get_team_color(team)).convert_alpha()
self.c_surf = SPRITES.get('combat_stats_' + utils.get_team_color(team)).convert_alpha()
self.gem = SPRITES.get('combat_gem_' + utils.get_team_color(team)).convert_alpha()
def reset(self):
self.draw_method = None
self.true_position = None
self.hp_bar = None
self.unit = None
self.item = None
self.target = None
self.hit = None
self.mt = None
self.blinds = 1
self.reset_shake()
self.stats_surf = None
self.bg_surf = None
self.c_surf = None
self.gem = None
self.skill_icons.clear()
def fade_in(self):
self.blinds = 0
def fade_out(self):
pass
def shake(self, num):
self.current_shake_idx = 1
if num == 1: # Normal hit
self.shake_set = [(-3, -3), (0, 0), (3, 3), (0, 0)]
elif num == 2: # Kill
self.shake_set = [(3, 3), (0, 0), (0, 0), (3, 3), (-3, -3), (3, 3), (-3, -3), (0, 0)]
elif num == 3: # Crit
self.shake_set = [(random.randint(-4, 4), random.randint(-4, 4)) for _ in range(16)] + [(0, 0)]
elif num == 4: # Glancing hit
self.shake_set = [(-1, -1), (0, 0), (1, 1), (0, 0)]
def reset_shake(self):
self.shake_set = [(0, 0)] # How the hp bar will shake
self.shake_offset = (0, 0) # How it is currently shaking
self.current_shake_idx = 0
def handle_shake(self):
if self.current_shake_idx:
self.shake_offset = self.shake_set[self.current_shake_idx - 1]
self.current_shake_idx += 1
if self.current_shake_idx > len(self.shake_set):
self.current_shake_idx = 0
def add_skill_icon(self, skill_icon):
self.skill_icons.append(skill_icon)
def build_stat_surf(self):
stat_surf = self.c_surf.copy()
# Blit hit
if self.hit is not None:
hit = str(utils.clamp(self.hit, 0, 100))
else:
hit = '--'
position = stat_surf.get_width() // 2 - FONT['number-small2'].size(hit)[0] - 1, -2
FONT['number-small2'].blit(hit, stat_surf, position)
# Blit damage
if self.mt is not None:
damage = str(max(0, self.mt))
else:
damage = '--'
position = stat_surf.get_width() - FONT['number-small2'].size(damage)[0] - 2, -2
FONT['number-small2'].blit(damage, stat_surf, position)
return stat_surf
def get_time_for_change(self):
return self.hp_bar.time_for_change
def force_position_update(self):
if self.unit:
width, height = self.bg_surf.get_width(), self.bg_surf.get_height()
self.determine_position(width, height)
def determine_position(self, width, height):
self.true_position = self.draw_method
if self.draw_method in ('p1', 'p2'):
pos1 = self.unit.position
pos2 = self.target.position
camera_pos = game.camera.get_xy()
if self.draw_method == 'p1':
left = True if pos1[0] <= pos2[0] else False
else:
left = True if pos1[0] < pos2[0] else False
self.ordering = 'left' if left else 'right'
x_pos = WINWIDTH//2 - width if left else WINWIDTH//2
rel_1 = pos1[1] - camera_pos[1]
rel_2 = pos2[1] - camera_pos[1]
# If both are on top of screen
if rel_1 < TILEY//2 and rel_2 < TILEY//2:
rel = max(rel_1, rel_2)
y_pos = (rel + 1) * TILEHEIGHT + 12
# If both are on bottom of screen
elif rel_1 >= TILEY//2 and rel_2 >= TILEY//2:
rel = min(rel_1, rel_2)
y_pos = rel * TILEHEIGHT - 12 - height - 13 # Stat surf
# Find largest gap and place it in the middle
else:
top_gap = min(rel_1, rel_2)
bottom_gap = TILEY - 1 - max(rel_1, rel_2)
middle_gap = abs(rel_1 - rel_2)
if top_gap > bottom_gap and top_gap > middle_gap:
y_pos = top_gap * TILEHEIGHT - 12 - height - 13 # Stat surf
elif bottom_gap > top_gap and bottom_gap > middle_gap:
y_pos = (bottom_gap + 1) * TILEHEIGHT + 12
else:
y_pos = WINHEIGHT//4 - height//2 - 13//2 if rel_1 < TILEY//2 else 3*WINHEIGHT//4 - height//2 - 13//2
x_pos = WINWIDTH//4 - width//2 if pos1[0] - camera_pos[0] > TILEX//2 else 3*WINWIDTH//4 - width//2
self.ordering = 'middle'
self.true_position = (x_pos, y_pos)
elif self.draw_method == 'splash':
x_pos = self.unit.position[0] - game.camera.get_x()
x_pos = utils.clamp(x_pos, 3, TILEX - 2)
if self.unit.position[1] - game.camera.get_y() < TILEY//2:
y_pos = self.unit.position[1] - game.camera.get_y() + 2
else:
y_pos = self.unit.position[1] - game.camera.get_y() - 3
self.true_position = x_pos * TILEWIDTH - width//2, y_pos * TILEHEIGHT - 8
self.ordering = 'middle'
def update_stats(self, stats):
self.hit, self.mt = stats
self.stats_surf = None
def update(self):
# Make blinds wider
self.blinds = utils.clamp(self.blinds, self.blinds + self.blind_speed, 1)
if self.unit and self.blinds >= 1:
self.handle_shake()
self.hp_bar.update()
def draw(self, surf):
# Create background surface
width, height = self.bg_surf.get_width(), self.bg_surf.get_height()
true_height = height + self.c_surf.get_height()
if self.hit or self.mt:
bg_surf = engine.create_surface((width, true_height))
else:
bg_surf = engine.create_surface((width, height))
bg_surf.blit(self.bg_surf, (0, 0))
# Name
name_width = FONT['text-numbers'].size(self.unit.name)[0]
position = width - name_width - 4, 3
FONT['text-numbers'].blit(self.unit.name, bg_surf, position)
# Item
if self.item:
# Determine effectiveness
icon = icons.get_icon(self.item)
if icon:
icon = item_system.item_icon_mod(self.unit, self.item, self.target, icon)
bg_surf.blit(icon, (2, 3))
# Blit advantage
if skill_system.check_enemy(self.unit, self.target):
adv = combat_calcs.compute_advantage(self.unit, self.target, self.item, self.target.get_weapon())
disadv = combat_calcs.compute_advantage(self.unit, self.target, self.item, self.target.get_weapon(), False)
up_arrow = engine.subsurface(SPRITES.get('arrow_advantage'), (ANIMATION_COUNTERS.arrow_counter.count * 7, 0, 7, 10))
down_arrow = engine.subsurface(SPRITES.get('arrow_advantage'), (ANIMATION_COUNTERS.arrow_counter.count * 7, 10, 7, 10))
if adv and adv.modification > 0:
bg_surf.blit(up_arrow, (11, 7))
elif adv and adv.modification < 0:
bg_surf.blit(down_arrow, (11, 7))
elif disadv and disadv.modification > 0:
bg_surf.blit(down_arrow, (11, 7))
elif disadv and disadv.modification < 0:
bg_surf.blit(up_arrow, (11, 7))
# End item
bg_surf = self.hp_bar.draw(bg_surf)
# Blit stat surf
if self.hit is not None or self.mt is not None:
if not self.stats_surf:
self.stats_surf = self.build_stat_surf()
bg_surf.blit(self.stats_surf, (0, height))
if not self.true_position or self.draw_method == 'splash':
self.determine_position(width, height)
if self.hit is not None or self.mt is not None:
blit_surf = engine.subsurface(bg_surf, (0, true_height//2 - int(true_height * self.blinds // 2), width, int(true_height * self.blinds)))
y_pos = self.true_position[1] + true_height//2 - int(true_height * self.blinds // 2)
else:
blit_surf = engine.subsurface(bg_surf, (0, height//2 - int(height * self.blinds // 2), width, int(height * self.blinds)))
y_pos = self.true_position[1] + height//2 - int(height * self.blinds // 2)
x, y = (self.true_position[0] + self.shake_offset[0], y_pos + self.shake_offset[1])
surf.blit(blit_surf, (x, y))
# Gem
if self.blinds >= 1 and self.gem and self.ordering:
if self.ordering == 'left':
position = (x + 2, y - 3)
elif self.ordering == 'right':
position = (x + 56, y - 3)
elif self.ordering == 'middle':
position = (x + 27, y - 3)
surf.blit(self.gem, position)
# Draw skill icons
for idx, skill_icon in enumerate(self.skill_icons):
skill_icon.update()
x, y = self.true_position[0] + width // 2, self.true_position[1] - 16 + idx * 16
skill_icon.draw(surf, (x, y))
self.skill_icons = [s for s in self.skill_icons if not s.done]
return surf
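# Rough timing sketch (comments only, numbers are illustrative): if a unit drops
# from 20 HP to 12 HP, HealthBar.update() sets time_for_change to
# max(time_for_change_min, 8 * speed) and lerps displayed_hp from 20 toward 12
# until the elapsed fraction reaches 1, then syncs old_hp and clears the
# transition flag. CombatHealthBar and the map bars reuse that interpolation and
# only differ in how the value is drawn (blips, pixel bar, or number).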
| 40.235577
| 148
| 0.577668
|
a732bd83a09970e0573171e767890147a1b64b94
| 485
|
py
|
Python
|
mla_game/apps/transcript/migrations/0023_auto_20171113_1841.py
|
amazingwebdev/django-FixIt
|
698aa7e4c45f07d86fbf209d1caca017ed136675
|
[
"MIT"
] | null | null | null |
mla_game/apps/transcript/migrations/0023_auto_20171113_1841.py
|
amazingwebdev/django-FixIt
|
698aa7e4c45f07d86fbf209d1caca017ed136675
|
[
"MIT"
] | null | null | null |
mla_game/apps/transcript/migrations/0023_auto_20171113_1841.py
|
amazingwebdev/django-FixIt
|
698aa7e4c45f07d86fbf209d1caca017ed136675
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-13 18:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('transcript', '0022_auto_20171103_1841'),
]
operations = [
migrations.AlterField(
model_name='transcriptphrasecorrectionvote',
name='upvote',
field=models.NullBooleanField(default=False),
),
]
| 23.095238
| 57
| 0.643299
|
8d6d5a5fca1977767da574fa8b14b52724466356
| 1,676
|
py
|
Python
|
userbot/modules/hentai.py
|
Wiki28/WikixCilik
|
a7e8d684e34174001af3e69d1f00de4e98243abe
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 4
|
2022-01-31T14:35:01.000Z
|
2022-03-31T06:42:39.000Z
|
userbot/modules/hentai.py
|
Wiki28/WikixCilik
|
a7e8d684e34174001af3e69d1f00de4e98243abe
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2022-03-19T15:54:46.000Z
|
2022-03-19T15:54:46.000Z
|
userbot/modules/hentai.py
|
Wiki28/WikixCilik
|
a7e8d684e34174001af3e69d1f00de4e98243abe
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 22
|
2022-01-29T20:29:35.000Z
|
2022-03-31T06:42:41.000Z
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for filter commands """
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.contacts import UnblockRequest
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import edit_or_reply, cilik_cmd
@cilik_cmd(pattern="hentai(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
link = event.pattern_match.group(1)
chat = "@nHentaiBot"
xx = await edit_or_reply(event, "`Processing...`")
async with event.client.conversation(chat) as conv:
try:
response = conv.wait_event(
events.NewMessage(incoming=True, from_users=424466890)
)
await event.client.send_message(chat, link)
response = await response
except YouBlockedUserError:
await event.client(UnblockRequest(chat))
await event.client.send_message(chat, link)
response = await response
if response.text.startswith("**Sorry I couldn't get manga from**"):
await xx.edit("**Saya pikir ini bukan link yang benar**")
else:
await xx.delete()
await event.client.send_message(event.chat_id, response.message)
CMD_HELP.update(
{
"hentai": f"**Plugin : **`hentai`\
\n\n • **Syntax :** `{cmd}hentai` <link nhentai>\
\n • **Function : **Melihat nhentai di telegra.ph XD\
"
}
)
| 33.52
| 78
| 0.652148
|
1e1172f4dd482cd6789ec231902e8fc979f0cd4f
| 20,618
|
py
|
Python
|
networks/vision_transformer.py
|
NoelShin/selfmask
|
396e0a3636b29591f505b6711be45eabe292919a
|
[
"MIT"
] | 11
|
2022-03-24T02:45:33.000Z
|
2022-03-30T02:53:33.000Z
|
networks/vision_transformer.py
|
NoelShin/selfmask
|
396e0a3636b29591f505b6711be45eabe292919a
|
[
"MIT"
] | 2
|
2022-03-25T11:08:34.000Z
|
2022-03-30T14:13:26.000Z
|
networks/vision_transformer.py
|
NoelShin/selfmask
|
396e0a3636b29591f505b6711be45eabe292919a
|
[
"MIT"
] | 1
|
2022-03-30T02:53:35.000Z
|
2022-03-30T02:53:35.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
from typing import Optional
import math
import warnings
from functools import partial
import torch
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.",
stacklevel=2
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
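# Numeric example (comment only): with drop_prob=0.1 and training=True, keep_prob
# is 0.9, so each sample's residual branch survives with probability 0.9 and is
# scaled by 1/0.9 when kept, keeping the expected output unchanged; with
# training=False the tensor passes through untouched.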
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5 # square root of dimension for normalisation
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape # B x (cls token + # patch tokens) x dim
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
# qkv: 3 x B x Nh x (cls token + # patch tokens) x (dim // Nh)
q, k, v = qkv[0], qkv[1], qkv[2]
# q, k, v: B x Nh x (cls token + # patch tokens) x (dim // Nh)
# q: B x Nh x (cls token + # patch tokens) x (dim // Nh)
# k.transpose(-2, -1) = B x Nh x (dim // Nh) x (cls token + # patch tokens)
# attn: B x Nh x (cls token + # patch tokens) x (cls token + # patch tokens)
attn = (q @ k.transpose(-2, -1)) * self.scale # @ operator is for matrix multiplication
attn = attn.softmax(dim=-1) # B x Nh x (cls token + # patch tokens) x (cls token + # patch tokens)
attn = self.attn_drop(attn)
# attn = B x Nh x (cls token + # patch tokens) x (cls token + # patch tokens)
# v = B x Nh x (cls token + # patch tokens) x (dim // Nh)
# attn @ v = B x Nh x (cls token + # patch tokens) x (dim // Nh)
# (attn @ v).transpose(1, 2) = B x (cls token + # patch tokens) x Nh x (dim // Nh)
x = (attn @ v).transpose(1, 2).reshape(B, N, C) # B x (cls token + # patch tokens) x dim
x = self.proj(x) # B x (cls token + # patch tokens) x dim
x = self.proj_drop(x)
return x, attn
class Block(nn.Module):
def __init__(self,
dim, num_heads,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding"""
def __init__(self, img_size=(224, 224), patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size[0] // patch_size) * (img_size[1] // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x)
x = x.flatten(2).transpose(1, 2) # B x (P_H * P_W) x C
return x
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(self,
img_size=(224, 224),
patch_size=16,
in_chans=3,
num_classes=0,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=nn.LayerNorm):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=(224, 224), # noel: this is to load pretrained model.
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer
) for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
self.depth = depth
self.embed_dim = self.n_embs = embed_dim
self.mlp_ratio = mlp_ratio
self.n_heads = num_heads
self.patch_size = patch_size
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def make_input_divisible(self, x: torch.Tensor) -> torch.Tensor:
"""Pad some pixels to make the input size divisible by the patch size."""
B, _, H_0, W_0 = x.shape
pad_w = (self.patch_size - W_0 % self.patch_size) % self.patch_size
pad_h = (self.patch_size - H_0 % self.patch_size) % self.patch_size
x = nn.functional.pad(x, (0, pad_w, 0, pad_h), value=0)
return x
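    # Worked example (comment only, sizes are illustrative): with patch_size 8,
    # a 3 x 255 x 317 input is padded on the bottom/right to 3 x 256 x 320 so it
    # splits evenly into a 32 x 40 grid of patches.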
def prepare_tokens(self, x):
B, nc, h, w = x.shape
x: torch.Tensor = self.make_input_divisible(x)
patch_embed_h, patch_embed_w = x.shape[-2] // self.patch_size, x.shape[-1] // self.patch_size
x = self.patch_embed(x) # patch linear embedding
# add positional encoding to each token
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.interpolate_pos_encoding(x, self.pos_embed, size=(patch_embed_h, patch_embed_w))
return self.pos_drop(x)
@staticmethod
def split_token(x, token_type: str):
if token_type == "cls":
return x[:, 0, :]
elif token_type == "patch":
return x[:, 1:, :]
else:
return x
# noel
def forward(self, x, layer: Optional[str] = None):
x: torch.Tensor = self.prepare_tokens(x)
features: dict = {}
for i, blk in enumerate(self.blocks):
x = blk(x)
features[f"layer{i + 1}"] = self.norm(x)
if layer is not None:
return features[layer]
else:
return features
# noel - for DINO's visual
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_tokens(
self,
x,
layers: list,
patch_tokens: bool = False,
norm: bool = True,
input_tokens: bool = False,
post_pe: bool = False
):
"""Return intermediate tokens."""
list_tokens: list = []
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
if input_tokens:
list_tokens.append(x)
pos_embed = self.interpolate_pos_encoding(x, self.pos_embed)
x = x + pos_embed
if post_pe:
list_tokens.append(x)
x = self.pos_drop(x)
for i, blk in enumerate(self.blocks):
x = blk(x) # B x # patches x dim
if layers is None or i in layers:
list_tokens.append(self.norm(x) if norm else x)
tokens = torch.stack(list_tokens, dim=1) # B x n_layers x (1 + # patches) x dim
if not patch_tokens:
return tokens[:, :, 0, :] # index [CLS] tokens only, B x n_layers x dim
else:
return tokens
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
pos_embed = self.interpolate_pos_encoding(x, self.pos_embed)
x = x + pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
if self.norm is not None:
x = self.norm(x)
return x[:, 0]
    def interpolate_pos_encoding(self, x, pos_embed, size=None):
"""Interpolate the learnable positional encoding to match the number of patches.
x: B x (1 + N patches) x dim_embedding
pos_embed: B x (1 + N patches) x dim_embedding
return interpolated positional embedding
"""
npatch = x.shape[1] - 1 # (H // patch_size * W // patch_size)
N = pos_embed.shape[1] - 1 # 784 (= 28 x 28)
        if npatch == N:
            return pos_embed
        if size is None:
            # some callers (e.g. forward_features, get_tokens) do not pass an
            # explicit size; assume a square patch grid in that case
            side = int(math.sqrt(npatch))
            size = (side, side)
class_emb, pos_embed = pos_embed[:, 0], pos_embed[:, 1:] # a learnable CLS token, learnable position embeddings
dim = x.shape[-1] # dimension of embeddings
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2), # B x dim x 28 x 28
size=size,
mode='bicubic',
align_corners=False
)
pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
pos_embed = torch.cat((class_emb.unsqueeze(0), pos_embed), dim=1)
return pos_embed
def forward_selfattention(self, x, return_interm_attn=False):
B, nc, w, h = x.shape
N = self.pos_embed.shape[1] - 1
x = self.patch_embed(x)
# interpolate patch embeddings
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic'
)
if w0 != patch_pos_embed.shape[-2]:
helper = torch.zeros(h0)[None, None, None, :].repeat(1, dim, w0 - patch_pos_embed.shape[-2], 1).to(x.device)
patch_pos_embed = torch.cat((patch_pos_embed, helper), dim=-2)
if h0 != patch_pos_embed.shape[-1]:
helper = torch.zeros(w0)[None, None, :, None].repeat(1, dim, 1, h0 - patch_pos_embed.shape[-1]).to(x.device)
            patch_pos_embed = torch.cat((patch_pos_embed, helper), dim=-1)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
pos_embed = torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
cls_tokens = self.cls_token.expand(B, -1, -1) # self.cls_token: 1 x 1 x emb_dim -> ?
x = torch.cat((cls_tokens, x), dim=1)
x = x + pos_embed
x = self.pos_drop(x)
if return_interm_attn:
list_attn = []
for i, blk in enumerate(self.blocks):
attn = blk(x, return_attention=True)
x = blk(x)
list_attn.append(attn)
return torch.cat(list_attn, dim=0)
else:
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
return blk(x, return_attention=True)
def forward_return_n_last_blocks(self, x, n=1, return_patch_avgpool=False):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
pos_embed = self.interpolate_pos_encoding(x, self.pos_embed)
x = x + pos_embed
x = self.pos_drop(x)
# we will return the [CLS] tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
# get only CLS token (B x dim)
output.append(self.norm(x)[:, 0])
if return_patch_avgpool:
x = self.norm(x)
# In addition to the [CLS] tokens from the `n` last blocks, we also return
# the patch tokens from the last block. This is useful for linear eval.
output.append(torch.mean(x[:, 1:], dim=1))
return torch.cat(output, dim=-1)
def return_patch_emb_from_n_last_blocks(self, x, n=1, return_patch_avgpool=False):
"""Return intermediate patch embeddings, rather than CLS token, from the last n blocks."""
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
pos_embed = self.interpolate_pos_encoding(x, self.pos_embed)
x = x + pos_embed
x = self.pos_drop(x)
# we will return the [CLS] tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
                output.append(self.norm(x)[:, 1:])  # keep only the patch tokens (B x n_patches x dim)
if return_patch_avgpool:
x = self.norm(x)
# In addition to the [CLS] tokens from the `n` last blocks, we also return
# the patch tokens from the last block. This is useful for linear eval.
output.append(torch.mean(x[:, 1:], dim=1))
return torch.stack(output, dim=-1) # B x n_patches x dim x n
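# Worked example of the positional-encoding interpolation above, assuming a
# model pretrained at 224x224 with 16x16 patches (N = 14 * 14 = 196 stored
# patch embeddings): a 480x640 input gives a 30 x 40 = 1200-token grid, so the
# stored 14x14 grid is resampled bicubically to 30x40, flattened back to
# 1 x 1200 x dim, and the [CLS] embedding is re-attached, for 1201 tokens total.
# The helper below exists only for illustration.
def _pos_embed_interpolation_demo(model):
    """`model` is assumed to be a VisionTransformer built with patch_size=16."""
    tokens = model.prepare_tokens(torch.randn(1, 3, 480, 640))
    return tokens.shape  # expected: (1, 1 + 30 * 40, model.embed_dim)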
def deit_tiny(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size,
embed_dim=192,
depth=12,
num_heads=3,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs)
return model
def deit_small(patch_size=16, **kwargs):
depth = kwargs.pop("depth") if "depth" in kwargs else 12
model = VisionTransformer(
patch_size=patch_size,
embed_dim=384,
depth=depth,
num_heads=6,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs
)
return model
def vit_base(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
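# Illustrative usage of the factory functions above: build a small backbone,
# run an image batch through it, and read the [CLS] feature of the last block
# from the per-layer dictionary returned by forward(). The helper below exists
# only for illustration.
def _deit_small_forward_demo():
    model = deit_small(patch_size=16)
    feats = model(torch.randn(2, 3, 224, 224))  # dict with keys layer1 ... layer12
    cls_last = feats["layer12"][:, 0, :]        # B x 384 [CLS] features
    return cls_last.shape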
class DINOHead(nn.Module):
def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
super().__init__()
nlayers = max(nlayers, 1)
if nlayers == 1:
self.mlp = nn.Linear(in_dim, bottleneck_dim)
else:
layers = [nn.Linear(in_dim, hidden_dim)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range(nlayers - 2):
layers.append(nn.Linear(hidden_dim, hidden_dim))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim))
self.mlp = nn.Sequential(*layers)
self.apply(self._init_weights)
self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
x = nn.functional.normalize(x, dim=-1, p=2)
x = self.last_layer(x)
return x
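# Illustrative pairing of a backbone with the projection head above, in the
# spirit of DINO-style training; out_dim=65536 is an assumed value, not one
# taken from this file. The helper below exists only for illustration.
def _dino_head_demo():
    backbone = deit_small(patch_size=16)
    head = DINOHead(in_dim=backbone.embed_dim, out_dim=65536)
    cls_feat = backbone(torch.randn(2, 3, 224, 224), layer="layer12")[:, 0, :]
    return head(cls_feat).shape  # expected: (2, 65536)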
| 36.17193
| 124
| 0.57508
|
e176a37f55dbe09e5239a92eb90fa2f4aa557455
| 5,230
|
py
|
Python
|
server_tools/device_server.py
|
krbjila/labrad_tools
|
5c510cb35090807807bfe6bd910b9c35edce6fce
|
[
"MIT"
] | 1
|
2020-11-30T01:45:08.000Z
|
2020-11-30T01:45:08.000Z
|
server_tools/device_server.py
|
krbjila/labrad_tools
|
5c510cb35090807807bfe6bd910b9c35edce6fce
|
[
"MIT"
] | 8
|
2021-02-23T00:18:12.000Z
|
2022-03-12T00:54:50.000Z
|
server_tools/device_server.py
|
krbjila/labrad_tools
|
5c510cb35090807807bfe6bd910b9c35edce6fce
|
[
"MIT"
] | 1
|
2020-11-08T14:54:21.000Z
|
2020-11-08T14:54:21.000Z
|
import re
import json
import types
import os
from twisted.internet.defer import returnValue, inlineCallbacks
from labrad.server import LabradServer, setting
from decorators import quickSetting
def underscore(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z])([A-Z])', r'\1_\2', s1).lower()
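# Example of the CamelCase -> snake_case conversion performed above, with
# hypothetical class names used purely for illustration:
#     underscore("SerialDevice")     -> "serial_device"
#     underscore("HTTPDeviceServer") -> "http_device_server"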
def add_quick_setting(srv, ID, setting_name, arg_type):
def setting(self, c, arg=None):
pass
setting.__name__ = str(setting_name)
setting.__doc__ = "get or change {} ".format(setting_name)
method = types.MethodType(setting, srv)
qs = quickSetting(ID, arg_type)(method)
setattr(srv, setting_name, qs)
def get_device_wrapper(device_config):
device_type = device_config['device_type']
_device_type = underscore(device_type)
module_path = 'devices.{}'.format(_device_type)
if os.path.isdir(_device_type):
module_path += _device_type
module = __import__(module_path, fromlist=[device_type])
print(module.__dict__.keys())
return getattr(module, device_type)
def get_connection_wrapper(device):
module_path = 'server_tools.connections.{}_connection'.format(device.connection_type.lower())
module = __import__(module_path, fromlist=[device.connection_type+'Connection'], level=1)
return getattr(module, device.connection_type+'Connection')
class DeviceWrapper(object):
def __init__(self, config={}):
for key, value in config.items():
setattr(self, key, value)
self.connection_name = self.servername + ' - ' + self.address
@inlineCallbacks
def initialize(self):
yield None
class DeviceServer(LabradServer):
def __init__(self, config_path='./config.json'):
LabradServer.__init__(self)
self.devices = {}
self.open_connections = {}
self.quick_settings = []
self.load_config(config_path)
# for i, (setting, arg_type) in enumerate(self.quick_settings):
# add_quick_setting(self, 10 + i, setting, arg_type)
def load_config(self, path=None):
self.config = lambda: None
if path is not None:
self.config_path = path
with open(self.config_path, 'r') as infile:
config = json.load(infile)
for key, value in config.items():
setattr(self.config, key, value)
@inlineCallbacks
def initServer(self):
for name, config in self.config.devices.items():
yield self.initialize_device(name, config)
@inlineCallbacks
def initialize_device(self, name, config):
device_wrapper = get_device_wrapper(config)
device_wrapper.name = name
device = device_wrapper(config)
try:
if device.connection_name not in self.open_connections:
yield self.init_connection(device)
device.connection = self.open_connections[device.connection_name]
self.devices[name] = device
yield device.initialize()
except Exception as e:
print(e)
print('could not initialize device {}'.format(name))
print('removing {} from available devices'.format(name))
self.devices.pop(name)
@inlineCallbacks
def init_connection(self, device):
connection = get_connection_wrapper(device)()
yield connection.initialize(device)
print('connection opened: {} - {}'.format(device.servername, device.address))
self.open_connections[device.connection_name] = connection
def get_device(self, c):
name = c.get('name')
if name is None:
raise Exception('select a device first')
return self.devices[name]
@setting(0, returns='*s')
def get_device_list(self, c):
return self.devices.keys()
@setting(1, name='s', returns=['s', ''])
def select_device(self, c, name):
if name not in self.devices.keys():
try:
yield self.reload_config(c, name)
except:
message = '{} is not the name of a configured device'.format(
name)
raise Exception(message)
returnValue(None)
c['name'] = name
device = self.get_device(c)
returnValue(json.dumps(device.__dict__, default=lambda x: None))
@setting(3, returns='b')
def reinit_connection(self, c):
device = self.get_device(c)
yield self.init_connection(device)
device.connection = self.open_connections[device.connection_name]
returnValue(True)
@setting(4)
def send_update(self, c):
device = self.get_device(c)
update = {c['name']: {p: getattr(device, p)
for p in device.update_parameters}}
yield self.update(json.dumps(update))
@setting(5, names=['*s', 's'])
def reload_config(self, c, names=None):
self.load_config()
if names is None:
names = self.config.devices
elif type(names).__name__ != 'list':
names = [names]
for name in names:
device = self.config.devices.get(name)
if device:
yield self.initialize_device(name, device)
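# Hypothetical sketch of the config.json consumed by load_config above: each
# top-level key becomes an attribute of self.config, and every entry under
# "devices" is handed to initialize_device, which looks up fields such as
# device_type, servername and address. All names and values below are made-up
# examples, not taken from a real configuration.
_EXAMPLE_CONFIG = {
    "devices": {
        "example_device": {
            "device_type": "ExampleDevice",  # hypothetical wrapper class name
            "connection_type": "Serial",     # hypothetical connection module prefix
            "servername": "example_server",  # hypothetical LabRAD server name
            "address": "COM1",               # hypothetical address
        },
    },
}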
| 35.100671
| 97
| 0.627916
|
221643fb5e94e107b26c17415ef42d4b8f2897a1
| 41,128
|
py
|
Python
|
PlanheatMappingModule/PlanHeatDMM/src/worker.py
|
Planheat/Planheat-Tool
|
9764fcb86d3898b232c4cc333dab75ebe41cd421
|
[
"MIT"
] | 2
|
2020-04-07T03:43:33.000Z
|
2021-03-23T13:17:42.000Z
|
PlanheatMappingModule/PlanHeatDMM/src/worker.py
|
Planheat/Planheat-Tool
|
9764fcb86d3898b232c4cc333dab75ebe41cd421
|
[
"MIT"
] | 1
|
2020-07-20T09:56:13.000Z
|
2020-07-22T10:26:06.000Z
|
PlanheatMappingModule/PlanHeatDMM/src/worker.py
|
Planheat/Planheat-Tool
|
9764fcb86d3898b232c4cc333dab75ebe41cd421
|
[
"MIT"
] | 1
|
2020-07-20T09:40:15.000Z
|
2020-07-20T09:40:15.000Z
|
# -*- coding: utf-8 -*-
"""
Process Thread
:author: Sergio Aparicio Vegas
:version: 0.1
:date: 10 Oct. 2017
"""
__docformat__ = "restructuredtext"
import sys
import os
import time
import subprocess
import gc
import platform
from json import dumps
from PyQt5 import QtGui,QtCore
from PyQt5.QtCore import QThread
from collections import OrderedDict
from src.alternative import Alternative
from src.complete import Complete
from config import config as Config
from utility.utils import showResults, clean_temp_files
from manageCsv.csv_reader import CSV_Reader
from manageCsv.csv_writer import CSV_Writer
from manageShape.shape_writer import Shape_Writer
from model.building import Building
from manageShape.shape_reader import Shape_Reader
from manageDB.db import DB
from myExceptions.exceptions import NotFoundResourceException, JavaProcessException
class Worker(QThread):
"""
Thread that executes the calculations of the process
"""
showMessageDialog = QtCore.pyqtSignal(str,str,str,object,name='showMessageDialog')
progress_update = QtCore.pyqtSignal(int,object, name= 'progress_update')
progress_total = QtCore.pyqtSignal(int,object,name='progress_total')
changeStatusProcessButton = QtCore.pyqtSignal(str,object,object, name='changeStatusProcessButton')
message_update = QtCore.pyqtSignal(str,object)
unlock_interface = QtCore.pyqtSignal(object)
loadShapeGeneratedFile = QtCore.pyqtSignal(str,str,object, name='loadShapeGeneratedFile')
def __init__(self,planHeatDMM):
QThread.__init__(self)
try:
self.clockThread = ClockThread(planHeatDMM)
if planHeatDMM.resources.thread_clock is None:
planHeatDMM.resources.thread_clock = self.clockThread
else:
self.clockThread = None
self.planHeatDMM = planHeatDMM
self.javaLog = Config.PLUGIN_DIR + os.path.sep + Config.LOG_PARAMS['path'] + os.path.sep + "java_output_" + planHeatDMM.resources.logDateName + "." + Config.LOG_PARAMS["logExt"]
self.fileJsonArg=Config.PLUGIN_DIR + os.path.sep + "temp" + os.path.sep + "planheatjavaargs.json"
self.fileJava=Config.PLUGIN_DIR + os.path.sep + "java" + os.path.sep + "jre1.8.0_151" + os.path.sep + "bin" + os.path.sep
self.fileJar=self.planHeatDMM.data.fileJar
self.fileLib=self.planHeatDMM.data.fileLib
self.mainJavaClass=Config.JAVA_MAIN_CLASS
#Input files
self.csvInFilename = Config.PLUGIN_DIR + os.path.sep + "temp" + os.path.sep + Config.INTERMEDIATE_FILE_CSV
self.shapeInFilename = self.planHeatDMM.data.inputShapeFile
#Baseline Demand
self.shapeOutBaselineFilename = self.planHeatDMM.data.outputSaveFile
self.csvOutBaselineFilename = self.planHeatDMM.data.outputSaveFile + ".csv"
self.csvOutBaselineTotalFilename = self.planHeatDMM.data.outputSaveFile + "_totalized.csv"
self.csvOutBaselineHourlyFilename = self.planHeatDMM.data.outputSaveFile + "_hourly.csv"
#Future Demand
self.shapeOutFutureFilename = self.planHeatDMM.data.outputSaveFile + "_future"
self.csvOutFutureFilename = self.planHeatDMM.data.outputSaveFile + "_future.csv"
self.csvOutFutureTotalFilename = self.planHeatDMM.data.outputSaveFile + "_future_totalized.csv"
self.csvOutFutureHourlyFilename = self.planHeatDMM.data.outputSaveFile + "_future_hourly.csv"
self.threadOptionsLog = self.planHeatDMM.data.outputSaveFile + "_options.txt"
self.log = self.planHeatDMM.resources.log
self.database = None
self.noa = self.planHeatDMM.resources.noa
self.planHeatDMM.data.closeWindow = False
self.dbFileName=Config.DB_PARAMS['databaseName']
self.boolRetrofittedScenarios = self.planHeatDMM.data.boolRetrofittedScenarios
self.boolHourlyDetailFile = self.planHeatDMM.data.boolHourlyDetailFile
self.boolAddShapeFields = self.planHeatDMM.data.boolAddShapeFields
self.userFieldShapeMap = self.planHeatDMM.data.userFieldShapeMap
self.inputCsvFile = None
self.inputShpFile = None
self.outputDetailBaselineCSVFile = None
self.outputTotalizedBaselineCSVFile = None
self.outputHourlyBaselineCSVFile = None
self.outputBaselineSHPFile = None
self.outputDetailFutureCSVFile = None
self.outputTotalizedFutureCSVFile = None
self.outputHourlyFutureCSVFile = None
self.outputFutureSHPFile = None
self.projectName = self.planHeatDMM.data.projectName
self.areaName = self.planHeatDMM.data.areaName
self.country_id = self.planHeatDMM.data.country_id
self.folderProject = Config.PLUGIN_DIR + os.path.sep + "temp" + os.path.sep
self.shpFilename = self.shapeInFilename
self.logFileName = self.planHeatDMM.resources.log.completeFileName
self.lidarDTMFolder = "" if self.planHeatDMM.data.DTMDirectory is None else self.planHeatDMM.data.DTMDirectory + os.path.sep
self.lidarDSMFolder = "" if self.planHeatDMM.data.DSMDirectory is None else self.planHeatDMM.data.DSMDirectory + os.path.sep
self.referentialSpaceEPSG = self.planHeatDMM.data.spatialReferenceEPSG
self.referentialSpaceWKT = self.planHeatDMM.data.spatialReferenceWKT
self.fieldMapping= None
self.fieldsSHPJavaPosition = self.planHeatDMM.data.fieldsSHPMappingPosition
self.buildingUseFloorHeightDict = self.planHeatDMM.data.buildingUseFloorHeightDict
self.ok = 0
self.error = 0
self.total = 0
except:
self.showMessageDialog.emit("CRITICAL","Thread Constructor", " __init__ Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]),self.planHeatDMM)
self.planHeatDMM.log.write_log("ERROR", "Worker Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
def run(self):
try:
self.log.write_log("INFO","Run Thread")
            # Delete temporary files left over from previous runs
if Config.LAUNCH_JAVA_PROCESS in ("Y","y"):
clean_temp_files(self.log)
if self.clockThread is not None:
self.clockThread.start( priority = Config.PROCESS_THREAD_PRIORITY)
# Create DB Connection per Thread
self.database = DB(self.log)
if self.database is None:
raise Exception("Error Creating Database Object")
else:
#Database Connection
self.database.connectDB("DEFERRED")
self.message_update.emit("Process Start",self.planHeatDMM)
self.progress_update.emit(0,self.planHeatDMM)
self.database.truncate_calculate_complete_totalized_table()
self.planHeatDMM.resources.log.write_log("INFO","Execute")
            self.initializeLogExecuteOptions(self.threadOptionsLog)
self.writeLogExecuteOptions(self.threadOptionsLog,"Execute")
message = "Calculate Method:{}".format(self.planHeatDMM.data.calculateMethod)
self.writeLogExecuteOptions(self.threadOptionsLog,message)
self.planHeatDMM.resources.log.write_log("INFO",message)
message = "Project Name:{}".format(self.planHeatDMM.data.projectName)
self.writeLogExecuteOptions(self.threadOptionsLog,message)
self.planHeatDMM.resources.log.write_log("INFO",message)
message = "Area Under Study:{}".format(self.planHeatDMM.data.areaName)
self.writeLogExecuteOptions(self.threadOptionsLog,message)
self.planHeatDMM.resources.log.write_log("INFO",message)
message = "Country:{}".format(self.planHeatDMM.data.country)
self.writeLogExecuteOptions(self.threadOptionsLog,message)
self.planHeatDMM.resources.log.write_log("INFO",message)
message = "Baseline Scenario Year:{}".format(self.planHeatDMM.data.baselineScenarioYear)
self.writeLogExecuteOptions(self.threadOptionsLog,message)
self.planHeatDMM.resources.log.write_log("INFO",message)
if self.boolRetrofittedScenarios:
message = "Future Scenario Year:{}".format(self.planHeatDMM.data.futureScenarioYear)
self.writeLogExecuteOptions(self.threadOptionsLog,message)
self.planHeatDMM.resources.log.write_log("INFO",message)
message = "Input Shape File:{}".format(self.planHeatDMM.data.inputShapeFile)
self.writeLogExecuteOptions(self.threadOptionsLog,message)
self.planHeatDMM.resources.log.write_log("INFO",message)
message = "Output Files:{}".format(self.planHeatDMM.data.outputSaveFile)
self.writeLogExecuteOptions(self.threadOptionsLog,message)
self.planHeatDMM.resources.log.write_log("INFO",message)
message = "Lidar DSM:{}".format(self.planHeatDMM.data.DSMDirectory)
self.writeLogExecuteOptions(self.threadOptionsLog,message)
self.planHeatDMM.resources.log.write_log("INFO",message)
message = "Lidar DTM:{}".format(self.planHeatDMM.data.DTMDirectory)
self.writeLogExecuteOptions(self.threadOptionsLog,message)
self.planHeatDMM.resources.log.write_log("INFO", message)
message = "Database file:{}".format(self.dbFileName)
self.writeLogExecuteOptions(self.threadOptionsLog,message)
self.planHeatDMM.resources.log.write_log("INFO", message)
result = self.process()
if result:
if self.planHeatDMM.data.processContinue:
if self.error == 0:
self.changeStatusProcessButton.emit("Process Ok, see log file",self.planHeatDMM.resources.icon_ok_icon, self.planHeatDMM)
self.showMessageDialog.emit("OK","Ok", "Process finished Ok",self.planHeatDMM)
else:
self.changeStatusProcessButton.emit("Process Warning, see log file",self.planHeatDMM.resources.icon_warn_icon, self.planHeatDMM)
self.showMessageDialog.emit("WARN","Warning", "Warning - The Process finished with records in error, check log file for further information",self.planHeatDMM)
if self.ok > 0 and self.planHeatDMM.data.pluginLaunch == True and self.planHeatDMM.data.boolOpenGeneratedShape == True:
try:
                            layerName = os.path.splitext(os.path.basename(self.shapeOutBaselineFilename))[0]
except:
layerName = self.projectName
self.loadShapeGeneratedFile.emit(self.shapeOutBaselineFilename + ".shp", layerName ,self.planHeatDMM)
if self.boolRetrofittedScenarios == True:
self.loadShapeGeneratedFile.emit(self.shapeOutFutureFilename + ".shp", layerName + "_future_scenario" ,self.planHeatDMM)
else:
if self.planHeatDMM.data.closeWindow == False and self.planHeatDMM.data.processContinue == False:
self.changeStatusProcessButton.emit("Process canceled, see log file",self.planHeatDMM.resources.icon_error_icon, self.planHeatDMM)
self.message_update.emit("Process canceled by the user",self.planHeatDMM)
self.showMessageDialog.emit("CRITICAL","Canceled", "Process canceled by the user",self.planHeatDMM)
elif self.planHeatDMM.data.closeWindow == False and self.planHeatDMM.data.processContinue == True:
self.changeStatusProcessButton.emit("Process Error, see log file",self.planHeatDMM.resources.icon_error_icon, self.planHeatDMM)
self.message_update.emit("Process error",self.planHeatDMM)
else:
self.changeStatusProcessButton.emit("Close Window requested by the User",self.planHeatDMM.resources.icon_error_icon, self.planHeatDMM)
self.message_update.emit("Close Window requested by the User",self.planHeatDMM)
self.unlock_interface.emit(self.planHeatDMM)
# Sleep time for finish de unlock interface signal
self.planHeatDMM.resources.log.write_log("INFO", "Finish Process")
time.sleep(1)
if self.database is not None:
self.database.closeDB()
self.database = None
self.log.write_log("INFO","Finish Thread")
if self.clockThread is not None and self.clockThread.isRunning():
self.clockThread.exit()
except:
self.log.write_log("ERROR", "Run Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
if self.clockThread is not None and self.clockThread.isRunning():
self.clockThread.exit()
if self.database is not None:
self.database.closeDB()
self.database = None
self.unlock_interface.emit(self.planHeatDMM)
self.showMessageDialog.emit("CRITICAL","ERROR", "Run Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]),self.planHeatDMM)
def kill_process(self):
self.processContinue = False
def process(self):
try:
if self.planHeatDMM.data.calculateMethod.lower() == "simplified":
self.sendMessage("INFO", "Simplified - Proccess Start")
method_object = Alternative(self.log,self.database,self.country_id, self.boolRetrofittedScenarios,self.planHeatDMM)
method_object.initialize()
self.sendMessage("INFO", "Simplified - Create Objet")
else:
self.sendMessage("INFO", "Complete - Proccess Start")
method_object = Complete(self.log,self.database,self.noa,self.country_id, self.boolRetrofittedScenarios, self.planHeatDMM)
method_object.initialize()
self.sendMessage("INFO", "Complete - Create Objet")
# Create the SHP Handler
if self.planHeatDMM.data.processContinue == True:
self.inputShpFile = Shape_Reader(self.log,self.shapeInFilename)
self.sendMessage("INFO", "Create SHP Input Handler")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Create SHP Input Handler Ok")
return False
# Create the CSV IN Handler
if self.planHeatDMM.data.processContinue == True:
self.inputCsvFile = CSV_Reader(self.log, self.csvInFilename,method_object.inputFields)
self.sendMessage("INFO", "Create CSV Input Handler Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Create CSV Input Handler")
return False
# Create Baseline Detail Output CSV
if self.planHeatDMM.data.processContinue == True:
self.outputDetailBaselineCSVFile = CSV_Writer(self.log,self.csvOutBaselineFilename,method_object.outputBaselineDetailFieldsCsv,self.boolAddShapeFields ,self.userFieldShapeMap)
self.sendMessage("INFO", "Create Baseline CSV - Detail Output Handler Ok")
else:
self.sendMessage("INFO","Process Cancel Request By User - Create Baseline CSV Detail Output Handler")
return False
# Create Future Detail Output CSV
if self.boolRetrofittedScenarios == True:
if self.planHeatDMM.data.processContinue == True:
self.outputDetailFutureCSVFile = CSV_Writer(self.log,self.csvOutFutureFilename,method_object.outputFutureDetailFieldsCsv,self.boolAddShapeFields ,self.userFieldShapeMap)
self.sendMessage("INFO", "Create Future CSV Detail Output Handler Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Create Future CSV Detail Output Handler")
return False
# Create Baseline Totalized Output CSV
if self.planHeatDMM.data.processContinue == True:
self.outputTotalizedBaselineCSVFile = CSV_Writer(self.log,self.csvOutBaselineTotalFilename,method_object.outputBaselineTotalizedFieldsCsv)
self.sendMessage("INFO", "Create Baseline CSV - Totalized Output Handler Ok")
else:
self.sendMessage("INFO","Process Cancel Request By User - Create Baseline CSV - Totalized Output Handler")
return False
# Create Future Totalized Output CSV
if self.boolRetrofittedScenarios == True:
if self.planHeatDMM.data.processContinue == True:
self.outputTotalizedFutureCSVFile = CSV_Writer(self.log,self.csvOutFutureTotalFilename,method_object.outputFutureTotalizedFieldsCsv)
self.sendMessage("INFO", "Create Future CSV - Totalized Output Handler Ok")
else:
self.sendMessage("INFO","Process Cancel Request By User - Create Future CSV - Totalized Output Handler")
return False
# Create Baseline Hourly Output CSV
if self.boolHourlyDetailFile == True:
if self.planHeatDMM.data.processContinue == True:
self.outputHourlyBaselineCSVFile = CSV_Writer(self.log,self.csvOutBaselineHourlyFilename,method_object.outputBaselineHourlyFieldsCsv)
self.sendMessage("INFO", "Create Baseline CSV - Hourly Output Handler Ok")
else:
self.log.write_log("INFO", "Process Cancel Request By User - Create CSV Hourly Output Handler")
return False
# Create Future Hourly Output CSV
if self.boolHourlyDetailFile == True and self.boolRetrofittedScenarios == True:
if self.planHeatDMM.data.processContinue == True:
self.outputHourlyFutureCSVFile = CSV_Writer(self.log,self.csvOutFutureHourlyFilename,method_object.outputFutureHourlyFieldsCsv)
self.sendMessage("INFO", "Create Future CSV - Hourly Output Handler Ok")
else:
self.log.write_log("INFO", "Process Cancel Request By User - Create CSV Hourly Output Handler")
return False
#Read Input Shape file
self.sendMessage("INFO","Reading Input Shape File")
if self.planHeatDMM.data.processContinue == True:
self.inputShpFile.readShapeFile()
self.inputShpFile.createGeometryIndex(self.fieldsSHPJavaPosition)
self.sendMessage("INFO","Read Input Shape File Ok")
else:
self.sendMessage("INFO","Process Cancel Request By User - Read Input Shape File")
return False
# Create Baseline SHP Output
if self.planHeatDMM.data.processContinue == True:
self.outputBaselineSHPFile = Shape_Writer(self.log,self.shapeOutBaselineFilename,method_object.outputBaselineDetailFieldsShape,self.boolAddShapeFields ,self.userFieldShapeMap, self.referentialSpaceWKT)
self.sendMessage("INFO","Create Baseline - Shape File Output Handler")
else:
self.sendMessage("INFO","Process Cancel Request By User - Create Shape File Output Handler")
return False
# Create future SHP Output
if self.boolRetrofittedScenarios == True:
if self.planHeatDMM.data.processContinue == True:
self.outputFutureSHPFile = Shape_Writer(self.log,self.shapeOutFutureFilename,method_object.outputFutureDetailFieldsShape,self.boolAddShapeFields ,self.userFieldShapeMap, self.referentialSpaceWKT)
self.sendMessage("INFO","Create future - Shape File Output Handler")
else:
self.sendMessage("INFO","Process Cancel Request By User - Create Shape File Output Handler")
return False
# Create JSON file for Java Callback
if self.planHeatDMM.data.processContinue == True:
self.jsonArgs4Java(self.log, self.fileJsonArg,self.folderProject,self.shpFilename,self.logFileName,self.lidarDTMFolder,\
self.lidarDSMFolder, self.referentialSpaceEPSG, self.fieldsSHPJavaPosition,self.buildingUseFloorHeightDict)
self.sendMessage("INFO","Create JSON File for Java Process Ok")
else:
self.sendMessage("INFO","Process Cancel Request By User - Create JSON File for Java Process")
return False
# CALL TO JAVA PROCESS
if self.planHeatDMM.data.processContinue == True:
if Config.LAUNCH_JAVA_PROCESS in ("Y","y"):
self.sendMessage("INFO","Call to Java Process")
self.message_update.emit("Running Java Process...",self.planHeatDMM)
self.javaLaunchProcess(self.log,self.javaLog, self.fileJava, self.fileJar,self.fileLib, self.mainJavaClass, self.fileJsonArg)
self.planHeatDMM.resources.javaProcessObject = None
self.sendMessage("INFO","Finish Java Process Ok")
else:
self.sendMessage("INFO","Process Cancel Request By User - Call to Java Process")
return False
#Read CSV in file
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO","Reading java CSV")
data = self.inputCsvFile.csv_read()
self.sendMessage("INFO","Finish CSV Read Ok")
else:
self.sendMessage("INFO","Process Cancel Request By User - Reading CSV file")
return False
self.progress_total.emit(len(data),self.planHeatDMM)
building_list = []
self.sendMessage("INFO","Data Calculate Processing Start")
# Process Data
for i, row in enumerate(data):
if self.planHeatDMM.data.processContinue == True:
building = Building(self.log,self.projectName,self.areaName,self.country_id,row)
self.message_update.emit("Processing data calculation - Building {}/{}".format(i+1,len(data)),self.planHeatDMM)
self.assignBuildingShapeGeometryAndRecord(self.inputShpFile,building)
self.progress_update.emit(i+1,self.planHeatDMM)
building = method_object.calculateConsumption(building)
building_list.append(building)
if self.boolHourlyDetailFile and building.Regstatus and building.Regprocess:
#write rows on CSV file with baseline Hourly per building
self.outputHourlyBaselineCSVFile.writeRowsCSV(building.hourlyBaselineDemandList)
#write rows on CSV file with Future Hourly per building
if self.boolRetrofittedScenarios:
self.outputHourlyFutureCSVFile.writeRowsCSV(building.hourlyFutureDemandList)
building.hourlyBaselineDemandList = []
building.hourlyFutureDemandList = []
else:
self.sendMessage("INFO","Process Cancel Request By User - Data Calculate Processing")
return False
self.progress_update.emit(len(data),self.planHeatDMM)
self.sendMessage("INFO", "Processing data calculation - Building {}/{}".format(len(data),len(data)))
self.sendMessage("INFO", "Free memory reources - CSV input file and Geometry index")
self.freeMemoryResources(self.inputCsvFile,self.inputShpFile.geometryAndRecordBuildingIndex)
#Retrieve totals for selected calculation method
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Calculate Totalized Data.")
method_object.calculateTotalizedConsumptionDemand()
self.sendMessage("INFO", "Calculate Totalized Data Ok")
else:
self.log.write_log("INFO", "Process Cancel Request By User - Calculate Totalized Data")
return False
#Write Baseline Detail CSV file
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Writing Output CSV - Baseline Detail file")
self.outputDetailBaselineCSVFile.writeRowsCSV(building_list)
self.sendMessage("INFO", "Writing Output CSV - Baseline Detail file Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Writing CSV file")
return False
#Write Future Detail CSV file
if self.boolRetrofittedScenarios == True:
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Writing Output CSV - Future Detail file")
self.outputDetailFutureCSVFile.writeRowsCSV(building_list)
self.sendMessage("INFO", "Writing Output CSV - Future Detail file Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Writing CSV file")
return False
#Write Baseline Totalized CSV file
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Writing Output CSV - Baseline Totalized file")
self.outputTotalizedBaselineCSVFile.writeRowsCSV(method_object.baselineTotalizedDemandList)
self.sendMessage("INFO", "Writing Output CSV - Baseline Totalized file Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Writing CSV file")
return False
#Write Future Totalized CSV file
if self.boolRetrofittedScenarios == True:
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Writing Output CSV - Future Totalized file")
self.outputTotalizedFutureCSVFile.writeRowsCSV(method_object.futureTotalizedDemandList)
self.sendMessage("INFO", "Writing Output CSV - Future Totalized file Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Writing CSV file")
return False
self.sendMessage("INFO", "Free memory resources output CSV files")
self.freeMemoryResources(self.outputDetailBaselineCSVFile,self.outputDetailFutureCSVFile,\
self.outputHourlyBaselineCSVFile,self.outputHourlyFutureCSVFile,\
self.outputTotalizedBaselineCSVFile, self.outputTotalizedFutureCSVFile,callGC=False)
self.sendMessage("INFO", "Free memory resources method object data")
self.freeMemoryResources(method_object)
#Populate SHP file - Baseline
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Populate Baseline Output Shape file")
self.outputBaselineSHPFile.populateAll(building_list)
self.sendMessage("INFO", "Populate Baseline Output Shape file Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - populate Qgis Files")
return False
# Save QGIS files - Baseline
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Saving Output Baseline Qgis files")
self.outputBaselineSHPFile.saveQgisFiles()
self.sendMessage("INFO", "Saving Output Baseline Qgis files Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Writing Qgis files")
return False
self.sendMessage("INFO", "Free memory resources baseline shape file")
self.freeMemoryResources(self.outputBaselineSHPFile)
if self.boolRetrofittedScenarios == True:
#Populate SHP file - Future
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Populate Future Output Shape file")
self.outputFutureSHPFile.populateAll(building_list)
self.sendMessage("INFO", "Populate Future Output Shape file Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - populate Qgis Files")
return False
# Save QGIS files - Future
if self.planHeatDMM.data.processContinue == True:
self.sendMessage("INFO", "Saving Output Future Qgis files")
self.outputFutureSHPFile.saveQgisFiles()
self.sendMessage("INFO", "Saving Output Future Qgis files Ok")
else:
self.sendMessage("INFO", "Process Cancel Request By User - Writing Qgis files")
return False
self.sendMessage("INFO", "Free memory resources future shape file")
self.freeMemoryResources(self.outputFutureSHPFile, callGC=False)
self.sendMessage("INFO", "Free memory resources building list data")
self.freeMemoryResources(building_list)
self.total, self.ok, self.error, self.skip = showResults(building_list)
result = "Processed:{} buildings - Ok:{} - Error:{} - Skipped:{}".format(self.total,self.ok, self.error,self.skip)
self.sendMessage("INFO", result)
self.log.write_log("INFO", "Simplified Proccess End")
return True
except Exception as e:
self.log.write_log("ERROR ", "Thread process - Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
self.showMessageDialog.emit("CRITICAL","ERROR", "process Unexpected error:" + str(e),self.planHeatDMM)
return False
def freeMemoryResources(self,*args,callGC=True,**kwargs):
try:
for arg in args:
#print("free", asizeof.asizeof(arg),str(arg))
del arg
if callGC:
gc.collect()
except:
self.log.write_log("ERROR ","freeMemoryResources Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
def assignBuildingShapeGeometryAndRecord(self,shpReader,building):
try:
position = shpReader.geometryAndRecordBuildingIndex[str(building.reference)]
building.shpGeometryData = shpReader.geometryAndRecordBuilding[position].shape
building.shpRecordData = shpReader.geometryAndRecordBuilding[position].record
except KeyError:
self.log.write_log("ERROR ","assignBuildingShapeGeometryAndRecord Not Exists building reference in shapefile id:" + str(building.reference))
building.Regstatus = False
except:
self.log.write_log("ERROR ","assignBuildingShapeGeometryAndRecord Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
building.Regstatus = False
def sendMessage(self,level,message):
try:
self.log.write_log(level, message)
self.message_update.emit(message,self.planHeatDMM)
except Exception as e:
self.log.write_log("ERROR ", "process Unexpected error:" + str(e))
self.showMessageDialog.emit("ERROR", "process Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]),self.planHeatDMM)
def jsonArgs4Java(self,log,fileArg,folderProject,shpFilename,logFileName,lidarDTMFolder,lidarDSMFolder,referentialSpace,fieldMapping,buildingUseFloorHeightDict):
try:
with open(fileArg, "w") as jsonFile:
jsonPythonJava = OrderedDict()
jsonPythonJava["fieldMapping"]=fieldMapping
jsonPythonJava["logFileName"] = logFileName
jsonPythonJava["lidarDTMFolder"] = lidarDTMFolder
jsonPythonJava["lidarDSMFolder"] = lidarDSMFolder
jsonPythonJava["shpFilename"] = shpFilename
jsonPythonJava["referentialSpace"] = referentialSpace
jsonPythonJava["folderProject"] = folderProject
jsonPythonJava["floorHeight"] = buildingUseFloorHeightDict
jsonFile.write(dumps(jsonPythonJava))
log.write_log("INFO", "Write JSON Args File")
except:
log.write_log("ERROR", "jsonArgs4Java unexpected error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
raise
def javaLaunchProcess(self,log,javaLog, fileJava, fileJar,fileLib, mainJavaClass,fileJsonArg):
try:
javaConsoleOutput = ""
run_process = ""
if os.path.isfile(fileJar):
if platform.system() == 'Windows':
#Add command Windows, to do not visible CMD
try:
run_test_process = fileJava + 'java -version '
CREATE_NO_WINDOW = 0x08000000
java_process = subprocess.Popen(run_test_process,shell=False,stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL,creationflags = CREATE_NO_WINDOW)
WindowMode = CREATE_NO_WINDOW
except Exception as e:
print("excepcion " + str(e))
WindowMode = subprocess.SW_HIDE
#run_process = fileJava + 'java -cp "' + fileJar + ';' + fileLib + '" ' + mainJavaClass + ' ' + fileJsonArg
else:
WindowMode=0
foutput = open(javaLog, "w")
run_process = fileJava + 'java -XX:+UseG1GC -Xms1g -Xmx4g -cp "' + fileJar + ';' + fileLib + '" ' + mainJavaClass + ' ' + fileJsonArg
#self.planHeatDMM.resources.javaProcessObject = subprocess.run(run_process,check=True,shell=False,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
#java_process = subprocess.Popen(run_process,shell=False,stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL,creationflags = WindowMode)
java_process = subprocess.Popen(run_process,shell=False,stdout=foutput,stderr=foutput,creationflags = WindowMode)
self.planHeatDMM.resources.javaProcessObject = java_process
log.write_log("INFO","Java execute command " + str(java_process.args))
returnCode = java_process.wait()
foutput.close()
if returnCode and self.planHeatDMM.data.processContinue is not False:
#Process Error
raise JavaProcessException("Error on Java Process , exit status code:{:d}".format(returnCode))
else:
raise NotFoundResourceException("jar file not found at location " + fileJar)
except JavaProcessException:
log.write_log("ERROR","Execute error " + run_process)
log.write_log("ERROR", "javaLaunchProcess JavaProcessException JAVA error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
raise
except subprocess.CalledProcessError as e:
javaConsoleOutput = str(e.stdout, 'utf-8', errors='ignore')
log.write_log("ERROR","Java Console Output " + javaConsoleOutput)
log.write_log("ERROR","Execute error " + run_process)
log.write_log("ERROR", "javaLaunchProcess CalledProcessError JAVA error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
raise
except OSError as e:
javaConsoleOutput = str(e)
log.write_log("ERROR","Java Console Output " + javaConsoleOutput)
log.write_log("ERROR","Execute error " + run_process)
log.write_log("ERROR", "javaLaunchProcess OSError JAVA error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
raise
except Exception as e:
log.write_log("ERROR", "javaLaunchProcess launching new process JAVA Unexpected error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
raise
    def initializeLogExecuteOptions(self,file_handler):
try:
if Config.OPTIONS_FILE.lower() == "y":
with open(file_handler, "w") as optionsFile:
pass
except Exception as e:
self.log.write_log("ERROR", "initizalizeLogExecuteOptions Unexpected error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
def writeLogExecuteOptions(self,file_handler, message):
try:
if Config.OPTIONS_FILE.lower() == "y":
with open(file_handler, "a") as optionsFile:
optionsFile.write(message + "\n")
except Exception as e:
self.log.write_log("ERROR", "writeLogExecuteOptions Unexpected error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
class ClockThread(QThread):
""" Interface Clock for worker thread """
clock_refresh = QtCore.pyqtSignal(int,object,name='clock_refresh')
def __init__(self,planHeatDMM):
QThread.__init__(self)
try:
self.planHeatDMM = planHeatDMM
self.timeStart = None
self.timer = None
except Exception as e:
self.planHeatDMM.resources.log.write_log("ERROR", "ClockThread Unexpected error:" + str(e))
def run(self):
try:
self.timer = QtCore.QTimer()
self.timeStart = time.time()
self.timer.timeout.connect(lambda:self.timeClock())
self.timer.setInterval(1000)
self.timer.start()
self.exec_()
except Exception as e:
self.planHeatDMM.resources.log.write_log("ERROR", "ClockThread Unexpected error:" + str(e))
def timeClock(self):
try:
value = int(time.time() - self.timeStart)
self.clock_refresh.emit(value,self.planHeatDMM)
except Exception:
pass
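# Illustrative sketch of how the plugin is expected to drive the worker thread.
# `plan_heat_dmm` must be the fully initialised plugin object the constructor
# requires and `on_progress` a slot accepting (int, object); both are
# assumptions, not objects defined in this module.
def _start_worker_demo(plan_heat_dmm, on_progress):
    worker = Worker(plan_heat_dmm)
    worker.progress_update.connect(on_progress)
    worker.start(priority=Config.PROCESS_THREAD_PRIORITY)
    return worker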
| 52.392357
| 218
| 0.597039
|
7701ead48e65a3fbaab532b2fc7bc9bd059aa6eb
| 6,777
|
py
|
Python
|
tests/cli/test_projects.py
|
veger/jexia-cli
|
d7652030292a0276659cc0710b2bf4861c8ba568
|
[
"MIT"
] | 6
|
2020-02-03T11:02:29.000Z
|
2020-04-02T19:16:13.000Z
|
tests/cli/test_projects.py
|
veger/jexia-cli
|
d7652030292a0276659cc0710b2bf4861c8ba568
|
[
"MIT"
] | 3
|
2020-04-02T12:42:08.000Z
|
2021-06-10T23:51:13.000Z
|
tests/cli/test_projects.py
|
veger/jexia-cli
|
d7652030292a0276659cc0710b2bf4861c8ba568
|
[
"MIT"
] | 2
|
2020-02-05T09:05:32.000Z
|
2020-12-04T13:22:48.000Z
|
import mock
import pytest
from jexia_cli.formatters import format_timestamp_to_utc
from tests.cli import run_cmd, SHELL_CONFIG
CREATED_AT = 1580208898
PROJECT_LIST_RESP = {
'accounts': [{
'account_owner_id': '3054b850-a1d9-4860-8b4e-2b63b7322907',
'plan_id': 'cbaeb217-aadf-41c3-87c9-8b8bb0a29969',
'plan_name': 'Hobby',
'plan_amount': 0,
'is_free_plan': True
}],
'max_number_free': '3',
'projects': [{
'id': '3054b850-a1d9-4860-8b4e-2b63b7322907',
'owner': '284e31e6-b21b-418f-b19e-21d1c741db63',
'is_owner': True,
'name': 'integration',
'description': '<nil>',
'created_at': CREATED_AT
}]
}
PROJECT_CREATE_RESP = {
'is_owner': 'true',
'description': '<nil>',
'created_at': '%s' % CREATED_AT,
'owner': '284e31e6-b21b-418f-b19e-21d1c741db63',
'id': '5f0c9f45-cbd8-4054-8158-b64c39fb8be9',
'name': 'integration'
}
PROJECT_SHOW_RESP = {
'collaborators': None,
'is_owner': 'true',
'name': 'integration',
'created_at': '%s' % CREATED_AT,
'owner': '284e31e6-b21b-418f-b19e-21d1c741db63',
'id': '5f0c9f45-cbd8-4054-8158-b64c39fb8be9',
'description': '<nil>'
}
@mock.patch('jexia_sdk.http.HTTPClient.request',
return_value=PROJECT_LIST_RESP)
@mock.patch('jexia_sdk.http.HTTPClient.auth_management')
def test_project_list(mock_auth, mock_req):
cur_projects = run_cmd(['project list', '-f=json'])
mock_req.assert_called_once_with(
limit=10, method='GET', page=0, url='/projects')
resp = [
{
"id": "3054b850-a1d9-4860-8b4e-2b63b7322907",
"name": "integration",
"description": "",
"created_at": format_timestamp_to_utc(CREATED_AT)
}
]
assert cur_projects == resp
@mock.patch('jexia_sdk.http.HTTPClient.request',
return_value={'projects': []})
@mock.patch('jexia_sdk.http.HTTPClient.auth_management')
def test_project_list_options(mock_auth, mock_req):
cur_projects = run_cmd(['project list', '-f=json', '--limit=100',
'--page=20'])
mock_req.assert_called_once_with(
limit=100, method='GET', page=20, url='/projects')
assert cur_projects == []
@mock.patch('jexia_sdk.http.HTTPClient.request',
return_value=PROJECT_CREATE_RESP)
@mock.patch('jexia_sdk.http.HTTPClient.auth_management')
def test_project_create(mock_auth, mock_req):
res = run_cmd(['project create', '-f=json', '--name=test'])
mock_req.assert_called_once_with(
method='POST', url='/project',
data={'name': 'test', 'description': None})
resp = {
'id': '5f0c9f45-cbd8-4054-8158-b64c39fb8be9',
'name': 'integration',
'description': '',
'created_at': format_timestamp_to_utc(CREATED_AT)
}
assert res == resp
@mock.patch('jexia_sdk.http.HTTPClient.request', return_value=[])
@mock.patch('jexia_sdk.http.HTTPClient.auth_management')
def test_project_create_options(mock_auth, mock_req):
run_cmd(['project create', '-f=json', '--name=test',
'--description=descr'])
mock_req.assert_called_once_with(
method='POST', url='/project',
data={'name': 'test', 'description': 'descr'})
@mock.patch('jexia_sdk.http.HTTPClient.request', return_value=[])
@mock.patch('jexia_sdk.http.HTTPClient.auth_management')
def test_project_create_options_fail(mock_auth, mock_req):
with pytest.raises(SystemExit):
run_cmd(['project create'])
@mock.patch('jexia_sdk.http.HTTPClient.request',
return_value=PROJECT_SHOW_RESP)
@mock.patch('jexia_sdk.http.HTTPClient.auth_management')
def test_project_show(mock_auth, mock_req):
res = run_cmd(['project show', '-f=json', 'test'])
mock_req.assert_called_once_with(
method='GET', url='/project/test')
resp = {
'id': '5f0c9f45-cbd8-4054-8158-b64c39fb8be9',
'name': 'integration',
'description': '',
'created_at': format_timestamp_to_utc(CREATED_AT),
'collaborators': None
}
assert res == resp
@mock.patch('jexia_sdk.http.HTTPClient.request', return_value=[])
@mock.patch('jexia_sdk.http.HTTPClient.auth_management')
def test_project_show_options_fail(mock_auth, mock_req):
with pytest.raises(SystemExit):
run_cmd(['project show'], json_output=False)
@mock.patch('jexia_sdk.http.HTTPClient.request', return_value='')
@mock.patch('jexia_sdk.http.HTTPClient.auth_management')
def test_project_delete(mock_auth, mock_req):
res = run_cmd(['project delete', 'test', '--yes-i-really-want-to-delete'],
json_output=False)
mock_req.assert_called_once_with(
method='DELETE', url='/project/test',
data={'password': SHELL_CONFIG['password']})
assert res == ''
@mock.patch('jexia_sdk.http.HTTPClient.request', return_value='')
@mock.patch('jexia_sdk.http.HTTPClient.auth_management')
def test_project_delete_options_fail(mock_auth, mock_req):
with pytest.raises(SystemExit):
run_cmd(['project delete'], json_output=False)
_CREATED_RESOURCE = None
@pytest.fixture()
def integration_teardown():
yield
if _CREATED_RESOURCE:
run_cmd(['project delete', _CREATED_RESOURCE,
'--yes-i-really-want-to-delete'])
@pytest.mark.integration
@pytest.mark.skip(
reason="This test skipped because no way to delete project")
def test_projects_integration(integration_teardown):
global _CREATED_RESOURCE
# get current projects
cur_projects = run_cmd(['project list', '-f=json'])
# create new project
new_project = run_cmd(['project create',
'-f=json',
'--name=integration'])
_CREATED_RESOURCE = new_project.get('id')
assert 'id' in new_project
assert 'name' in new_project
assert 'description' in new_project
assert 'created_at' in new_project
assert 'integration' == new_project['name']
# get project
project = run_cmd(['project show',
'-f=json',
'%s' % new_project['id']])
assert 'id' in project
assert 'name' in project
assert 'description' in project
assert 'created_at' in project
assert 'collaborators' in project
assert new_project['id'] == project['id']
# check number of projects
projects = run_cmd(['project list', '-f=json'])
assert len(cur_projects) + 1 == len(projects)
# delete project
output = run_cmd(['project delete',
'%s' % new_project['id'],
'--yes-i-really-want-to-delete'], json_output=False)
assert '' == output
# check number of projects
projects = run_cmd(['project list', '-f=json'])
assert len(cur_projects) == len(projects)
| 34.401015
| 78
| 0.649845
|
35d634568fbcfa5e6d4277017a67f008af3c9246
| 3,333
|
py
|
Python
|
tests/test_helpers/test_acn/test_agent_record.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | 28
|
2021-10-31T18:54:14.000Z
|
2022-03-17T13:10:43.000Z
|
tests/test_helpers/test_acn/test_agent_record.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | 66
|
2021-10-31T11:55:48.000Z
|
2022-03-31T06:26:23.000Z
|
tests/test_helpers/test_acn/test_agent_record.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for acn helper module."""
import pytest
from aea.configurations.constants import DEFAULT_LEDGER
from aea.crypto.registries import make_crypto
from aea.helpers.acn.agent_record import AgentRecord
from aea.helpers.base import CertRequest
from tests.conftest import _process_cert
def test_agent_record(change_directory):
"""Test signature and public key proper retrieval from a CertRequest"""
agent_key_1 = make_crypto(DEFAULT_LEDGER)
agent_key_2 = make_crypto(DEFAULT_LEDGER)
peer_public_key_1 = make_crypto(DEFAULT_LEDGER).public_key
peer_public_key_2 = make_crypto(DEFAULT_LEDGER).public_key
cert_path = "test_acn_cert.txt"
cert = CertRequest(
peer_public_key_1,
"test_service",
DEFAULT_LEDGER,
"2021-01-01",
"2022-01-01",
"{public_key}",
cert_path,
)
_process_cert(agent_key_1, cert, change_directory)
# success
agent_record = AgentRecord.from_cert_request(
cert, agent_key_1.address, peer_public_key_1
)
assert (
agent_record.address == agent_key_1.address
and agent_record.public_key == agent_key_1.public_key
and agent_record.representative_public_key == peer_public_key_1
and agent_record.signature == cert.get_signature()
and agent_record.message == cert.get_message(peer_public_key_1)
)
# success
agent_record = AgentRecord(
agent_key_1.address,
peer_public_key_1,
cert.identifier,
cert.ledger_id,
cert.not_before,
cert.not_after,
cert.message_format,
cert.get_signature(),
)
assert (
agent_record.address == agent_key_1.address
and agent_record.public_key == agent_key_1.public_key
and agent_record.representative_public_key == peer_public_key_1
and agent_record.signature == cert.get_signature()
and agent_record.message == cert.get_message(peer_public_key_1)
)
# error: wrong signer
with pytest.raises(
ValueError,
match="Invalid signature for provided representative_public_key and agent address!",
):
AgentRecord.from_cert_request(cert, agent_key_2.address, peer_public_key_1)
# error: wrong signer
with pytest.raises(
ValueError,
match="Invalid signature for provided representative_public_key and agent address!",
):
AgentRecord.from_cert_request(cert, agent_key_1.address, peer_public_key_2)
| 34.360825
| 92
| 0.676568
|
678a7fe7e189a5e744c78f006156fe9521aadcdb
| 55,069
|
py
|
Python
|
REDSI_1160929_1161573/boost_1_67_0/tools/build/src/build/project.py
|
Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo
|
eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8
|
[
"MIT"
] | null | null | null |
REDSI_1160929_1161573/boost_1_67_0/tools/build/src/build/project.py
|
Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo
|
eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8
|
[
"MIT"
] | null | null | null |
REDSI_1160929_1161573/boost_1_67_0/tools/build/src/build/project.py
|
Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo
|
eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8
|
[
"MIT"
] | 1
|
2019-03-08T11:06:22.000Z
|
2019-03-08T11:06:22.000Z
|
# Status: ported.
# Base revision: 64488
# Copyright 2002, 2003 Dave Abrahams
# Copyright 2002, 2005, 2006 Rene Rivera
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Implements project representation and loading. Each project is represented
# by:
# - a module where all the Jamfile content live.
# - an instance of 'project-attributes' class.
# (given a module name, can be obtained using the 'attributes' rule)
# - an instance of 'project-target' class (from targets.jam)
# (given a module name, can be obtained using the 'target' rule)
#
# Typically, projects are created as result of loading a Jamfile, which is done
# by rules 'load' and 'initialize', below. First, module for Jamfile is loaded
# and new project-attributes instance is created. Some rules necessary for
# project are added to the module (see 'project-rules' module) at the bottom of
# this file. Default project attributes are set (inheriting attributes of
# parent project, if it exists). After that the Jamfile is read. It can declare
# its own attributes using the 'project' rule which will be combined with any
# already set attributes.
#
# The 'project' rule can also declare a project id which will be associated
# with the project module.
#
# There can also be 'standalone' projects. They are created by calling
# 'initialize' on an arbitrary module and not specifying their location. After
# the call, the module can call the 'project' rule, declare main targets and
# behave as a regular project except that, since it is not associated with any
# location, it should only declare prebuilt targets.
#
# The list of all loaded Jamfiles is stored in the .project-locations variable.
# It is possible to obtain a module name for a location using the 'module-name'
# rule. Standalone projects are not recorded and can only be referenced using
# their project id.
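# As an illustration of the flow described above (the location string is made
# up; 'load' and 'attribute' are defined below, 'target' is the rule mentioned
# above):
#
#   registry = get_manager().projects()
#   mname = registry.load("libs/example")             # loads Jamroot/Jamfiles
#   location = registry.attribute(mname, "location")
#   project_target = registry.target(mname)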
import b2.util.path
import b2.build.targets
from b2.build import property_set, property
from b2.build.errors import ExceptionWithUserContext
from b2.manager import get_manager
import bjam
import b2
import re
import sys
import pkgutil
import os
import string
import imp
import traceback
import b2.util.option as option
from b2.util import (
record_jam_to_value_mapping, qualify_jam_action, is_iterable_typed, bjam_signature,
is_iterable)
class ProjectRegistry:
def __init__(self, manager, global_build_dir):
self.manager = manager
self.global_build_dir = global_build_dir
self.project_rules_ = ProjectRules(self)
# The target corresponding to the project being loaded now
self.current_project = None
# The set of names of loaded project modules
self.jamfile_modules = {}
# Mapping from location to module name
self.location2module = {}
# Mapping from project id to project module
self.id2module = {}
# Map from Jamfile directory to parent Jamfile/Jamroot
# location.
self.dir2parent_jamfile = {}
# Map from directory to the name of Jamfile in
# that directory (or None).
self.dir2jamfile = {}
# Map from project module to attributes object.
self.module2attributes = {}
# Map from project module to target for the project
self.module2target = {}
# Map from names to Python modules, for modules loaded
# via 'using' and 'import' rules in Jamfiles.
self.loaded_tool_modules_ = {}
self.loaded_tool_module_path_ = {}
# Map from project target to the list of
# (id,location) pairs corresponding to all 'use-project'
# invocations.
# TODO: should not have a global map, keep this
# in ProjectTarget.
self.used_projects = {}
self.saved_current_project = []
self.JAMROOT = self.manager.getenv("JAMROOT");
# Note the use of character groups, as opposed to listing
# 'Jamroot' and 'jamroot'. With the latter, we'd get duplicate
# matches on windows and would have to eliminate duplicates.
if not self.JAMROOT:
self.JAMROOT = ["project-root.jam", "[Jj]amroot", "[Jj]amroot.jam"]
# Default patterns to search for the Jamfiles to use for build
# declarations.
self.JAMFILE = self.manager.getenv("JAMFILE")
if not self.JAMFILE:
self.JAMFILE = ["[Bb]uild.jam", "[Jj]amfile.v2", "[Jj]amfile",
"[Jj]amfile.jam"]
self.__python_module_cache = {}
def load (self, jamfile_location):
"""Loads jamfile at the given location. After loading, project global
file and jamfile needed by the loaded one will be loaded recursively.
If the jamfile at that location is loaded already, does nothing.
Returns the project module for the Jamfile."""
assert isinstance(jamfile_location, basestring)
absolute = os.path.join(os.getcwd(), jamfile_location)
absolute = os.path.normpath(absolute)
jamfile_location = b2.util.path.relpath(os.getcwd(), absolute)
mname = self.module_name(jamfile_location)
# If Jamfile is already loaded, do not try again.
if not mname in self.jamfile_modules:
if "--debug-loading" in self.manager.argv():
print "Loading Jamfile at '%s'" % jamfile_location
self.load_jamfile(jamfile_location, mname)
# We want to make sure that child projects are loaded only
# after parent projects. In particular, because parent projects
# define attributes which are inherited by children, we do not
# want children to be loaded before their parents have defined everything.
#
# While "build-project" and "use-project" can potentially refer
# to child projects from parent projects, we do not immediately
# load child projects when seeing those attributes. Instead,
# we record the minimal information that will be used only later.
self.load_used_projects(mname)
return mname
def load_used_projects(self, module_name):
assert isinstance(module_name, basestring)
# local used = [ modules.peek $(module-name) : .used-projects ] ;
used = self.used_projects[module_name]
location = self.attribute(module_name, "location")
for u in used:
id = u[0]
where = u[1]
self.use(id, os.path.join(location, where))
def load_parent(self, location):
"""Loads parent of Jamfile at 'location'.
Issues an error if nothing is found."""
assert isinstance(location, basestring)
found = b2.util.path.glob_in_parents(
location, self.JAMROOT + self.JAMFILE)
if not found:
print "error: Could not find parent for project at '%s'" % location
print "error: Did not find Jamfile.jam or Jamroot.jam in any parent directory."
sys.exit(1)
return self.load(os.path.dirname(found[0]))
def find(self, name, current_location):
"""Given 'name' which can be project-id or plain directory name,
return project module corresponding to that id or directory.
Returns nothing if the project is not found."""
assert isinstance(name, basestring)
assert isinstance(current_location, basestring)
project_module = None
# Try interpreting name as project id.
if name[0] == '/':
project_module = self.id2module.get(name)
if not project_module:
location = os.path.join(current_location, name)
# If no project is registered for the given location, try to
# load it. First see if we have a Jamfile. If not, we might have a project
# root that can act as a Jamfile. In that case, the project root
# must be placed in the directory referred to by the id.
project_module = self.module_name(location)
if not project_module in self.jamfile_modules:
if b2.util.path.glob([location], self.JAMROOT + self.JAMFILE):
project_module = self.load(location)
else:
project_module = None
return project_module
def module_name(self, jamfile_location):
"""Returns the name of module corresponding to 'jamfile-location'.
If no module corresponds to location yet, associates default
module name with that location."""
assert isinstance(jamfile_location, basestring)
module = self.location2module.get(jamfile_location)
if not module:
# Root the path, so that locations are always unambiguous.
# Without this, we can't decide if '../../exe/program1' and '.'
# are the same paths, or not.
jamfile_location = os.path.realpath(
os.path.join(os.getcwd(), jamfile_location))
module = "Jamfile<%s>" % jamfile_location
self.location2module[jamfile_location] = module
return module
def find_jamfile (self, dir, parent_root=0, no_errors=0):
"""Find the Jamfile at the given location. This returns the
exact names of all the Jamfiles in the given directory. The optional
parent-root argument causes this to search not the given directory
but the ones above it up to the directory given in it."""
assert isinstance(dir, basestring)
assert isinstance(parent_root, (int, bool))
assert isinstance(no_errors, (int, bool))
# Glob for all the possible Jamfiles according to the match pattern.
#
jamfile_glob = None
if parent_root:
parent = self.dir2parent_jamfile.get(dir)
if not parent:
parent = b2.util.path.glob_in_parents(dir,
self.JAMFILE)
self.dir2parent_jamfile[dir] = parent
jamfile_glob = parent
else:
jamfile = self.dir2jamfile.get(dir)
if not jamfile:
jamfile = b2.util.path.glob([dir], self.JAMFILE)
self.dir2jamfile[dir] = jamfile
jamfile_glob = jamfile
if len(jamfile_glob) > 1:
# Multiple Jamfiles found in the same place. Warn about this.
# And ensure we use only one of them.
# As a temporary convenience measure, if there's a Jamfile.v2 among the
# found files, suppress the warning and use it.
#
pattern = "(.*[Jj]amfile\\.v2)|(.*[Bb]uild\\.jam)"
v2_jamfiles = [x for x in jamfile_glob if re.match(pattern, x)]
if len(v2_jamfiles) == 1:
jamfile_glob = v2_jamfiles
else:
print """warning: Found multiple Jamfiles at '%s'!""" % (dir)
for j in jamfile_glob:
print " -", j
print "Loading the first one"
# Could not find it, error.
if not no_errors and not jamfile_glob:
self.manager.errors()(
"""Unable to load Jamfile.
Could not find a Jamfile in directory '%s'
Attempted to find it with pattern '%s'.
Please consult the documentation at 'http://boost.org/boost-build2'."""
% (dir, string.join(self.JAMFILE)))
if jamfile_glob:
return jamfile_glob[0]
def load_jamfile(self, dir, jamfile_module):
"""Load a Jamfile at the given directory. Returns nothing.
Will attempt to load the file as indicated by the JAMFILE patterns.
The effect of calling this rule twice with the same 'dir' is undefined."""
assert isinstance(dir, basestring)
assert isinstance(jamfile_module, basestring)
# See if the Jamfile is where it should be.
is_jamroot = False
jamfile_to_load = b2.util.path.glob([dir], self.JAMROOT)
if jamfile_to_load:
if len(jamfile_to_load) > 1:
get_manager().errors()(
"Multiple Jamfiles found at '{}'\n"
"Filenames are: {}"
.format(dir, ' '.join(os.path.basename(j) for j in jamfile_to_load))
)
is_jamroot = True
jamfile_to_load = jamfile_to_load[0]
else:
jamfile_to_load = self.find_jamfile(dir)
dir = os.path.dirname(jamfile_to_load)
if not dir:
dir = "."
self.used_projects[jamfile_module] = []
# Now load the Jamfile in its own context.
# The call to 'initialize' may load parent Jamfile, which might have
# 'use-project' statement that causes a second attempt to load the
# same project we're loading now. Checking inside .jamfile-modules
# prevents that second attempt from messing up.
if not jamfile_module in self.jamfile_modules:
previous_project = self.current_project
# Initialize the jamfile module before loading.
self.initialize(jamfile_module, dir, os.path.basename(jamfile_to_load))
if not jamfile_module in self.jamfile_modules:
saved_project = self.current_project
self.jamfile_modules[jamfile_module] = True
bjam.call("load", jamfile_module, jamfile_to_load)
if is_jamroot:
jamfile = self.find_jamfile(dir, no_errors=True)
if jamfile:
bjam.call("load", jamfile_module, jamfile)
# Now do some checks
if self.current_project != saved_project:
from textwrap import dedent
self.manager.errors()(dedent(
"""
The value of the .current-project variable has magically changed
after loading a Jamfile. This means some of the targets might be
defined in the wrong project.
after loading %s
expected value %s
actual value %s
"""
% (jamfile_module, saved_project, self.current_project)
))
self.end_load(previous_project)
if self.global_build_dir:
id = self.attributeDefault(jamfile_module, "id", None)
project_root = self.attribute(jamfile_module, "project-root")
location = self.attribute(jamfile_module, "location")
if location and project_root == dir:
# This is Jamroot
if not id:
# FIXME: go via errors module, so that contexts are
# shown?
print "warning: the --build-dir option was specified"
print "warning: but Jamroot at '%s'" % dir
print "warning: specified no project id"
print "warning: the --build-dir option will be ignored"
def end_load(self, previous_project=None):
if not self.current_project:
self.manager.errors()(
'Ending project loading requested when there was no project currently '
'being loaded.'
)
if not previous_project and self.saved_current_project:
self.manager.errors()(
'Ending project loading requested with no "previous project" when there '
'are other projects still being loaded recursively.'
)
self.current_project = previous_project
def load_standalone(self, jamfile_module, file):
"""Loads 'file' as standalone project that has no location
associated with it. This is mostly useful for user-config.jam,
which should be able to define targets; although it has
some location in the filesystem, we do not want any build to
happen in the user's HOME, for example.
The caller is required to never call this method twice on
the same file.
"""
assert isinstance(jamfile_module, basestring)
assert isinstance(file, basestring)
self.used_projects[jamfile_module] = []
bjam.call("load", jamfile_module, file)
self.load_used_projects(jamfile_module)
def is_jamroot(self, basename):
assert isinstance(basename, basestring)
match = [ pat for pat in self.JAMROOT if re.match(pat, basename)]
if match:
return 1
else:
return 0
def initialize(self, module_name, location=None, basename=None, standalone_path=''):
"""Initialize the module for a project.
module-name is the name of the project module.
location is the location (directory) of the project to initialize.
If not specified, standalone project will be initialized
standalone_path is the path to the source-location.
This should only be called from the Python side.
"""
assert isinstance(module_name, basestring)
assert isinstance(location, basestring) or location is None
assert isinstance(basename, basestring) or basename is None
jamroot = False
parent_module = None
if module_name == "test-config":
# No parent
pass
elif module_name == "site-config":
parent_module = "test-config"
elif module_name == "user-config":
parent_module = "site-config"
elif module_name == "project-config":
parent_module = "user-config"
elif location and not self.is_jamroot(basename):
# We search for parent/project-root only if a Jamfile was specified,
# i.e. if the project is not standalone.
parent_module = self.load_parent(location)
elif location:
# It's either jamroot, or standalone project.
# If it's jamroot, inherit from user-config.
# If project-config module exist, inherit from it.
parent_module = 'user-config'
if 'project-config' in self.module2attributes:
parent_module = 'project-config'
jamroot = True
# TODO: need to consider if standalone projects can do anything but defining
# prebuilt targets. If so, we need to give more sensible "location", so that
# source paths are correct.
if not location:
location = ""
# the call to load_parent() above can end up loading this module again
# make sure we don't reinitialize the module's attributes
if module_name not in self.module2attributes:
if "--debug-loading" in self.manager.argv():
print "Initializing project '%s'" % module_name
attributes = ProjectAttributes(self.manager, location, module_name)
self.module2attributes[module_name] = attributes
python_standalone = False
if location:
attributes.set("source-location", [location], exact=1)
elif not module_name in ["test-config", "site-config", "user-config", "project-config"]:
# This is a standalone project with known location. Set source location
# so that it can declare targets. This is intended so that you can put
# a .jam file in your sources and use it via 'using'. Standard modules
# (in 'tools' subdir) may not assume source dir is set.
source_location = standalone_path
if not source_location:
source_location = self.loaded_tool_module_path_.get(module_name)
if not source_location:
self.manager.errors()('Standalone module path not found for "{}"'
.format(module_name))
attributes.set("source-location", [source_location], exact=1)
python_standalone = True
attributes.set("requirements", property_set.empty(), exact=True)
attributes.set("usage-requirements", property_set.empty(), exact=True)
attributes.set("default-build", property_set.empty(), exact=True)
attributes.set("projects-to-build", [], exact=True)
attributes.set("project-root", None, exact=True)
attributes.set("build-dir", None, exact=True)
self.project_rules_.init_project(module_name, python_standalone)
if parent_module:
self.inherit_attributes(module_name, parent_module)
attributes.set("parent-module", parent_module, exact=1)
if jamroot:
attributes.set("project-root", location, exact=1)
parent = None
if parent_module:
parent = self.target(parent_module)
if module_name not in self.module2target:
target = b2.build.targets.ProjectTarget(self.manager,
module_name, module_name, parent,
self.attribute(module_name, "requirements"),
# FIXME: why we need to pass this? It's not
# passed in jam code.
self.attribute(module_name, "default-build"))
self.module2target[module_name] = target
self.current_project = self.target(module_name)
def inherit_attributes(self, project_module, parent_module):
"""Make 'project-module' inherit attributes of project
root and parent module."""
assert isinstance(project_module, basestring)
assert isinstance(parent_module, basestring)
attributes = self.module2attributes[project_module]
pattributes = self.module2attributes[parent_module]
# Parent module might be locationless user-config.
# FIXME:
#if [ modules.binding $(parent-module) ]
#{
# $(attributes).set parent : [ path.parent
# [ path.make [ modules.binding $(parent-module) ] ] ] ;
# }
attributes.set("project-root", pattributes.get("project-root"), exact=True)
attributes.set("default-build", pattributes.get("default-build"), exact=True)
attributes.set("requirements", pattributes.get("requirements"), exact=True)
attributes.set("usage-requirements",
pattributes.get("usage-requirements"), exact=1)
parent_build_dir = pattributes.get("build-dir")
if parent_build_dir:
# Have to compute relative path from parent dir to our dir
# Convert both paths to absolute, since we cannot
# find relative path from ".." to "."
location = attributes.get("location")
parent_location = pattributes.get("location")
our_dir = os.path.join(os.getcwd(), location)
parent_dir = os.path.join(os.getcwd(), parent_location)
build_dir = os.path.join(parent_build_dir,
os.path.relpath(our_dir, parent_dir))
attributes.set("build-dir", build_dir, exact=True)
def register_id(self, id, module):
"""Associate the given id with the given project module."""
assert isinstance(id, basestring)
assert isinstance(module, basestring)
self.id2module[id] = module
def current(self):
"""Returns the project which is currently being loaded."""
if not self.current_project:
get_manager().errors()(
'Reference to the project currently being loaded requested '
'when there was no project module being loaded.'
)
return self.current_project
def set_current(self, c):
if __debug__:
from .targets import ProjectTarget
assert isinstance(c, ProjectTarget)
self.current_project = c
def push_current(self, project):
"""Temporary changes the current project to 'project'. Should
be followed by 'pop-current'."""
if __debug__:
from .targets import ProjectTarget
assert isinstance(project, ProjectTarget)
self.saved_current_project.append(self.current_project)
self.current_project = project
def pop_current(self):
if self.saved_current_project:
self.current_project = self.saved_current_project.pop()
else:
self.current_project = None
def attributes(self, project):
"""Returns the project-attribute instance for the
specified jamfile module."""
assert isinstance(project, basestring)
return self.module2attributes[project]
def attribute(self, project, attribute):
"""Returns the value of the specified attribute in the
specified jamfile module."""
assert isinstance(project, basestring)
assert isinstance(attribute, basestring)
try:
return self.module2attributes[project].get(attribute)
except:
raise BaseException("No attribute '%s' for project %s" % (attribute, project))
def attributeDefault(self, project, attribute, default):
"""Returns the value of the specified attribute in the
specified jamfile module."""
assert isinstance(project, basestring)
assert isinstance(attribute, basestring)
assert isinstance(default, basestring) or default is None
return self.module2attributes[project].getDefault(attribute, default)
def target(self, project_module):
"""Returns the project target corresponding to the 'project-module'."""
assert isinstance(project_module, basestring)
if project_module not in self.module2target:
self.module2target[project_module] = \
b2.build.targets.ProjectTarget(project_module, project_module,
self.attribute(project_module, "requirements"))
return self.module2target[project_module]
def use(self, id, location):
# Use/load a project.
assert isinstance(id, basestring)
assert isinstance(location, basestring)
saved_project = self.current_project
project_module = self.load(location)
declared_id = self.attributeDefault(project_module, "id", "")
if not declared_id or declared_id != id:
# The project at 'location' either has no id or
# that id is not equal to the 'id' parameter.
if id in self.id2module and self.id2module[id] != project_module:
self.manager.errors()(
"""Attempt to redeclare already existing project id '%s' at location '%s'""" % (id, location))
self.id2module[id] = project_module
self.current_project = saved_project
def add_rule(self, name, callable_):
"""Makes rule 'name' available to all subsequently loaded Jamfiles.
Calling that rule will relay to 'callable'."""
assert isinstance(name, basestring)
assert callable(callable_)
self.project_rules_.add_rule(name, callable_)
def project_rules(self):
return self.project_rules_
def glob_internal(self, project, wildcards, excludes, rule_name):
if __debug__:
from .targets import ProjectTarget
assert isinstance(project, ProjectTarget)
assert is_iterable_typed(wildcards, basestring)
assert is_iterable_typed(excludes, basestring) or excludes is None
assert isinstance(rule_name, basestring)
location = project.get("source-location")[0]
result = []
callable = b2.util.path.__dict__[rule_name]
paths = callable([location], wildcards, excludes)
has_dir = 0
for w in wildcards:
if os.path.dirname(w):
has_dir = 1
break
if has_dir or rule_name != "glob":
result = []
# The paths we've found are relative to the current directory,
# but the names specified in the sources list are assumed to
# be relative to the source directory of the corresponding
# project. Either translate them or make them absolute.
for p in paths:
rel = os.path.relpath(p, location)
# If the path is below source location, use relative path.
if not ".." in rel:
result.append(rel)
else:
# Otherwise, use full path just to avoid any ambiguities.
result.append(os.path.abspath(p))
else:
# There was no directory in the wildcards, so the files are all
# in the source directory of the project. Just drop the
# directory, instead of making paths absolute.
result = [os.path.basename(p) for p in paths]
return result
def __build_python_module_cache(self):
"""Recursively walks through the b2/src subdirectories and
creates an index of base module name to package name. The
index is stored within self.__python_module_cache and allows
for an O(1) module lookup.
For example, given the base module name `toolset`,
self.__python_module_cache['toolset'] will return
'b2.build.toolset'
pkgutil.walk_packages() will find any python package
provided a directory contains an __init__.py. This has the
added benefit of allowing libraries to be installed and
# automatically available within the contrib directory.
*Note*: pkgutil.walk_packages() will import any subpackage
in order to access its __path__ variable. Meaning:
any initialization code will be run if the package hasn't
already been imported.
"""
cache = {}
for importer, mname, ispkg in pkgutil.walk_packages(b2.__path__, prefix='b2.'):
basename = mname.split('.')[-1]
# since the jam code is only going to have "import toolset ;"
# it doesn't matter if there are separately named "b2.build.toolset" and
# "b2.contrib.toolset" as it is impossible to know which the user is
# referring to.
if basename in cache:
self.manager.errors()('duplicate module name "{0}" '
'found in boost-build path'.format(basename))
cache[basename] = mname
self.__python_module_cache = cache
def load_module(self, name, extra_path=None):
"""Load a Python module that should be useable from Jamfiles.
There are generally two types of modules Jamfiles might want to
use:
- Core Boost.Build. Those are imported using plain names, e.g.
'toolset', so this function checks if we have module named
b2.package.module already.
- Python modules in the same directory as Jamfile. We don't
want to even temporarily add the Jamfile's directory to sys.path,
since then we might get naming conflicts between standard
Python modules and those.
"""
assert isinstance(name, basestring)
assert is_iterable_typed(extra_path, basestring) or extra_path is None
# See if we loaded module of this name already
existing = self.loaded_tool_modules_.get(name)
if existing:
return existing
# check the extra path as well as any paths outside
# of the b2 package and import the module if it exists
b2_path = os.path.normpath(b2.__path__[0])
# normalize the pathing in the BOOST_BUILD_PATH.
# this allows for using startswith() to determine
# if a path is a subdirectory of the b2 root_path
paths = [os.path.normpath(p) for p in self.manager.boost_build_path()]
# remove all paths that start with b2's root_path
paths = [p for p in paths if not p.startswith(b2_path)]
# add any extra paths
paths.extend(extra_path)
try:
# find_module is used so that the pyc's can be used.
# an ImportError is raised if not found
f, location, description = imp.find_module(name, paths)
except ImportError:
# if the module is not found in the b2 package,
# this error will be handled later
pass
else:
# we've found the module, now let's try loading it.
# it's possible that the module itself contains an ImportError
# which is why we're loading it in this else clause so that the
# proper error message is shown to the end user.
# TODO: does this module name really need to be mangled like this?
mname = name + "__for_jamfile"
self.loaded_tool_module_path_[mname] = location
module = imp.load_module(mname, f, location, description)
self.loaded_tool_modules_[name] = module
return module
# the cache is created here due to possibly importing packages
# that end up calling get_manager() which might fail
if not self.__python_module_cache:
self.__build_python_module_cache()
underscore_name = name.replace('-', '_')
# check to see if the module is within the b2 package
# and already loaded
mname = self.__python_module_cache.get(underscore_name)
if mname in sys.modules:
return sys.modules[mname]
# otherwise, if the module name is within the cache,
# the module exists within the BOOST_BUILD_PATH,
# load it.
elif mname:
# in some cases, self.loaded_tool_module_path_ needs to
# have the path to the file during the import
# (project.initialize() for example),
# so the path needs to be set *before* importing the module.
path = os.path.join(b2.__path__[0], *mname.split('.')[1:])
self.loaded_tool_module_path_[mname] = path
# mname is guaranteed to be importable since it was
# found within the cache
__import__(mname)
module = sys.modules[mname]
self.loaded_tool_modules_[name] = module
return module
self.manager.errors()("Cannot find module '%s'" % name)
# FIXME:
# Defines a Boost.Build extension project. Such extensions usually
# contain library targets and features that can be used by many people.
# Even though extensions are really projects, they can be initialize as
# a module would be with the "using" (project.project-rules.using)
# mechanism.
#rule extension ( id : options * : * )
#{
# # The caller is a standalone module for the extension.
# local mod = [ CALLER_MODULE ] ;
#
# # We need to do the rest within the extension module.
# module $(mod)
# {
# import path ;
#
# # Find the root project.
# local root-project = [ project.current ] ;
# root-project = [ $(root-project).project-module ] ;
# while
# [ project.attribute $(root-project) parent-module ] &&
# [ project.attribute $(root-project) parent-module ] != user-config
# {
# root-project = [ project.attribute $(root-project) parent-module ] ;
# }
#
# # Create the project data, and bring in the project rules
# # into the module.
# project.initialize $(__name__) :
# [ path.join [ project.attribute $(root-project) location ] ext $(1:L) ] ;
#
# # Create the project itself, i.e. the attributes.
# # All extensions are created in the "/ext" project space.
# project /ext/$(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ;
# local attributes = [ project.attributes $(__name__) ] ;
#
# # Inherit from the root project of whomever is defining us.
# project.inherit-attributes $(__name__) : $(root-project) ;
# $(attributes).set parent-module : $(root-project) : exact ;
# }
#}
class ProjectAttributes:
"""Class keeping all the attributes of a project.
The standard attributes are 'id', "location", "project-root", "parent"
"requirements", "default-build", "source-location" and "projects-to-build".
"""
def __init__(self, manager, location, project_module):
self.manager = manager
self.location = location
self.project_module = project_module
self.attributes = {}
self.usage_requirements = None
def set(self, attribute, specification, exact=False):
"""Set the named attribute from the specification given by the user.
The value actually set may be different."""
assert isinstance(attribute, basestring)
assert isinstance(exact, (int, bool))
if __debug__ and not exact:
if attribute == 'requirements':
assert (isinstance(specification, property_set.PropertySet)
or all(isinstance(s, basestring) for s in specification))
elif attribute in (
'usage-requirements', 'default-build', 'source-location', 'build-dir', 'id'):
assert is_iterable_typed(specification, basestring)
elif __debug__:
assert (
isinstance(specification, (property_set.PropertySet, type(None), basestring))
or all(isinstance(s, basestring) for s in specification)
)
if exact:
self.__dict__[attribute] = specification
elif attribute == "requirements":
self.requirements = property_set.refine_from_user_input(
self.requirements, specification,
self.project_module, self.location)
elif attribute == "usage-requirements":
unconditional = []
for p in specification:
split = property.split_conditional(p)
if split:
unconditional.append(split[1])
else:
unconditional.append(p)
non_free = property.remove("free", unconditional)
if non_free:
get_manager().errors()("usage-requirements %s have non-free properties %s" \
% (specification, non_free))
t = property.translate_paths(
property.create_from_strings(specification, allow_condition=True),
self.location)
existing = self.__dict__.get("usage-requirements")
if existing:
new = property_set.create(existing.all() + t)
else:
new = property_set.create(t)
self.__dict__["usage-requirements"] = new
elif attribute == "default-build":
self.__dict__["default-build"] = property_set.create(specification)
elif attribute == "source-location":
source_location = []
for path in specification:
source_location.append(os.path.join(self.location, path))
self.__dict__["source-location"] = source_location
elif attribute == "build-dir":
self.__dict__["build-dir"] = os.path.join(self.location, specification[0])
elif attribute == "id":
id = specification[0]
if id[0] != '/':
id = "/" + id
self.manager.projects().register_id(id, self.project_module)
self.__dict__["id"] = id
elif not attribute in ["default-build", "location",
"source-location", "parent",
"projects-to-build", "project-root"]:
self.manager.errors()(
"""Invalid project attribute '%s' specified
for project at '%s'""" % (attribute, self.location))
else:
self.__dict__[attribute] = specification
def get(self, attribute):
assert isinstance(attribute, basestring)
return self.__dict__[attribute]
def getDefault(self, attribute, default):
assert isinstance(attribute, basestring)
return self.__dict__.get(attribute, default)
def dump(self):
"""Prints the project attributes."""
id = self.get("id")
if not id:
id = "(none)"
else:
id = id[0]
parent = self.get("parent")
if not parent:
parent = "(none)"
else:
parent = parent[0]
print "'%s'" % id
print "Parent project:%s", parent
print "Requirements:%s", self.get("requirements")
print "Default build:%s", string.join(self.get("debuild-build"))
print "Source location:%s", string.join(self.get("source-location"))
print "Projects to build:%s", string.join(self.get("projects-to-build").sort());
class ProjectRules:
"""Class keeping all rules that are made available to Jamfile."""
def __init__(self, registry):
self.registry = registry
self.manager_ = registry.manager
self.rules = {}
self.local_names = [x for x in self.__class__.__dict__
if x not in ["__init__", "init_project", "add_rule",
"error_reporting_wrapper", "add_rule_for_type", "reverse"]]
self.all_names_ = [x for x in self.local_names]
def _import_rule(self, bjam_module, name, callable_):
assert isinstance(bjam_module, basestring)
assert isinstance(name, basestring)
assert callable(callable_)
if hasattr(callable_, "bjam_signature"):
bjam.import_rule(bjam_module, name, self.make_wrapper(callable_), callable_.bjam_signature)
else:
bjam.import_rule(bjam_module, name, self.make_wrapper(callable_))
def add_rule_for_type(self, type):
assert isinstance(type, basestring)
rule_name = type.lower().replace("_", "-")
@bjam_signature([['name'], ['sources', '*'], ['requirements', '*'],
['default_build', '*'], ['usage_requirements', '*']])
def xpto (name, sources=[], requirements=[], default_build=[], usage_requirements=[]):
return self.manager_.targets().create_typed_target(
type, self.registry.current(), name, sources,
requirements, default_build, usage_requirements)
self.add_rule(rule_name, xpto)
def add_rule(self, name, callable_):
assert isinstance(name, basestring)
assert callable(callable_)
self.rules[name] = callable_
self.all_names_.append(name)
# Add new rule at global bjam scope. This might not be ideal,
# added because if a jamroot does 'import foo' where foo calls
# add_rule, we need to import new rule to jamroot scope, and
# I'm lazy to do this now.
self._import_rule("", name, callable_)
def all_names(self):
return self.all_names_
def call_and_report_errors(self, callable_, *args, **kw):
assert callable(callable_)
result = None
try:
self.manager_.errors().push_jamfile_context()
result = callable_(*args, **kw)
except ExceptionWithUserContext, e:
e.report()
except Exception, e:
try:
self.manager_.errors().handle_stray_exception (e)
except ExceptionWithUserContext, e:
e.report()
finally:
self.manager_.errors().pop_jamfile_context()
return result
def make_wrapper(self, callable_):
"""Given a free-standing function 'callable', return a new
callable that will call 'callable' and report all exceptions,
using 'call_and_report_errors'."""
assert callable(callable_)
def wrapper(*args, **kw):
return self.call_and_report_errors(callable_, *args, **kw)
return wrapper
def init_project(self, project_module, python_standalone=False):
assert isinstance(project_module, basestring)
assert isinstance(python_standalone, bool)
if python_standalone:
m = sys.modules[project_module]
for n in self.local_names:
if n != "import_":
setattr(m, n, getattr(self, n))
for n in self.rules:
setattr(m, n, self.rules[n])
return
for n in self.local_names:
# Using 'getattr' here gives us a bound method,
# while using self.__dict__[r] would give unbound one.
v = getattr(self, n)
if callable(v):
if n == "import_":
n = "import"
else:
n = string.replace(n, "_", "-")
self._import_rule(project_module, n, v)
for n in self.rules:
self._import_rule(project_module, n, self.rules[n])
def project(self, *args):
assert is_iterable(args) and all(is_iterable(arg) for arg in args)
jamfile_module = self.registry.current().project_module()
attributes = self.registry.attributes(jamfile_module)
id = None
if args and args[0]:
id = args[0][0]
args = args[1:]
if id:
attributes.set('id', [id])
explicit_build_dir = None
for a in args:
if a:
attributes.set(a[0], a[1:], exact=0)
if a[0] == "build-dir":
explicit_build_dir = a[1]
# If '--build-dir' is specified, change the build dir for the project.
if self.registry.global_build_dir:
location = attributes.get("location")
# Project with empty location is 'standalone' project, like
# user-config, or qt. It has no build dir.
# If we try to set build dir for user-config, we'll then
# try to inherit it, with either weird, or wrong consequences.
if location and location == attributes.get("project-root"):
# Re-read the project id, since it might have been changed in
# the project's attributes.
id = attributes.get('id')
# This is Jamroot.
if id:
if explicit_build_dir and os.path.isabs(explicit_build_dir):
self.registry.manager.errors()(
"""Absolute directory specified via 'build-dir' project attribute
Don't know how to combine that with the --build-dir option.""")
rid = id
if rid[0] == '/':
rid = rid[1:]
p = os.path.join(self.registry.global_build_dir, rid)
if explicit_build_dir:
p = os.path.join(p, explicit_build_dir)
attributes.set("build-dir", p, exact=1)
elif explicit_build_dir:
self.registry.manager.errors()(
"""When --build-dir is specified, the 'build-dir'
attribute is allowed only for top-level 'project' invocations""")
def constant(self, name, value):
"""Declare and set a project global constant.
Project global constants are normal variables but should
not be changed. They are applied to every child Jamfile."""
assert is_iterable_typed(name, basestring)
assert is_iterable_typed(value, basestring)
self.registry.current().add_constant(name[0], value)
def path_constant(self, name, value):
"""Declare and set a project global constant, whose value is a path. The
path is adjusted to be relative to the invocation directory. The given
value path is taken to be either absolute, or relative to this project
root."""
assert is_iterable_typed(name, basestring)
assert is_iterable_typed(value, basestring)
if len(value) > 1:
self.registry.manager.errors()("path constant should have one element")
self.registry.current().add_constant(name[0], value, path=1)
def use_project(self, id, where):
# See comment in 'load' for explanation why we record the
# parameters as opposed to loading the project now.
assert is_iterable_typed(id, basestring)
assert is_iterable_typed(where, basestring)
m = self.registry.current().project_module()
self.registry.used_projects[m].append((id[0], where[0]))
def build_project(self, dir):
assert is_iterable_typed(dir, basestring)
jamfile_module = self.registry.current().project_module()
attributes = self.registry.attributes(jamfile_module)
now = attributes.get("projects-to-build")
attributes.set("projects-to-build", now + dir, exact=True)
def explicit(self, target_names):
assert is_iterable_typed(target_names, basestring)
self.registry.current().mark_targets_as_explicit(target_names)
def always(self, target_names):
assert is_iterable_typed(target_names, basestring)
self.registry.current().mark_targets_as_always(target_names)
def glob(self, wildcards, excludes=None):
assert is_iterable_typed(wildcards, basestring)
assert is_iterable_typed(excludes, basestring) or excludes is None
return self.registry.glob_internal(self.registry.current(),
wildcards, excludes, "glob")
def glob_tree(self, wildcards, excludes=None):
assert is_iterable_typed(wildcards, basestring)
assert is_iterable_typed(excludes, basestring) or excludes is None
bad = 0
for p in wildcards:
if os.path.dirname(p):
bad = 1
if excludes:
for p in excludes:
if os.path.dirname(p):
bad = 1
if bad:
self.registry.manager.errors()(
"The patterns to 'glob-tree' may not include directory")
return self.registry.glob_internal(self.registry.current(),
wildcards, excludes, "glob_tree")
def using(self, toolset, *args):
# The module referred to by 'using' can be placed in
# the same directory as Jamfile, and the user
# will expect the module to be found even though
# the directory is not in BOOST_BUILD_PATH.
# So temporarily change the search path.
assert is_iterable_typed(toolset, basestring)
current = self.registry.current()
location = current.get('location')
m = self.registry.load_module(toolset[0], [location])
if "init" not in m.__dict__:
self.registry.manager.errors()(
"Tool module '%s' does not define the 'init' method" % toolset[0])
m.init(*args)
# The above might have clobbered .current-project. Restore the correct
# value.
self.registry.set_current(current)
def import_(self, name, names_to_import=None, local_names=None):
assert is_iterable_typed(name, basestring)
assert is_iterable_typed(names_to_import, basestring) or names_to_import is None
assert is_iterable_typed(local_names, basestring) or local_names is None
name = name[0]
py_name = name
if py_name == "os":
py_name = "os_j"
jamfile_module = self.registry.current().project_module()
attributes = self.registry.attributes(jamfile_module)
location = attributes.get("location")
saved = self.registry.current()
m = self.registry.load_module(py_name, [location])
for f in m.__dict__:
v = m.__dict__[f]
f = f.replace("_", "-")
if callable(v):
qn = name + "." + f
self._import_rule(jamfile_module, qn, v)
record_jam_to_value_mapping(qualify_jam_action(qn, jamfile_module), v)
if names_to_import:
if not local_names:
local_names = names_to_import
if len(names_to_import) != len(local_names):
self.registry.manager.errors()(
"""The number of names to import and local names do not match.""")
for n, l in zip(names_to_import, local_names):
self._import_rule(jamfile_module, l, m.__dict__[n])
self.registry.set_current(saved)
def conditional(self, condition, requirements):
"""Calculates conditional requirements for multiple requirements
at once. This is a shorthand to reduce duplication and to
keep an inline declarative syntax. For example:
lib x : x.cpp : [ conditional <toolset>gcc <variant>debug :
<define>DEBUG_EXCEPTION <define>DEBUG_TRACE ] ;
"""
assert is_iterable_typed(condition, basestring)
assert is_iterable_typed(requirements, basestring)
c = string.join(condition, ",")
if c.find(":") != -1:
return [c + r for r in requirements]
else:
return [c + ":" + r for r in requirements]
def option(self, name, value):
assert is_iterable(name) and isinstance(name[0], basestring)
assert is_iterable(value) and isinstance(value[0], basestring)
name = name[0]
if not name in ["site-config", "user-config", "project-config"]:
get_manager().errors()("The 'option' rule may be used only in site-config or user-config")
option.set(name, value[0])
| 42.821928
| 104
| 0.597541
|
308e7fa94dfbec0143e4ac6db3352563bc7ffee6
| 11,661
|
py
|
Python
|
mmdet/datasets/custom.py
|
Chenglin-Yang/LESA_detection
|
3238bd9c6f3eb4a2a746837043cd9b3b56c77216
|
[
"Apache-2.0"
] | 4
|
2021-07-18T08:09:09.000Z
|
2022-01-06T06:19:00.000Z
|
mmdet/datasets/custom.py
|
Chenglin-Yang/LESA_detection
|
3238bd9c6f3eb4a2a746837043cd9b3b56c77216
|
[
"Apache-2.0"
] | null | null | null |
mmdet/datasets/custom.py
|
Chenglin-Yang/LESA_detection
|
3238bd9c6f3eb4a2a746837043cd9b3b56c77216
|
[
"Apache-2.0"
] | 1
|
2022-01-06T06:18:53.000Z
|
2022-01-06T06:18:53.000Z
|
import os.path as osp
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from torch.utils.data import Dataset
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .pipelines import Compose
@DATASETS.register_module()
class CustomDataset(Dataset):
"""Custom dataset for detection.
The annotation format is shown as follows. The `ann` field is optional for
testing.
.. code-block:: none
[
{
'filename': 'a.jpg',
'width': 1280,
'height': 720,
'ann': {
'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order.
'labels': <np.ndarray> (n, ),
'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
'labels_ignore': <np.ndarray> (k, ) (optional field)
}
},
...
]
Args:
ann_file (str): Annotation file path.
pipeline (list[dict]): Processing pipeline.
classes (str | Sequence[str], optional): Specify classes to load.
If is None, ``cls.CLASSES`` will be used. Default: None.
data_root (str, optional): Data root for ``ann_file``,
``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
test_mode (bool, optional): If set True, annotation will not be loaded.
filter_empty_gt (bool, optional): If set true, images without bounding
boxes of the dataset's classes will be filtered out. This option
only works when `test_mode=False`, i.e., we never filter images
during tests.
"""
CLASSES = None
def __init__(self,
ann_file,
pipeline,
classes=None,
data_root=None,
img_prefix='',
seg_prefix=None,
proposal_file=None,
test_mode=False,
filter_empty_gt=True):
self.ann_file = ann_file
self.data_root = data_root
self.img_prefix = img_prefix
self.seg_prefix = seg_prefix
self.proposal_file = proposal_file
self.test_mode = test_mode
self.filter_empty_gt = filter_empty_gt
self.CLASSES = self.get_classes(classes)
# join paths if data_root is specified
if self.data_root is not None:
if not osp.isabs(self.ann_file):
self.ann_file = osp.join(self.data_root, self.ann_file)
if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
self.img_prefix = osp.join(self.data_root, self.img_prefix)
if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
if not (self.proposal_file is None
or osp.isabs(self.proposal_file)):
self.proposal_file = osp.join(self.data_root,
self.proposal_file)
# load annotations (and proposals)
self.data_infos = self.load_annotations(self.ann_file)
if self.proposal_file is not None:
self.proposals = self.load_proposals(self.proposal_file)
else:
self.proposals = None
# filter images too small and containing no annotations
if not test_mode:
valid_inds = self._filter_imgs()
self.data_infos = [self.data_infos[i] for i in valid_inds]
if self.proposals is not None:
self.proposals = [self.proposals[i] for i in valid_inds]
# set group flag for the sampler
self._set_group_flag()
# processing pipeline
# __import__('pdb').set_trace()
self.pipeline = Compose(pipeline)
def __len__(self):
"""Total number of samples of data."""
return len(self.data_infos)
def load_annotations(self, ann_file):
"""Load annotation from annotation file."""
return mmcv.load(ann_file)
def load_proposals(self, proposal_file):
"""Load proposal from proposal file."""
return mmcv.load(proposal_file)
def get_ann_info(self, idx):
"""Get annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
return self.data_infos[idx]['ann']
def get_cat_ids(self, idx):
"""Get category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist()
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['img_prefix'] = self.img_prefix
results['seg_prefix'] = self.seg_prefix
results['proposal_file'] = self.proposal_file
results['bbox_fields'] = []
results['mask_fields'] = []
results['seg_fields'] = []
def _filter_imgs(self, min_size=32):
"""Filter images too small."""
if self.filter_empty_gt:
warnings.warn(
'CustomDataset does not support filtering empty gt images.')
valid_inds = []
for i, img_info in enumerate(self.data_infos):
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _set_group_flag(self):
"""Set flag according to image aspect ratio.
Images with aspect ratio greater than 1 will be set as group 1,
otherwise group 0.
"""
self.flag = np.zeros(len(self), dtype=np.uint8)
for i in range(len(self)):
img_info = self.data_infos[i]
if img_info['width'] / img_info['height'] > 1:
self.flag[i] = 1
def _rand_another(self, idx):
"""Get another random index from the same group as the given index."""
pool = np.where(self.flag == self.flag[idx])[0]
return np.random.choice(pool)
def __getitem__(self, idx):
"""Get training/test data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training/test data (with annotation if `test_mode` is set \
True).
"""
if self.test_mode:
return self.prepare_test_img(idx)
while True:
data = self.prepare_train_img(idx)
if data is None:
idx = self._rand_another(idx)
continue
return data
def prepare_train_img(self, idx):
"""Get training data and annotations after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training data and annotation after pipeline with new keys \
introduced by pipeline.
"""
img_info = self.data_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
# __import__('pdb').set_trace()
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, idx):
"""Get testing data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Testing data after pipeline with new keys introduced by \
pipeline.
"""
img_info = self.data_infos[idx]
results = dict(img_info=img_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
self.pre_pipeline(results)
return self.pipeline(results)
@classmethod
def get_classes(cls, classes=None):
"""Get class names of current dataset.
Args:
classes (Sequence[str] | str | None): If classes is None, use
default CLASSES defined by builtin dataset. If classes is a
string, take it as a file name. The file contains the name of
classes where each line contains one class name. If classes is
a tuple or list, override the CLASSES defined by the dataset.
Returns:
tuple[str] or list[str]: Names of categories of the dataset.
"""
if classes is None:
return cls.CLASSES
if isinstance(classes, str):
# take it as a file path
class_names = mmcv.list_from_file(classes)
elif isinstance(classes, (tuple, list)):
class_names = classes
else:
raise ValueError(f'Unsupported type {type(classes)} of classes.')
return class_names
def format_results(self, results, **kwargs):
"""Place holder to format result to dataset specific output."""
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. Default: 0.5.
scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
Default: None.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = OrderedDict()
iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
if metric == 'mAP':
assert isinstance(iou_thrs, list)
mean_aps = []
for iou_thr in iou_thrs:
print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=scale_ranges,
iou_thr=iou_thr,
dataset=self.CLASSES,
logger=logger)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
for i, num in enumerate(proposal_nums):
for j, iou in enumerate(iou_thrs):
eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
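# Added usage sketch (not part of mmdet; the subclass, file names and pipeline
# below are assumptions for illustration only):
#
#   dataset = MyDataset(                       # a CustomDataset subclass
#       ann_file='data/annotations.pkl',
#       pipeline=[dict(type='LoadImageFromFile'),
#                 dict(type='LoadAnnotations', with_bbox=True)],
#       img_prefix='data/images/')
#   print(len(dataset))                        # number of (filtered) samples
#   results = [...]                            # detector outputs, one entry per image
#   print(dataset.evaluate(results, metric='mAP', iou_thr=0.5))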
| 35.769939
| 79
| 0.566675
|
8dafd003b87856c4eba1b4a96e2cabd4a3602ca8
| 595
|
py
|
Python
|
app/main.py
|
jmeisele/fastapi-ml-scaffolding
|
605cbfc6a1bb4c3bb7cb3d379a4aee6f889d0e53
|
[
"MIT"
] | 8
|
2020-07-22T16:35:18.000Z
|
2021-11-07T02:54:30.000Z
|
app/main.py
|
jmeisele/fastapi-ml-scaffolding
|
605cbfc6a1bb4c3bb7cb3d379a4aee6f889d0e53
|
[
"MIT"
] | 1
|
2022-01-13T04:43:41.000Z
|
2022-01-13T04:43:41.000Z
|
app/main.py
|
jmeisele/fastapi-ml-scaffolding
|
605cbfc6a1bb4c3bb7cb3d379a4aee6f889d0e53
|
[
"MIT"
] | 2
|
2021-07-16T21:41:15.000Z
|
2022-01-24T23:31:34.000Z
|
from fastapi import FastAPI
from app.api.routes.router import api_router
from app.core.config import API_PREFIX, APP_NAME, APP_VERSION, IS_DEBUG
from app.core.event_handlers import start_app_handler, stop_app_handler
def get_app() -> FastAPI:
"""FastAPI app controller"""
fast_app = FastAPI(title=APP_NAME, version=APP_VERSION, debug=IS_DEBUG)
fast_app.include_router(api_router, prefix=API_PREFIX)
fast_app.add_event_handler("startup", start_app_handler(fast_app))
fast_app.add_event_handler("shutdown", stop_app_handler(fast_app))
return fast_app
app = get_app()
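# Added local-run sketch (not part of the original file; assumes uvicorn is
# installed, as it commonly is alongside FastAPI). Host and port values are
# arbitrary examples; the CLI equivalent would be `uvicorn app.main:app`.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)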
| 31.315789
| 75
| 0.786555
|
9e3645032fb9e969cb257a8b8ba592ed0c3214c4
| 1,750
|
py
|
Python
|
Data Structure/Linked_List/Singly_Linked_List/Delete_Kth_Node_From_End.py
|
Hasindu5512/HacktoberFest-2020-Data-Structure-and-algorithm
|
64ab102fd363fc47d2f22ba9f5a5a8130891efd4
|
[
"MIT"
] | 5
|
2020-10-05T11:26:42.000Z
|
2021-03-04T04:31:37.000Z
|
Data Structure/Linked_List/Singly_Linked_List/Delete_Kth_Node_From_End.py
|
Hasindu5512/HacktoberFest-2020-Data-Structure-and-algorithm
|
64ab102fd363fc47d2f22ba9f5a5a8130891efd4
|
[
"MIT"
] | 16
|
2020-10-05T04:56:32.000Z
|
2020-10-21T21:04:07.000Z
|
Data Structure/Linked_List/Singly_Linked_List/Delete_Kth_Node_From_End.py
|
Hasindu5512/HacktoberFest-2020-Data-Structure-and-algorithm
|
64ab102fd363fc47d2f22ba9f5a5a8130891efd4
|
[
"MIT"
] | 45
|
2020-10-04T15:23:45.000Z
|
2020-10-30T04:46:30.000Z
|
# Python3 program for Fibonacci search.
from bisect import bisect_left
# Returns index of x if present, else
# returns -1
def fibMonaccianSearch(arr, x, n):
# Initialize fibonacci numbers
fibMMm2 = 0 # (m-2)'th Fibonacci No.
fibMMm1 = 1 # (m-1)'th Fibonacci No.
fibM = fibMMm2 + fibMMm1 # m'th Fibonacci
# fibM is going to store the smallest
# Fibonacci Number greater than or equal to n
while (fibM < n):
fibMMm2 = fibMMm1
fibMMm1 = fibM
fibM = fibMMm2 + fibMMm1
# Marks the eliminated range from front
offset = -1
# while there are elements to be inspected.
# Note that we compare arr[fibMm2] with x.
# When fibM becomes 1, fibMm2 becomes 0
while (fibM > 1):
# Check if fibMm2 is a valid location
i = min(offset + fibMMm2, n - 1)
# If x is greater than the value at
# index fibMm2, cut the subarray array
# from offset to i
if (arr[i] < x):
fibM = fibMMm1
fibMMm1 = fibMMm2
fibMMm2 = fibM - fibMMm1
offset = i
# If x is less than the value at
# index fibMm2, cut the subarray
# after i+1
elif (arr[i] > x):
fibM = fibMMm2
fibMMm1 = fibMMm1 - fibMMm2
fibMMm2 = fibM - fibMMm1
# element found. return index
else:
return i
# comparing the last element with x
if (fibMMm1 and arr[offset + 1] == x):
return offset + 1
# element not found. return -1
return -1
# Driver Code
arr = [10, 22, 35, 40, 45, 50,
80, 82, 85, 90, 100]
n = len(arr)
x = 85
print("Found at index:",
fibMonaccianSearch(arr, x, n))
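# Added cross-check (illustrative, not part of the original file): since the
# array is sorted, bisect_left (already imported above) should agree with the
# Fibonacci search, whether or not the key is present.
idx = bisect_left(arr, x)
expected = idx if idx < n and arr[idx] == x else -1
print("bisect_left agrees:", expected == fibMonaccianSearch(arr, x, n))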
| 26.119403
| 50
| 0.563429
|
660855b1a714823c84a6ef6602c59f9473fff684
| 8,156
|
py
|
Python
|
mlmodels/model_tf/rl/21.neuro-evolution-agent.py
|
gitter-badger/mlmodels
|
f08cc9b6ec202d4ad25ecdda2f44487da387569d
|
[
"MIT"
] | 1
|
2022-03-11T07:57:48.000Z
|
2022-03-11T07:57:48.000Z
|
mlmodels/model_tf/rl/21.neuro-evolution-agent.py
|
whitetiger1002/mlmodels
|
f70f1da7434e8855eed50adc67b49cc169f2ea24
|
[
"MIT"
] | null | null | null |
mlmodels/model_tf/rl/21.neuro-evolution-agent.py
|
whitetiger1002/mlmodels
|
f70f1da7434e8855eed50adc67b49cc169f2ea24
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
sns.set()
# In[2]:
df = pd.read_csv("../dataset/GOOG-year.csv")
df.head()
# In[3]:
close = df.Close.values.tolist()
initial_money = 10000
window_size = 30
skip = 1
# In[4]:
class neuralnetwork:
def __init__(self, id_, hidden_size=128):
self.W1 = np.random.randn(window_size, hidden_size) / np.sqrt(window_size)
self.W2 = np.random.randn(hidden_size, 3) / np.sqrt(hidden_size)
self.fitness = 0
self.id = id_
def relu(X):
return np.maximum(X, 0)
def softmax(X):
e_x = np.exp(X - np.max(X, axis=-1, keepdims=True))
return e_x / np.sum(e_x, axis=-1, keepdims=True)
def feed_forward(X, nets):
a1 = np.dot(X, nets.W1)
z1 = relu(a1)
a2 = np.dot(z1, nets.W2)
return softmax(a2)
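# Added shape note (illustrative): with window_size=30 and hidden_size=128 the
# network maps a (1, 30) state through W1 (30, 128) and W2 (128, 3) to a
# (1, 3) softmax over actions; act() takes the argmax, where 1 means buy and
# 2 means sell (see buy() below) and any other value leaves the position unchanged.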
# In[5]:
class NeuroEvolution:
def __init__(
self,
population_size,
mutation_rate,
model_generator,
state_size,
window_size,
trend,
skip,
initial_money,
):
self.population_size = population_size
self.mutation_rate = mutation_rate
self.model_generator = model_generator
self.state_size = state_size
self.window_size = window_size
self.half_window = window_size // 2
self.trend = trend
self.skip = skip
self.initial_money = initial_money
def _initialize_population(self):
self.population = []
for i in range(self.population_size):
self.population.append(self.model_generator(i))
def mutate(self, individual, scale=1.0):
mutation_mask = np.random.binomial(1, p=self.mutation_rate, size=individual.W1.shape)
individual.W1 += (
np.random.normal(loc=0, scale=scale, size=individual.W1.shape) * mutation_mask
)
mutation_mask = np.random.binomial(1, p=self.mutation_rate, size=individual.W2.shape)
individual.W2 += (
np.random.normal(loc=0, scale=scale, size=individual.W2.shape) * mutation_mask
)
return individual
def inherit_weights(self, parent, child):
child.W1 = parent.W1.copy()
child.W2 = parent.W2.copy()
return child
def crossover(self, parent1, parent2):
child1 = self.model_generator((parent1.id + 1) * 10)
child1 = self.inherit_weights(parent1, child1)
child2 = self.model_generator((parent2.id + 1) * 10)
child2 = self.inherit_weights(parent2, child2)
# first W
n_neurons = child1.W1.shape[1]
cutoff = np.random.randint(0, n_neurons)
child1.W1[:, cutoff:] = parent2.W1[:, cutoff:].copy()
child2.W1[:, cutoff:] = parent1.W1[:, cutoff:].copy()
# second W
n_neurons = child1.W2.shape[1]
cutoff = np.random.randint(0, n_neurons)
child1.W2[:, cutoff:] = parent2.W2[:, cutoff:].copy()
child2.W2[:, cutoff:] = parent1.W2[:, cutoff:].copy()
return child1, child2
def get_state(self, t):
window_size = self.window_size + 1
d = t - window_size + 1
block = self.trend[d : t + 1] if d >= 0 else -d * [self.trend[0]] + self.trend[0 : t + 1]
res = []
for i in range(window_size - 1):
res.append(block[i + 1] - block[i])
return np.array([res])
def act(self, p, state):
logits = feed_forward(state, p)
return np.argmax(logits, 1)[0]
def buy(self, individual):
initial_money = self.initial_money
starting_money = initial_money
state = self.get_state(0)
inventory = []
states_sell = []
states_buy = []
for t in range(0, len(self.trend) - 1, self.skip):
action = self.act(individual, state)
next_state = self.get_state(t + 1)
if action == 1 and starting_money >= self.trend[t]:
inventory.append(self.trend[t])
initial_money -= self.trend[t]
states_buy.append(t)
print(
"day %d: buy 1 unit at price %f, total balance %f"
% (t, self.trend[t], initial_money)
)
elif action == 2 and len(inventory):
bought_price = inventory.pop(0)
initial_money += self.trend[t]
states_sell.append(t)
try:
invest = ((self.trend[t] - bought_price) / bought_price) * 100
                except ZeroDivisionError:
invest = 0
print(
"day %d, sell 1 unit at price %f, investment %f %%, total balance %f,"
% (t, self.trend[t], invest, initial_money)
)
state = next_state
invest = ((initial_money - starting_money) / starting_money) * 100
total_gains = initial_money - starting_money
return states_buy, states_sell, total_gains, invest
def calculate_fitness(self):
for i in range(self.population_size):
initial_money = self.initial_money
starting_money = initial_money
state = self.get_state(0)
inventory = []
for t in range(0, len(self.trend) - 1, self.skip):
action = self.act(self.population[i], state)
next_state = self.get_state(t + 1)
if action == 1 and starting_money >= self.trend[t]:
inventory.append(self.trend[t])
starting_money -= self.trend[t]
elif action == 2 and len(inventory):
bought_price = inventory.pop(0)
starting_money += self.trend[t]
state = next_state
invest = ((starting_money - initial_money) / initial_money) * 100
self.population[i].fitness = invest
def evolve(self, generations=20, checkpoint=5):
self._initialize_population()
n_winners = int(self.population_size * 0.4)
n_parents = self.population_size - n_winners
for epoch in range(generations):
self.calculate_fitness()
fitnesses = [i.fitness for i in self.population]
sort_fitness = np.argsort(fitnesses)[::-1]
self.population = [self.population[i] for i in sort_fitness]
fittest_individual = self.population[0]
if (epoch + 1) % checkpoint == 0:
print(
"epoch %d, fittest individual %d with accuracy %f"
% (epoch + 1, sort_fitness[0], fittest_individual.fitness)
)
next_population = [self.population[i] for i in range(n_winners)]
total_fitness = np.sum([np.abs(i.fitness) for i in self.population])
parent_probabilities = [np.abs(i.fitness / total_fitness) for i in self.population]
parents = np.random.choice(
self.population, size=n_parents, p=parent_probabilities, replace=False
)
for i in np.arange(0, len(parents), 2):
child1, child2 = self.crossover(parents[i], parents[i + 1])
next_population += [self.mutate(child1), self.mutate(child2)]
self.population = next_population
return fittest_individual
# In[6]:
population_size = 100
generations = 100
mutation_rate = 0.1
neural_evolve = NeuroEvolution(
population_size,
mutation_rate,
neuralnetwork,
window_size,
window_size,
close,
skip,
initial_money,
)
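# Note: the NeuroEvolution constructor above receives window_size twice, once as
# state_size and once as window_size; the `generations` variable defined earlier
# is not used here, since evolve() below is called with 50 generations.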
# In[7]:
fittest_nets = neural_evolve.evolve(50)
# In[8]:
states_buy, states_sell, total_gains, invest = neural_evolve.buy(fittest_nets)
# In[9]:
fig = plt.figure(figsize=(15, 5))
plt.plot(close, color="r", lw=2.0)
plt.plot(close, "^", markersize=10, color="m", label="buying signal", markevery=states_buy)
plt.plot(close, "v", markersize=10, color="k", label="selling signal", markevery=states_sell)
plt.title("total gains %f, total investment %f%%" % (total_gains, invest))
plt.legend()
plt.show()
# In[ ]:
| 30.432836
| 97
| 0.582761
|
490a9c6b53dfd836a8861646e415bfb04a48cb42
| 647
|
py
|
Python
|
Collect/JRC/Occurrence.py
|
ali1100/wa
|
700e5014533c45f38a245c3abdeacc537cb307bc
|
[
"Apache-2.0"
] | 16
|
2017-04-27T21:22:37.000Z
|
2020-10-21T12:57:03.000Z
|
Collect/JRC/Occurrence.py
|
ali1100/wa
|
700e5014533c45f38a245c3abdeacc537cb307bc
|
[
"Apache-2.0"
] | 1
|
2017-06-17T08:07:53.000Z
|
2017-08-22T12:28:37.000Z
|
Collect/JRC/Occurrence.py
|
wateraccounting/wa
|
29ed8e7eac732135678a5d171cd5e53a54c95313
|
[
"Apache-2.0"
] | 19
|
2016-10-24T13:24:34.000Z
|
2020-02-03T17:42:22.000Z
|
"""
Authors: Tim Hessels
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Collect/JRC
"""
import sys
from DataAccess import DownloadData
def main(Dir, latlim, lonlim, Waitbar = 1):
"""
This function downloads JRC water occurrence data for the specified spatial extent.
Keyword arguments:
Dir -- 'C:/file/to/path/'
latlim -- [ymin, ymax]
lonlim -- [xmin, xmax]
Waitbar -- 1 (Default) will print a waitbar
"""
    print('\nDownload JRC occurrence map')
DownloadData(Dir, latlim, lonlim, Waitbar)
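# Example call (hypothetical output directory and extent):
#   main(r'C:/file/to/path/', latlim=[50.5, 53.7], lonlim=[3.2, 7.3])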
if __name__ == '__main__':
main(sys.argv)
| 23.962963
| 87
| 0.678516
|
97fa1e8876afa7962b4bcd69e63da1517bfeff0f
| 1,870
|
py
|
Python
|
example/example.py
|
anyongjin/pylsd2
|
dc1cfc7bccae0fe843c9729f386958a11b4b3b92
|
[
"BSD-2-Clause"
] | 8
|
2021-07-28T16:09:01.000Z
|
2022-03-07T01:34:50.000Z
|
example/example.py
|
anyongjin/pylsd2
|
dc1cfc7bccae0fe843c9729f386958a11b4b3b92
|
[
"BSD-2-Clause"
] | 1
|
2021-06-24T09:54:44.000Z
|
2021-06-24T10:22:40.000Z
|
example/example.py
|
anyongjin/pylsd2
|
dc1cfc7bccae0fe843c9729f386958a11b4b3b92
|
[
"BSD-2-Clause"
] | 2
|
2021-06-24T08:55:25.000Z
|
2021-09-30T07:52:41.000Z
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# File : example.py
# Author: anyongjin
# Date : 2021/6/16
import os
import numpy as np
from pylsd2 import LineSegmentDetection, LineSegmentDetectionED
def extract_lines(gray, extract_type='lsd'):
if extract_type == 'lsd':
lines = LineSegmentDetection(gray)
else:
lines = LineSegmentDetectionED(gray)
return lines
def get_out_path(in_path, extract_type):
dirname, fname = os.path.split(in_path)
base_name, ext = os.path.splitext(fname)
out_dir = os.path.join(dirname, 'out')
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
return os.path.join(out_dir, base_name + '_' + extract_type + ext)
def detect_lines_with_cv2(path, extract_type='lsd'):
import cv2
out_path = get_out_path(path, extract_type)
src = cv2.imread(path, cv2.IMREAD_COLOR)
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
lines = extract_lines(gray, extract_type)
for l in lines:
pt1, pt2 = tuple(l[:2]), tuple(l[2:4])
cv2.line(src, pt1, pt2, (0, 255, 0), 1)
cv2.imwrite(out_path, src)
def detect_lines_with_PIL(path, extract_type='lsd'):
from PIL import Image, ImageDraw
out_path = get_out_path(path, extract_type)
img = Image.open(path)
gray = np.asarray(img.convert('L'))
lines = extract_lines(gray, extract_type)
draw = ImageDraw.Draw(img)
for l in lines:
pt1, pt2 = l[:2], l[2:4]
draw.line((pt1, pt2), fill=(0, 0, 255), width=1)
img.save(out_path)
if __name__ == '__main__':
img_dir = '.'
img_exts = {'.jpg', '.png'}
names = os.listdir(img_dir)
full_paths = [os.path.join(img_dir, n) for n in names if os.path.splitext(n)[-1] in img_exts]
for path in full_paths:
detect_lines_with_cv2(path, 'lsd')
detect_lines_with_cv2(path, 'edlines')
print('complete')
| 29.68254
| 97
| 0.656684
|
70038e8840ccebaa0a9847b376feb571b003f3d2
| 3,566
|
py
|
Python
|
sudoku_MC.py
|
zeryabmoussaoui/sudoku-optimization-solver
|
caeab796a1e231996a7e80ed6e074eaf92b3d10c
|
[
"Unlicense"
] | null | null | null |
sudoku_MC.py
|
zeryabmoussaoui/sudoku-optimization-solver
|
caeab796a1e231996a7e80ed6e074eaf92b3d10c
|
[
"Unlicense"
] | null | null | null |
sudoku_MC.py
|
zeryabmoussaoui/sudoku-optimization-solver
|
caeab796a1e231996a7e80ed6e074eaf92b3d10c
|
[
"Unlicense"
] | null | null | null |
# Ref : https://www.lptmc.jussieu.fr/user/talbot/sudoku.html ( metropolis method)
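# The acceptance step below follows the standard Metropolis criterion: a trial
# swap that lowers the energy (ep < e) is always accepted, otherwise it is
# accepted with probability exp((e - ep) / temp) and reverted when rejected.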
import numpy as np
import math
import random
import matplotlib.pyplot as plt
# Algorithm Params
temp = 0.10
ntrial = 1000000
emin = 18
zero = 0
# Functions used to compute energy
def check(i, k, ncheck):
# determines number of unique elements in each row (k=1) or column (k!=1)
nu=0
if k!=1:
ncheck=np.transpose(ncheck)
nu=len(np.unique(ncheck[i,]))
return(nu)
def checksq(Is, Js, ncheck):
nu=0
    sCell=int(pow(ncheck.size,1/4)) # this kind of value could be computed once outside the function
subcell=ncheck[sCell*Is:sCell*Is+sCell,sCell*Js:sCell*Js+sCell]
nu=len(np.unique(subcell))
return(nu)
def energy(ncheck):
nsum=0
nCell=int(pow(ncheck.size,1/4))
nmax=3*pow(nCell,4)
nRange=np.arange(ncheck.shape[1])
cRange=np.arange(int(pow(ncheck.size,1/4)))
for i in nRange:
        nsum += check(i,1,ncheck) + check(i,2,ncheck)
    for i in cRange:
        for j in cRange:
            nsum += checksq(i,j,ncheck)
return(nmax-nsum)
## Read the Cell
gameFile="sudoku.dat"
n=np.fromfile(gameFile,dtype=int,sep=" ")
#n=np.zeros(25*25) # only for test
size=int(math.sqrt(len(n)))
gameRange=np.arange(size)
cellSize=int(math.sqrt(size))
cellRange=np.arange(cellSize)
n=n.reshape(size,size)
## Initialise variables
nums=np.zeros(size)
num1=np.zeros(size)
ntemp=0
ep=0
mask=(n==0)*1
# Fill the Cell with resolved boxes
for ib in cellRange:
for jb in cellRange:
for k in gameRange:
nums[k]=k+1
for i in cellRange:
for j in cellRange:
i1 = ib*cellSize + i
j1 = jb*cellSize + j
if n[i1][j1] !=0:
ix = n[i1][j1]
nums[ix-1]=0
iy = -1
for k in gameRange:
if nums[k]!=0:
iy+=1
num1[iy] = nums[k]
kk=0
for i in cellRange:
for j in cellRange:
i1 = ib*cellSize + i
j1 = jb*cellSize + j
if n[i1][j1] ==0:
n[i1][j1]=num1[kk]
kk+=1
print(n)
e=energy(n) # To optimize
En=[]
# start Monte Carlo loop
for ll in np.arange(ntrial):
En.append(e)
# pick at random a block and two moveable elements in the block
ib = cellSize*(int)(cellSize*random.uniform(0,1))
jb = cellSize*(int)(cellSize*random.uniform(0,1))
while True:
i1 = (int)(cellSize*random.uniform(0,1))
j1 = (int)(cellSize*random.uniform(0,1))
if mask[ib+i1][jb+j1]==1:
break
while True:
i2 = (int)(cellSize*random.uniform(0,1))
j2 = (int)(cellSize*random.uniform(0,1))
if mask[ib+i2][jb+j2]==1:
break
# swap and compute the energy of the trial
ntemp = n[ib+i1][jb+j1]
n[ib+i1][jb+j1] = n[ib+i2][jb+j2]
n[ib+i2][jb+j2] = ntemp
ep=energy(n)
if ep<emin:
print("Step ",ll," energy= ",ep)
if ep==0: # Solution found
break
if math.exp((e-ep)/temp) > random.uniform(0,1):
e=ep
else:
ntemp=n[ib+i1][jb+j1]
n[ib+i1][jb+j1]=n[ib+i2][jb+j2]
n[ib+i2][jb+j2]=ntemp
if ep==0:
print("Solution found : ")
print(n)
plt.plot(En)
else:
print("No solution found after ",ntrial ," steps")
plt.plot(En)
| 26.61194
| 82
| 0.530566
|
ee49653542796bb6cd29fd1eb265eb87ce9e4f80
| 602
|
py
|
Python
|
vision_tile_query/config.py
|
eos-vision/tile-query
|
f6256ebb13c5c6f9beec9bb9a9f0cf0a85b23d0e
|
[
"MIT"
] | 1
|
2019-04-22T07:45:22.000Z
|
2019-04-22T07:45:22.000Z
|
vision_tile_query/config.py
|
eos-vision/tile-query
|
f6256ebb13c5c6f9beec9bb9a9f0cf0a85b23d0e
|
[
"MIT"
] | null | null | null |
vision_tile_query/config.py
|
eos-vision/tile-query
|
f6256ebb13c5c6f9beec9bb9a9f0cf0a85b23d0e
|
[
"MIT"
] | 2
|
2020-09-04T14:53:59.000Z
|
2020-10-28T02:30:29.000Z
|
# Default SRID numbers
MERCATOR_SRID = 4326
WEB_MERCATOR_SRID = 3857
# MVT query settings
# 1% of tile width
RELATIVE_BUFFER_WIDTH = 0.01
# default tile extent value. See details https://postgis.net/docs/ST_AsMVT.html
DEFAULT_EXTENT = 4096
# 0.5% of tile area for MVT polygon simplification
SIMPLIFY_COEFFICIENT = 0.000005
# Default columns names
GEOMETRY_COL_NAME = 'geometry'
ID_COL_NAME = 'id'
# Vector objects types
POINT_TYPES = ['POINT', 'MULTIPOINT']
POLYGON_TYPES = ['POLYGON', 'MULTIPOLYGON']
LINE_TYPES = ['LINESTRING', 'MULTILINESTRING']
SIMPLIFICATION_TYPES = LINE_TYPES + POLYGON_TYPES
| 27.363636
| 79
| 0.774086
|
1ee2ef4b08955ddb53e14b604b2fc406dae5609e
| 9,819
|
py
|
Python
|
models/train_rels.py
|
ivalab/Scene_Graph_Parsing
|
179b1653defe615de98165784ae3a527f822bf3a
|
[
"MIT"
] | null | null | null |
models/train_rels.py
|
ivalab/Scene_Graph_Parsing
|
179b1653defe615de98165784ae3a527f822bf3a
|
[
"MIT"
] | null | null | null |
models/train_rels.py
|
ivalab/Scene_Graph_Parsing
|
179b1653defe615de98165784ae3a527f822bf3a
|
[
"MIT"
] | null | null | null |
"""
Training script for scene graph detection. Integrated with my faster rcnn setup
"""
from dataloaders.georgia_tech import GTDataLoader, GT
import numpy as np
from torch import optim
import torch
import pandas as pd
import time
import os
from torch import nn
from config import ModelConfig, BOX_SCALE_GT, IM_SCALE_GT
from torch.nn import functional as F
from lib.pytorch_misc import optimistic_restore, de_chunkize, clip_grad_norm
from lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from lib.pytorch_misc import print_para
from torch.optim.lr_scheduler import ReduceLROnPlateau
conf = ModelConfig()
if conf.model == 'rtnet':
from lib.rt_rel_model import RTRelModel
elif conf.model == 'stanford':
from lib.rel_model_stanford import RelModelStanford as RTRelModel
else:
raise ValueError()
train, val, _ = GT.splits(num_val_im=conf.val_size, filter_duplicate_rels=False,
use_proposals=conf.use_proposals,
filter_non_overlap=conf.mode == 'sgdet')
train_loader, val_loader = GTDataLoader.splits(train, val, mode='rel',
batch_size=conf.batch_size,
num_workers=conf.num_workers,
num_gpus=conf.num_gpus)
detector = RTRelModel(classes=train.ind_to_classes, aff_classes=train.ind_to_aff_classes,
att_classes=train.ind_to_att_classes, rel_classes=train.ind_to_predicates,
num_gpus=conf.num_gpus, mode=conf.mode, require_overlap_det=True,
use_resnet=conf.use_resnet, order=conf.order,
nl_edge=conf.nl_edge, nl_obj=conf.nl_obj, hidden_dim=conf.hidden_dim,
use_proposals=conf.use_proposals,
pass_in_obj_feats_to_decoder=conf.pass_in_obj_feats_to_decoder,
pass_in_obj_feats_to_edge=conf.pass_in_obj_feats_to_edge,
pooling_dim=conf.pooling_dim,
rec_dropout=conf.rec_dropout,
use_bias=conf.use_bias,
use_tanh=conf.use_tanh,
limit_vision=conf.limit_vision
)
for _, param in detector.detector.rpn_head.named_parameters():
param.requires_grad = False
print(print_para(detector), flush=True)
def get_optim(lr):
# Lower the learning rate on the VGG fully connected layers by 1/10th. It's a hack, but it helps
# stabilize the models.
fc_params = [p for n,p in detector.named_parameters() if n.startswith('roi_fmap') and p.requires_grad]
non_fc_params = [p for n,p in detector.named_parameters() if not n.startswith('roi_fmap') and p.requires_grad]
params = [{'params': fc_params, 'lr': lr / 10.0}, {'params': non_fc_params}]
# params = [p for n,p in detector.named_parameters() if p.requires_grad]
if conf.adam:
optimizer = optim.Adam(params, weight_decay=conf.l2, lr=lr, eps=1e-3)
else:
optimizer = optim.SGD(params, weight_decay=conf.l2, lr=lr, momentum=0.9)
scheduler = ReduceLROnPlateau(optimizer, 'max', patience=3, factor=0.1,
verbose=True, threshold=0.0001, threshold_mode='abs', cooldown=1)
return optimizer, scheduler
ckpt = torch.load(conf.ckpt)
if conf.ckpt.split('-')[-2].split('/')[-1] == 'vgrel':
print("Loading EVERYTHING")
start_epoch = ckpt['epoch']
if not optimistic_restore(detector, ckpt['state_dict']):
start_epoch = -1
# optimistic_restore(detector.detector, torch.load('checkpoints/vgdet/vg-28.tar')['state_dict'])
else:
start_epoch = -1
optimistic_restore(detector.detector, ckpt['state_dict'])
if (not conf.use_resnet):
detector.roi_fmap[1][0].weight.data.copy_(ckpt['state_dict']['roi_fmap.0.weight'])
detector.roi_fmap[1][3].weight.data.copy_(ckpt['state_dict']['roi_fmap.3.weight'])
detector.roi_fmap[1][0].bias.data.copy_(ckpt['state_dict']['roi_fmap.0.bias'])
detector.roi_fmap[1][3].bias.data.copy_(ckpt['state_dict']['roi_fmap.3.bias'])
detector.roi_fmap_obj[0].weight.data.copy_(ckpt['state_dict']['roi_fmap.0.weight'])
detector.roi_fmap_obj[3].weight.data.copy_(ckpt['state_dict']['roi_fmap.3.weight'])
detector.roi_fmap_obj[0].bias.data.copy_(ckpt['state_dict']['roi_fmap.0.bias'])
detector.roi_fmap_obj[3].bias.data.copy_(ckpt['state_dict']['roi_fmap.3.bias'])
detector.cuda()
def train_epoch(epoch_num):
detector.train()
tr = []
start = time.time()
for b, batch in enumerate(train_loader):
tr.append(train_batch(batch, verbose=b % (conf.print_interval*10) == 0)) #b == 0))
if b % conf.print_interval == 0 and b >= conf.print_interval:
mn = pd.concat(tr[-conf.print_interval:], axis=1).mean(1)
time_per_batch = (time.time() - start) / conf.print_interval
print("\ne{:2d}b{:5d}/{:5d} {:.3f}s/batch, {:.1f}m/epoch".format(
epoch_num, b, len(train_loader), time_per_batch, len(train_loader) * time_per_batch / 60))
print(mn)
print('-----------', flush=True)
start = time.time()
return pd.concat(tr, axis=1)
def train_batch(b, verbose=False):
"""
:param b: contains:
:param imgs: the image, [batch_size, 3, IM_SIZE, IM_SIZE]
:param all_anchors: [num_anchors, 4] the boxes of all anchors that we'll be using
:param all_anchor_inds: [num_anchors, 2] array of the indices into the concatenated
RPN feature vector that give us all_anchors,
each one (img_ind, fpn_idx)
:param im_sizes: a [batch_size, 4] numpy array of (h, w, scale, num_good_anchors) for each image.
:param num_anchors_per_img: int, number of anchors in total over the feature pyramid per img
Training parameters:
:param train_anchor_inds: a [num_train, 5] array of indices for the anchors that will
be used to compute the training loss (img_ind, fpn_idx)
:param gt_boxes: [num_gt, 4] GT boxes over the batch.
:param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class)
:return:
"""
result = detector[b]
    weights_np = np.array([10., 1., 10., 10.])  # per-relation-class weights for the multi-label BCE on rel_dists
weights_np = np.tile(weights_np, (int(result.rel_dists.size(0)), 1))
weights = torch.cuda.FloatTensor(weights_np)
multilabel_loss_w = nn.BCELoss(weight=weights)
multilabel_loss = nn.BCELoss()
losses = {}
losses['class_loss'] = F.cross_entropy(result.rm_obj_dists, result.rm_obj_labels)
losses['aff_loss'] = multilabel_loss(result.rm_obj_aff_dists, result.rm_obj_aff_labels.float())
losses['att_loss'] = multilabel_loss(result.rm_obj_att_dists, result.rm_obj_att_labels.float())
losses['rel_loss'] = multilabel_loss_w(result.rel_dists, result.rel_labels[:, 3:].float())
loss = sum(losses.values())
optimizer.zero_grad()
loss.backward()
clip_grad_norm(
[(n, p) for n, p in detector.named_parameters() if p.grad is not None],
max_norm=conf.clip, verbose=verbose, clip=True)
losses['total'] = loss
optimizer.step()
res = pd.Series({x: y.item() for x, y in losses.items()})
return res
def val_epoch():
detector.eval()
evaluator = BasicSceneGraphEvaluator.all_modes()
for val_b, batch in enumerate(val_loader):
val_batch(conf.num_gpus * val_b, batch, evaluator)
evaluator[conf.mode].print_stats()
return np.mean(evaluator[conf.mode].result_dict[conf.mode + '_recall'][100])
def val_batch(batch_num, b, evaluator):
det_res = detector[b]
if conf.num_gpus == 1:
det_res = [det_res]
for i, (boxes_i, objs_i, obj_scores_i, rels_i, pred_scores_i) in enumerate(det_res):
gt_entry = {
'gt_classes': val.gt_classes[batch_num + i].copy(),
'gt_aff_classes':val.gt_aff_classes[batch_num + i].copy(),
'gt_att_classes':val.gt_att_classes[batch_num + i].copy(),
'gt_relations': val.relationships[batch_num + i].copy(),
'gt_boxes': val.gt_boxes[batch_num + i].copy(),
}
assert np.all(objs_i[rels_i[:, 0]] > 0) and np.all(objs_i[rels_i[:, 1]] > 0)
pred_entry = {
'pred_boxes': boxes_i * BOX_SCALE_GT/IM_SCALE_GT,
'pred_classes': objs_i,
'pred_rel_inds': rels_i,
'obj_scores': obj_scores_i,
'rel_scores': pred_scores_i, # hack for now.
}
evaluator[conf.mode].evaluate_scene_graph_entry(
gt_entry,
pred_entry,
)
print("Training starts now!")
optimizer, scheduler = get_optim(conf.lr * conf.num_gpus * conf.batch_size)
for epoch in range(start_epoch + 1, start_epoch + 1 + conf.num_epochs):
rez = train_epoch(epoch)
print("overall{:2d}: ({:.3f})\n{}".format(epoch, rez.mean(1)['total'], rez.mean(1)), flush=True)
if conf.save_dir is not None:
torch.save({
'epoch': epoch,
'state_dict': detector.state_dict(), #{k:v for k,v in detector.state_dict().items() if not k.startswith('detector.')},
# 'optimizer': optimizer.state_dict(),
}, os.path.join(conf.save_dir, '{}-{}.tar'.format('vgrel', epoch)))
# save backbone network
torch.save({'epoch': epoch, 'state_dict': detector.detector.features.state_dict()},
os.path.join(conf.save_dir, '{}-{}.tar'.format('resnet_backbone', epoch)))
# mAp = val_epoch()
# scheduler.step(mAp)
if any([pg['lr'] <= (conf.lr * conf.num_gpus * conf.batch_size)/99.0 for pg in optimizer.param_groups]):
print("exiting training early", flush=True)
break
| 44.631818
| 130
| 0.642021
|
a3b19568d814cfd31a3d7a35df984d858abe0c42
| 389
|
py
|
Python
|
lesson_02/02_02_a.py
|
amindmobile/geekbrains-python-002
|
4bc2f7af755d00e73ddc48f1138830cb78e87034
|
[
"MIT"
] | null | null | null |
lesson_02/02_02_a.py
|
amindmobile/geekbrains-python-002
|
4bc2f7af755d00e73ddc48f1138830cb78e87034
|
[
"MIT"
] | null | null | null |
lesson_02/02_02_a.py
|
amindmobile/geekbrains-python-002
|
4bc2f7af755d00e73ddc48f1138830cb78e87034
|
[
"MIT"
] | null | null | null |
# they did not teach this in the course, but we know about it anyway ;)
from itertools import zip_longest
# A bit convoluted, but otherwise a plain input() returns a single string, which would then be iterated character by character
user_list = list(map(int, input('Enter numbers separated by spaces: ').split()))
nl = []
for a, b in zip_longest(user_list[::2], user_list[1::2]):
    if b is not None:
        nl.append(b)
    if a is not None:
        nl.append(a)
print(nl)
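# For example, entering "1 2 3 4 5" swaps adjacent pairs and prints [2, 1, 4, 3, 5].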
| 27.785714
| 91
| 0.670951
|
a40998857d9f4a6479c064e5dd65dee7df01da1c
| 358
|
py
|
Python
|
Leetcode/0338. Counting Bits.py
|
luckyrabbit85/Python
|
ed134fd70b4a7b84b183b87b85ad5190f54c9526
|
[
"MIT"
] | 1
|
2021-07-15T18:40:26.000Z
|
2021-07-15T18:40:26.000Z
|
Leetcode/0338. Counting Bits.py
|
luckyrabbit85/Python
|
ed134fd70b4a7b84b183b87b85ad5190f54c9526
|
[
"MIT"
] | null | null | null |
Leetcode/0338. Counting Bits.py
|
luckyrabbit85/Python
|
ed134fd70b4a7b84b183b87b85ad5190f54c9526
|
[
"MIT"
] | null | null | null |
class Solution:
def countBits(self, n: int) -> list[int]:
res = []
for i in range(n + 1):
res.append(bin(i).count("1"))
return res
class Solution:
def countBits(self, n: int) -> list[int]:
res = [0] * (n + 1)
for x in range(1, n + 1):
            res[x] = res[x & (x - 1)] + 1  # x & (x - 1) drops the lowest set bit of x
return res
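# Worked example for the DP solution: for x = 12 (0b1100), x & (x - 1) = 8 (0b1000),
# so res[12] = res[8] + 1 = 2, which matches the two set bits of 12.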
| 23.866667
| 45
| 0.458101
|
27db1b1598307857f582ab00faa76d45e9980fda
| 2,760
|
py
|
Python
|
district42_exp_types/uuid_str/_uuid_str_validator.py
|
nikitanovosibirsk/district42-exp-types
|
e36e43da62f32d58d4b14c65afa16856dc8849e1
|
[
"Apache-2.0"
] | null | null | null |
district42_exp_types/uuid_str/_uuid_str_validator.py
|
nikitanovosibirsk/district42-exp-types
|
e36e43da62f32d58d4b14c65afa16856dc8849e1
|
[
"Apache-2.0"
] | 2
|
2021-08-01T05:02:21.000Z
|
2021-08-01T10:06:28.000Z
|
district42_exp_types/uuid_str/_uuid_str_validator.py
|
nikitanovosibirsk/district42-exp-types
|
e36e43da62f32d58d4b14c65afa16856dc8849e1
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, cast
from uuid import UUID
from niltype import Nil, Nilable
from th import PathHolder
from valera import Formatter, ValidationResult, Validator
from valera.errors import TypeValidationError, ValidationError, ValueValidationError
from ._uuid_str_schema import UUIDStrSchema
__all__ = ("UUIDStrValidator", "StrCaseValidationError", "StrCaseFormatter",)
class StrCaseValidationError(ValidationError):
def __init__(self, path: PathHolder, actual_value: str, expected_case: str) -> None:
self.path = path
self.actual_value = actual_value
self.expected_case = expected_case
def format(self, formatter: Formatter) -> str:
return cast(str, formatter.format_str_case_error(self))
def __repr__(self) -> str:
return (f"{self.__class__.__name__}({self.path!r}, {self.actual_value!r}, "
f"{self.expected_case!r})")
class StrCaseFormatter(Formatter, extend=True):
def format_str_case_error(self, error: StrCaseValidationError) -> str:
actual_type = self._get_type(error.actual_value)
formatted_path = self._at_path(error.path)
return (f"Value {actual_type}{formatted_path} "
f"must be in {error.expected_case} case, but {error.actual_value!r} given")
class UUIDStrValidator(Validator, extend=True):
def visit_uuid_str(self, schema: UUIDStrSchema, *,
value: Any = Nil, path: Nilable[PathHolder] = Nil,
**kwargs: Any) -> ValidationResult:
result = self._validation_result_factory()
if path is Nil:
path = self._path_holder_factory()
if error := self._validate_type(path, value, str):
return result.add_error(error)
try:
actual_value = UUID(value)
except (TypeError, ValueError):
if schema.props.value is not Nil:
error = ValueValidationError(path, value, schema.props.value)
else:
error = TypeValidationError(path, value, UUID)
return result.add_error(error)
if schema.props.value is not Nil:
if actual_value != UUID(schema.props.value):
error = ValueValidationError(path, value, schema.props.value)
return result.add_error(error)
if schema.props.is_lowercase is not Nil:
if not value.islower():
error = StrCaseValidationError(path, value, str.lower.__name__)
return result.add_error(error)
if schema.props.is_uppercase is not Nil:
if not value.isupper():
error = StrCaseValidationError(path, value, str.upper.__name__)
return result.add_error(error)
return result
| 38.333333
| 91
| 0.654348
|
6ef2845703855fa225335b7b68afaa9c7d3a592e
| 206
|
py
|
Python
|
example/eulerproject/problem1.py
|
Xe/code
|
d970038329f7c4e4f0ee9dcd1b345741dd0fcc51
|
[
"Zlib"
] | 7
|
2015-03-26T07:35:06.000Z
|
2021-12-09T00:03:33.000Z
|
example/eulerproject/problem1.py
|
Xe/code
|
d970038329f7c4e4f0ee9dcd1b345741dd0fcc51
|
[
"Zlib"
] | null | null | null |
example/eulerproject/problem1.py
|
Xe/code
|
d970038329f7c4e4f0ee9dcd1b345741dd0fcc51
|
[
"Zlib"
] | 1
|
2020-11-03T22:59:31.000Z
|
2020-11-03T22:59:31.000Z
|
#!/usr/bin/python
numbers = [range(1000), range(1000)]
final = filter(lambda x: x % 3 == 0, numbers[0])
final.extend(filter(lambda x: x % 5 == 0, numbers[1]))
print reduce((lambda x,y: x+y), set(final))
| 22.888889
| 54
| 0.631068
|
2df0c2578f25884d980bf46c51747169770e2b3f
| 32,657
|
py
|
Python
|
logging/google/cloud/logging_v2/gapic/logging_service_v2_client.py
|
theacodes/google-cloud-python
|
57dafcb78540e12c82f7ca0fc77d75edeb269390
|
[
"Apache-2.0"
] | 1
|
2020-10-25T04:39:41.000Z
|
2020-10-25T04:39:41.000Z
|
logging/google/cloud/logging_v2/gapic/logging_service_v2_client.py
|
theacodes/google-cloud-python
|
57dafcb78540e12c82f7ca0fc77d75edeb269390
|
[
"Apache-2.0"
] | 4
|
2018-11-13T22:15:36.000Z
|
2018-12-07T18:31:38.000Z
|
logging/google/cloud/logging_v2/gapic/logging_service_v2_client.py
|
theacodes/google-cloud-python
|
57dafcb78540e12c82f7ca0fc77d75edeb269390
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.logging.v2 LoggingServiceV2 API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.api import monitored_resource_pb2
from google.cloud.logging_v2.gapic import enums
from google.cloud.logging_v2.gapic import logging_service_v2_client_config
from google.cloud.logging_v2.gapic.transports import logging_service_v2_grpc_transport
from google.cloud.logging_v2.proto import log_entry_pb2
from google.cloud.logging_v2.proto import logging_pb2
from google.cloud.logging_v2.proto import logging_pb2_grpc
from google.protobuf import empty_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-cloud-logging', ).version
class LoggingServiceV2Client(object):
"""Service for ingesting and querying logs."""
SERVICE_ADDRESS = 'logging.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.logging.v2.LoggingServiceV2'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
LoggingServiceV2Client: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def log_path(cls, project, log):
"""Return a fully-qualified log string."""
return google.api_core.path_template.expand(
'projects/{project}/logs/{log}',
project=project,
log=log,
)
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
'projects/{project}',
project=project,
)
def __init__(self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None):
"""Constructor.
Args:
transport (Union[~.LoggingServiceV2GrpcTransport,
Callable[[~.Credentials, type], ~.LoggingServiceV2GrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
'The `client_config` argument is deprecated.',
PendingDeprecationWarning,
stacklevel=2)
else:
client_config = logging_service_v2_client_config.config
if channel:
warnings.warn(
'The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning,
stacklevel=2)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=logging_service_v2_grpc_transport.
LoggingServiceV2GrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.')
self.transport = transport
else:
self.transport = logging_service_v2_grpc_transport.LoggingServiceV2GrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION, )
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def delete_log(self,
log_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes all the log entries in a log.
The log reappears if it receives new entries.
Log entries written shortly before the delete operation might not be
deleted.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> log_name = client.log_path('[PROJECT]', '[LOG]')
>>>
>>> client.delete_log(log_name)
Args:
log_name (str): Required. The resource name of the log to delete:
::
"projects/[PROJECT_ID]/logs/[LOG_ID]"
"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
"folders/[FOLDER_ID]/logs/[LOG_ID]"
``[LOG_ID]`` must be URL-encoded. For example,
``"projects/my-project-id/logs/syslog"``,
``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``.
For more information about log names, see ``LogEntry``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'delete_log' not in self._inner_api_calls:
self._inner_api_calls[
'delete_log'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_log,
default_retry=self._method_configs['DeleteLog'].retry,
default_timeout=self._method_configs['DeleteLog'].timeout,
client_info=self._client_info,
)
request = logging_pb2.DeleteLogRequest(log_name=log_name, )
self._inner_api_calls['delete_log'](
request, retry=retry, timeout=timeout, metadata=metadata)
def write_log_entries(self,
entries,
log_name=None,
resource=None,
labels=None,
partial_success=None,
dry_run=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Writes log entries to Logging. This API method is the
only way to send log entries to Logging. This method
is used, directly or indirectly, by the Logging agent
(fluentd) and all logging libraries configured to use Logging.
A single request may contain log entries for a maximum of 1000
different resources (projects, organizations, billing accounts or
folders)
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> # TODO: Initialize `entries`:
>>> entries = []
>>>
>>> response = client.write_log_entries(entries)
Args:
entries (list[Union[dict, ~google.cloud.logging_v2.types.LogEntry]]): Required. The log entries to send to Logging. The order of log entries
in this list does not matter. Values supplied in this method's
``log_name``, ``resource``, and ``labels`` fields are copied into those
log entries in this list that do not include values for their
corresponding fields. For more information, see the ``LogEntry`` type.
If the ``timestamp`` or ``insert_id`` fields are missing in log entries,
then this method supplies the current time or a unique identifier,
respectively. The supplied values are chosen so that, among the log
entries that did not supply their own values, the entries earlier in the
list will sort before the entries later in the list. See the
``entries.list`` method.
Log entries with timestamps that are more than the `logs retention
period <https://cloud.google.com/logging/quota-policy>`__ in the past or
more than 24 hours in the future will not be available when calling
``entries.list``. However, those log entries can still be exported with
`LogSinks <https://cloud.google.com/logging/docs/api/tasks/exporting-logs>`__.
To improve throughput and to avoid exceeding the `quota
limit <https://cloud.google.com/logging/quota-policy>`__ for calls to
``entries.write``, you should try to include several log entries in this
list, rather than calling this method for each individual log entry.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.logging_v2.types.LogEntry`
log_name (str): Optional. A default log resource name that is assigned to all log
entries in ``entries`` that do not specify a value for ``log_name``:
::
"projects/[PROJECT_ID]/logs/[LOG_ID]"
"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
"folders/[FOLDER_ID]/logs/[LOG_ID]"
``[LOG_ID]`` must be URL-encoded. For example:
::
"projects/my-project-id/logs/syslog"
"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"
The permission logging.logEntries.create is needed on each project,
organization, billing account, or folder that is receiving new log
entries, whether the resource is specified in logName or in an
individual log entry.
resource (Union[dict, ~google.cloud.logging_v2.types.MonitoredResource]): Optional. A default monitored resource object that is assigned to all
log entries in ``entries`` that do not specify a value for ``resource``.
Example:
::
{ "type": "gce_instance",
"labels": {
"zone": "us-central1-a", "instance_id": "00000000000000000000" }}
See ``LogEntry``.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.logging_v2.types.MonitoredResource`
labels (dict[str -> str]): Optional. Default labels that are added to the ``labels`` field of all
log entries in ``entries``. If a log entry already has a label with the
same key as a label in this parameter, then the log entry's label is not
changed. See ``LogEntry``.
partial_success (bool): Optional. Whether valid entries should be written even if some other
entries fail due to INVALID\_ARGUMENT or PERMISSION\_DENIED errors. If
any entry is not written, then the response status is the error
associated with one of the failed entries and the response includes
error details keyed by the entries' zero-based index in the
``entries.write`` method.
dry_run (bool): Optional. If true, the request should expect normal response, but the
entries won't be persisted nor exported. Useful for checking whether the
logging API endpoints are working properly before sending valuable data.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.logging_v2.types.WriteLogEntriesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'write_log_entries' not in self._inner_api_calls:
self._inner_api_calls[
'write_log_entries'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.write_log_entries,
default_retry=self._method_configs['WriteLogEntries'].
retry,
default_timeout=self._method_configs['WriteLogEntries'].
timeout,
client_info=self._client_info,
)
request = logging_pb2.WriteLogEntriesRequest(
entries=entries,
log_name=log_name,
resource=resource,
labels=labels,
partial_success=partial_success,
dry_run=dry_run,
)
return self._inner_api_calls['write_log_entries'](
request, retry=retry, timeout=timeout, metadata=metadata)
def list_log_entries(self,
resource_names,
project_ids=None,
filter_=None,
order_by=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists log entries. Use this method to retrieve log entries from Logging.
For ways to export log entries, see `Exporting
Logs <https://cloud.google.com/logging/docs/export>`__.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> # TODO: Initialize `resource_names`:
>>> resource_names = []
>>>
>>> # Iterate over all results
>>> for element in client.list_log_entries(resource_names):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_log_entries(resource_names).pages:
... for element in page:
... # process element
... pass
Args:
resource_names (list[str]): Required. Names of one or more parent resources from which to retrieve
log entries:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
Projects listed in the ``project_ids`` field are added to this list.
project_ids (list[str]): Deprecated. Use ``resource_names`` instead. One or more project
identifiers or project numbers from which to retrieve log entries.
Example: ``"my-project-1A"``. If present, these project identifiers are
converted to resource name format and added to the list of resources in
``resource_names``.
filter_ (str): Optional. A filter that chooses which log entries to return. See
`Advanced Logs
Filters <https://cloud.google.com/logging/docs/view/advanced_filters>`__.
Only log entries that match the filter are returned. An empty filter
matches all log entries in the resources listed in ``resource_names``.
Referencing a parent resource that is not listed in ``resource_names``
will cause the filter to return no results. The maximum length of the
filter is 20000 characters.
order_by (str): Optional. How the results should be sorted. Presently, the only
permitted values are ``"timestamp asc"`` (default) and
``"timestamp desc"``. The first option returns entries in order of
increasing values of ``LogEntry.timestamp`` (oldest first), and the
second option returns entries in order of decreasing timestamps (newest
first). Entries with equal timestamps are returned in order of their
``insert_id`` values.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.logging_v2.types.LogEntry` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_log_entries' not in self._inner_api_calls:
self._inner_api_calls[
'list_log_entries'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_log_entries,
default_retry=self._method_configs['ListLogEntries'].retry,
default_timeout=self._method_configs['ListLogEntries'].
timeout,
client_info=self._client_info,
)
request = logging_pb2.ListLogEntriesRequest(
resource_names=resource_names,
project_ids=project_ids,
filter=filter_,
order_by=order_by,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_log_entries'],
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='entries',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def list_monitored_resource_descriptors(
self,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists the descriptors for monitored resource types used by Logging.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> # Iterate over all results
>>> for element in client.list_monitored_resource_descriptors():
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_monitored_resource_descriptors().pages:
... for element in page:
... # process element
... pass
Args:
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.logging_v2.types.MonitoredResourceDescriptor` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_monitored_resource_descriptors' not in self._inner_api_calls:
self._inner_api_calls[
'list_monitored_resource_descriptors'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_monitored_resource_descriptors,
default_retry=self.
_method_configs['ListMonitoredResourceDescriptors'].retry,
default_timeout=self._method_configs[
'ListMonitoredResourceDescriptors'].timeout,
client_info=self._client_info,
)
request = logging_pb2.ListMonitoredResourceDescriptorsRequest(
page_size=page_size, )
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_monitored_resource_descriptors'],
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='resource_descriptors',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def list_logs(self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists the logs in projects, organizations, folders, or billing accounts.
Only logs that have entries are listed.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in client.list_logs(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_logs(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The resource name that owns the logs:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`str` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_logs' not in self._inner_api_calls:
self._inner_api_calls[
'list_logs'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_logs,
default_retry=self._method_configs['ListLogs'].retry,
default_timeout=self._method_configs['ListLogs'].timeout,
client_info=self._client_info,
)
request = logging_pb2.ListLogsRequest(
parent=parent,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_logs'],
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='log_names',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
| 46.45377
| 155
| 0.594788
|
598e1f7b629f8dcacb33d96e26760d22cff36cee
| 1,783
|
py
|
Python
|
versiongrid/db/base.py
|
rsnyman/versiongrid
|
0870f320f2b53f1071282692816fcbba1f9a0346
|
[
"MIT"
] | null | null | null |
versiongrid/db/base.py
|
rsnyman/versiongrid
|
0870f320f2b53f1071282692816fcbba1f9a0346
|
[
"MIT"
] | null | null | null |
versiongrid/db/base.py
|
rsnyman/versiongrid
|
0870f320f2b53f1071282692816fcbba1f9a0346
|
[
"MIT"
] | null | null | null |
from uuid import UUID
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.dialects.postgresql import UUID as PostgresUUID
from sqlalchemy.types import CHAR
from sqlalchemy.types import TypeDecorator
db = SQLAlchemy()
Model = db.Model
Boolean = db.Boolean
Column = db.Column
DateTime = db.DateTime
Float = db.Float
ForeignKey = db.ForeignKey
Integer = db.Integer
LargeBinary = db.LargeBinary
Table = db.Table
Text = db.Text
relationship = db.relationship
inspect = db.inspect
session = db.session
class PortableUUID(TypeDecorator):
"""Platform-independent UUID type.
Uses PostgreSQL's UUID type, otherwise uses CHAR(32), storing as stringified hex values.
Based on https://docs.sqlalchemy.org/en/13/core/custom_types.html#backend-agnostic-guid-type
"""
impl = CHAR
def __init__(self, *args, **kwargs):
if "as_uuid" in kwargs:
self.as_uuid = kwargs.pop("as_uuid")
else:
self.as_uuid = False
super().__init__(*args, **kwargs)
def load_dialect_impl(self, dialect):
if dialect.name == "postgresql":
return dialect.type_descriptor(PostgresUUID(as_uuid=self.as_uuid))
else:
return dialect.type_descriptor(CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == "postgresql":
return value
else:
if isinstance(value, UUID):
return str(value)
else:
return value
def process_result_value(self, value, dialect):
if value is None:
return value
else:
if self.as_uuid and not isinstance(value, UUID):
value = UUID(value)
return value
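# Minimal usage sketch (hypothetical model; assumes the usual Flask-SQLAlchemy
# declarative setup exposed by this module):
#
#   class Example(Model):
#       __tablename__ = "example"
#       id = Column(PortableUUID(as_uuid=True), primary_key=True)
#
# On PostgreSQL the column uses the native UUID type; elsewhere it falls back to
# CHAR(32) with stringified values.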
| 27.015152
| 96
| 0.649467
|
25f8d9fd0c6d587b21b656f149495e4e707044de
| 28,004
|
py
|
Python
|
plugin/tensorboard_plugin_profile/profile_plugin.py
|
Saiprasad16/profiler
|
00499416f250acca6c73dd7f191e24ee142c2bef
|
[
"Apache-2.0"
] | 1
|
2021-05-10T10:47:15.000Z
|
2021-05-10T10:47:15.000Z
|
plugin/tensorboard_plugin_profile/profile_plugin.py
|
Saiprasad16/profiler
|
00499416f250acca6c73dd7f191e24ee142c2bef
|
[
"Apache-2.0"
] | null | null | null |
plugin/tensorboard_plugin_profile/profile_plugin.py
|
Saiprasad16/profiler
|
00499416f250acca6c73dd7f191e24ee142c2bef
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TensorBoard plugin for performance profiling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import re
import threading
import six
import tensorflow.compat.v2 as tf
from werkzeug import wrappers
from tensorboard.backend.event_processing import plugin_asset_util
from tensorboard.context import RequestContext
from tensorboard.plugins import base_plugin
from tensorflow.python.profiler import profiler_client # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.profiler import profiler_v2 as profiler # pylint: disable=g-direct-tensorflow-import
from tensorboard_plugin_profile.convert import raw_to_tool_data as convert
tf.enable_v2_behavior()
logger = logging.getLogger('tensorboard')
# The prefix of routes provided by this plugin.
PLUGIN_NAME = 'profile'
INDEX_JS_ROUTE = '/index.js'
INDEX_HTML_ROUTE = '/index.html'
BUNDLE_JS_ROUTE = '/bundle.js'
STYLES_CSS_ROUTE = '/styles.css'
MATERIALICONS_WOFF2_ROUTE = '/materialicons.woff2'
TRACE_VIEWER_INDEX_HTML_ROUTE = '/trace_viewer_index.html'
TRACE_VIEWER_INDEX_JS_ROUTE = '/trace_viewer_index.js'
ZONE_JS_ROUTE = '/zone.js'
DATA_ROUTE = '/data'
TOOLS_ROUTE = '/tools'
HOSTS_ROUTE = '/hosts'
CAPTURE_ROUTE = '/capture_profile'
# Suffixes of "^, #, @" symbols represent different input data formats for the
# same tool.
# 1) '^': data generate from XPlane.
# 2) '#': data is in gzip format.
# 3) '@': data generate from proto, or tracetable for streaming trace viewer.
# 4) no suffix: data is in json format, ready to feed to frontend.
TOOLS = {
'trace_viewer': 'trace',
'trace_viewer#': 'trace.json.gz',
'trace_viewer@': 'tracetable', # streaming trace viewer
'op_profile': 'op_profile.json',
'input_pipeline_analyzer': 'input_pipeline.json',
'input_pipeline_analyzer@': 'input_pipeline.pb',
'overview_page': 'overview_page.json',
'overview_page@': 'overview_page.pb',
'memory_viewer': 'memory_viewer.json',
'pod_viewer': 'pod_viewer.json',
'tensorflow_stats': 'tensorflow_stats.pb',
'kernel_stats': 'kernel_stats.pb',
'memory_profile#': 'memory_profile.json.gz',
'xplane': 'xplane.pb',
'tf_data_bottleneck_analysis': 'tf_data_bottleneck_analysis.json',
}
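# For example, _make_filename('localhost', 'trace_viewer#') below yields
# 'localhost.trace.json.gz', while every XPlane-derived tool (suffix '^') maps to
# the single 'localhost.xplane.pb' file.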
ALL_HOSTS = 'ALL_HOSTS'
_EXTENSION_TO_TOOL = {extension: tool for tool, extension in TOOLS.items()}
_FILENAME_RE = re.compile(r'(?:(.*)\.)?(' +
'|'.join(TOOLS.values()).replace('.', r'\.') + r')')
# Tools that consume raw data.
_RAW_DATA_TOOLS = frozenset(
tool for tool, extension in TOOLS.items()
if extension.endswith('.json') or extension.endswith('.json.gz'))
# Tools that can be generated from xplane end with ^.
XPLANE_TOOLS = [
'trace_viewer^',
'overview_page^',
'input_pipeline_analyzer^',
'tensorflow_stats^',
'kernel_stats^',
'memory_profile^',
'pod_viewer^',
'tf_data_bottleneck_analysis^',
]
# XPlane generated tools that support all host mode.
XPLANE_TOOLS_ALL_HOSTS_SUPPORTED = frozenset([
'input_pipeline_analyzer^',
'tensorflow_stats^',
'kernel_stats^',
'overview_page^',
'pod_viewer^',
'tf_data_bottleneck_analysis^',
])
# XPlane generated tools that only support all host mode.
XPLANE_TOOLS_ALL_HOSTS_ONLY = frozenset(
['overview_page^', 'pod_viewer^', 'tf_data_bottleneck_analysis^'])
def _use_xplane(tool):
return tool[-1] == '^'
def _make_filename(host, tool):
"""Returns the name of the file containing data for the given host and tool.
Args:
host: Name of the host that produced the profile data, e.g., 'localhost'.
tool: Name of the tool, e.g., 'trace_viewer'.
Returns:
The host name concatenated with the tool-specific extension, e.g.,
'localhost.trace'.
"""
filename = str(host) + '.' if host else ''
tool = 'xplane' if _use_xplane(tool) else tool
return filename + TOOLS[tool]
def _parse_filename(filename):
"""Returns the host and tool encoded in a filename in the run directory.
Args:
filename: Name of a file in the run directory. The name might encode a host
and tool, e.g., 'host.tracetable', 'host.domain.op_profile.json', or just
a tool, e.g., 'trace', 'tensorflow_stats.pb'.
Returns:
A tuple (host, tool) containing the names of the host and tool, e.g.,
('localhost', 'trace_viewer'). Either of the tuple's components can be None.
"""
m = _FILENAME_RE.fullmatch(filename)
if m is None:
return filename, None
return m.group(1), _EXTENSION_TO_TOOL[m.group(2)]
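# Illustrative examples for the two filename helpers above (a minimal sketch;
# the values follow directly from TOOLS and _FILENAME_RE as defined in this
# module, e.g. TOOLS['trace_viewer'] == 'trace'):
#
#   _make_filename('localhost', 'trace_viewer')   -> 'localhost.trace'
#   _parse_filename('localhost.trace')            -> ('localhost', 'trace_viewer')
#   _parse_filename('tensorflow_stats.pb')        -> (None, 'tensorflow_stats')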
def _get_hosts(filenames):
"""Parses a list of filenames and returns the set of hosts.
Args:
filenames: A list of filenames (just basenames, no directory).
Returns:
A set of host names encoded in the filenames.
"""
hosts = set()
for name in filenames:
host, _ = _parse_filename(name)
if host:
hosts.add(host)
return hosts
def _get_tools(filenames):
"""Parses a list of filenames and returns the set of tools.
If xplane is present in the repository, add tools that can be generated by
xplane if we don't have a file for the tool.
Args:
filenames: A list of filenames (just basenames, no directory).
Returns:
A set of tool names encoded in the filenames.
"""
tools = set()
found = set()
has_xplane = False
for name in filenames:
_, tool = _parse_filename(name)
if tool == 'xplane':
has_xplane = True
continue
elif tool:
tools.add(tool)
if tool[-1] in ('@', '#'):
found.add(tool[:-1])
else:
found.add(tool)
if has_xplane:
for item in XPLANE_TOOLS:
if item[:-1] not in found:
tools.add(item)
return tools
def get_worker_list(cluster_resolver):
"""Parses TPU workers list from the cluster resolver."""
cluster_spec = cluster_resolver.cluster_spec()
task_indices = cluster_spec.task_indices('worker')
worker_list = [
cluster_spec.task_address('worker', i).replace(':8470', ':8466')
for i in task_indices
]
return ','.join(worker_list)
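# Sketch of what get_worker_list() produces (hypothetical addresses, not taken
# from this file): workers resolving to ['10.0.0.2:8470', '10.0.0.3:8470']
# yield the string '10.0.0.2:8466,10.0.0.3:8466', i.e. the TPU port 8470 is
# rewritten to the profiler service port 8466 for every worker.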
def respond(body, content_type, code=200, content_encoding=None):
"""Create a Werkzeug response, handling JSON serialization and CSP.
Args:
body: For JSON responses, a JSON-serializable object; otherwise, a raw
`bytes` string or Unicode `str` (which will be encoded as UTF-8).
content_type: Response content-type (`str`); use `application/json` to
automatically serialize structures.
code: HTTP status code (`int`).
content_encoding: Response Content-Encoding header ('str'); e.g. 'gzip'.
Returns:
A `werkzeug.wrappers.BaseResponse` object.
"""
if content_type == 'application/json' and isinstance(
body, (dict, list, set, tuple)):
body = json.dumps(body, sort_keys=True)
if not isinstance(body, bytes):
body = body.encode('utf-8')
csp_parts = {
'default-src': ["'self'"],
'script-src': [
"'self'",
"'unsafe-eval'",
"'unsafe-inline'",
'https://www.gstatic.com',
],
'object-src': ["'none'"],
'style-src': [
"'self'",
"'unsafe-inline'",
'https://www.gstatic.com',
],
'img-src': [
"'self'",
'blob:',
'data:',
],
}
csp = ';'.join((' '.join([k] + v) for (k, v) in csp_parts.items()))
headers = [
('Content-Security-Policy', csp),
('X-Content-Type-Options', 'nosniff'),
]
if content_encoding:
headers.append(('Content-Encoding', content_encoding))
return wrappers.Response(
body, content_type=content_type, status=code, headers=headers)
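# Illustrative use of respond() (a sketch; not called like this elsewhere in
# this module): a dict passed with content_type='application/json' is
# serialized with sorted keys and returned with the CSP and nosniff headers
# built above.
#
#   resp = respond({'status': 'ok'}, 'application/json')
#   # resp.status_code == 200; resp.headers include 'Content-Security-Policy'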
def _plugin_assets(logdir, runs, plugin_name):
result = {}
for run in runs:
run_path = os.path.join(logdir, run)
assets = plugin_asset_util.ListAssets(run_path, plugin_name)
result[run] = assets
return result
class ProfilePlugin(base_plugin.TBPlugin):
"""Profile Plugin for TensorBoard."""
plugin_name = PLUGIN_NAME
def __init__(self, context):
"""Constructs a profiler plugin for TensorBoard.
This plugin adds handlers for performance-related frontends.
Args:
context: A base_plugin.TBContext instance.
"""
self.logdir = context.logdir
self.data_provider = context.data_provider
self.stub = None
self.master_tpu_unsecure_channel = context.flags.master_tpu_unsecure_channel
# Whether the plugin is active. This is an expensive computation, so we
# compute this asynchronously and cache positive results indefinitely.
self._is_active = False
# Lock to ensure at most one thread computes _is_active at a time.
self._is_active_lock = threading.Lock()
def is_active(self):
"""Whether this plugin is active and has any profile data to show.
Detecting profile data is expensive, so this process runs asynchronously
and the value reported by this method is the cached value and may be stale.
Returns:
Whether any run has profile data.
"""
# If we are already active, we remain active and don't recompute this.
# Otherwise, try to acquire the lock without blocking; if we get it and
# we're still not active, launch a thread to check if we're active and
# release the lock once the computation is finished. Either way, this
# thread returns the current cached value to avoid blocking.
if not self._is_active and self._is_active_lock.acquire(False):
if self._is_active:
self._is_active_lock.release()
else:
def compute_is_active():
self._is_active = any(self.generate_run_to_tools())
self._is_active_lock.release()
new_thread = threading.Thread(
target=compute_is_active, name='DynamicProfilePluginIsActiveThread')
new_thread.start()
return self._is_active
def get_plugin_apps(self):
return {
INDEX_JS_ROUTE: self.static_file_route,
INDEX_HTML_ROUTE: self.static_file_route,
BUNDLE_JS_ROUTE: self.static_file_route,
STYLES_CSS_ROUTE: self.static_file_route,
MATERIALICONS_WOFF2_ROUTE: self.static_file_route,
TRACE_VIEWER_INDEX_HTML_ROUTE: self.static_file_route,
TRACE_VIEWER_INDEX_JS_ROUTE: self.static_file_route,
ZONE_JS_ROUTE: self.static_file_route,
TOOLS_ROUTE: self.tools_route,
HOSTS_ROUTE: self.hosts_route,
DATA_ROUTE: self.data_route,
CAPTURE_ROUTE: self.capture_route,
}
def frontend_metadata(self):
return base_plugin.FrontendMetadata(es_module_path='/index.js')
@wrappers.Request.application
def static_file_route(self, request):
filename = os.path.basename(request.path)
extension = os.path.splitext(filename)[1]
if extension == '.html':
mimetype = 'text/html'
elif extension == '.css':
mimetype = 'text/css'
elif extension == '.js':
mimetype = 'application/javascript'
else:
mimetype = 'application/octet-stream'
filepath = os.path.join(os.path.dirname(__file__), 'static', filename)
try:
with open(filepath, 'rb') as infile:
contents = infile.read()
except IOError:
return respond('404 Not Found', 'text/plain', code=404)
return respond(contents, mimetype)
@wrappers.Request.application
def tools_route(self, request):
run_to_tools = dict(self.generate_run_to_tools())
return respond(run_to_tools, 'application/json')
def host_impl(self, run, tool):
"""Returns available hosts for the run and tool in the log directory.
In the plugin log directory, each directory contains profile data for a
single run (identified by the directory name), and files in the run
directory contain data for different tools and hosts. The file that
contains the profile for a specific tool "x" will have the extension TOOLS["x"].
Example:
log/
run1/
plugins/
profile/
host1.trace
host2.trace
run2/
plugins/
profile/
host1.trace
host2.trace
Args:
run: the frontend run name, e.g., 'run1' or 'run2' for the example above.
tool: the requested tool, e.g., 'trace_viewer' for the example above.
Returns:
A list of host names, e.g. ["host1", "host2"] for the example above.
"""
run_dir = self._run_dir(run)
if not run_dir:
logger.warning('Cannot find asset directory for: %s', run)
return []
tool_pattern = _make_filename('*', tool)
try:
filenames = tf.io.gfile.glob(os.path.join(run_dir, tool_pattern))
except tf.errors.OpError as e:
logger.warning('Cannot read asset directory: %s, OpError %s', run_dir, e)
return []
filenames = [os.path.basename(f) for f in filenames]
hosts = _get_hosts(filenames)
if len(hosts) > 1:
if tool in XPLANE_TOOLS_ALL_HOSTS_ONLY:
hosts = [ALL_HOSTS]
elif tool in XPLANE_TOOLS_ALL_HOSTS_SUPPORTED:
hosts.add(ALL_HOSTS)
return sorted(hosts)
@wrappers.Request.application
def hosts_route(self, request):
run = request.args.get('run')
tool = request.args.get('tag')
hosts = self.host_impl(run, tool)
return respond(hosts, 'application/json')
def data_impl(self, request):
"""Retrieves and processes the tool data for a run and a host.
Args:
request: XMLHttpRequest
Returns:
A string that can be served to the frontend tool or None if tool,
run or host is invalid.
"""
run = request.args.get('run')
tool = request.args.get('tag')
host = request.args.get('host')
tqx = request.args.get('tqx')
run_dir = self._run_dir(run)
# Profile plugin "run" is the last component of run dir.
profile_run = os.path.basename(run_dir)
if tool not in TOOLS and not _use_xplane(tool):
return None, None
self.start_grpc_stub_if_necessary()
if tool == 'trace_viewer@' and self.stub is not None:
# Streaming trace viewer needs profiler_analysis service, which is only
# supported in Cloud TPU. This code is unused when data was produced by
# open-source TensorFlow. Only import the library when needed.
# pylint: disable=g-import-not-at-top
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.profiler import profiler_analysis_pb2
# pylint: enable=g-import-not-at-top
# pylint: enable=g-direct-tensorflow-import
grpc_request = profiler_analysis_pb2.ProfileSessionDataRequest()
grpc_request.repository_root = os.path.dirname(run_dir)
grpc_request.session_id = profile_run
grpc_request.tool_name = 'trace_viewer'
# Remove the trailing dot if present
grpc_request.host_name = host.rstrip('.')
grpc_request.parameters['resolution'] = request.args.get(
'resolution', 8000)
if request.args.get('start_time_ms') is not None:
grpc_request.parameters['start_time_ms'] = request.args.get(
'start_time_ms')
if request.args.get('end_time_ms') is not None:
grpc_request.parameters['end_time_ms'] = request.args.get('end_time_ms')
grpc_response = self.stub.GetSessionToolData(grpc_request)
return grpc_response.output, None
asset_path = os.path.join(run_dir, _make_filename(host, tool))
data, content_encoding = None, None
if _use_xplane(tool):
if host == ALL_HOSTS:
file_pattern = _make_filename('*', 'xplane')
try:
asset_paths = tf.io.gfile.glob(os.path.join(run_dir, file_pattern))
except tf.errors.OpError as e:
logger.warning('Cannot read asset directory: %s, OpError %s', run_dir, e)
return None, None
else:
asset_paths = [asset_path]
try:
data = convert.xspace_to_tool_data(asset_paths, tool, tqx)
except AttributeError:
logger.warning('XPlane converters are available after Tensorflow 2.4')
return data, content_encoding
raw_data = None
try:
with tf.io.gfile.GFile(asset_path, 'rb') as f:
raw_data = f.read()
except tf.errors.NotFoundError:
logger.warning('Asset path %s not found', asset_path)
except tf.errors.OpError as e:
logger.warning("Couldn't read asset path: %s, OpError %s", asset_path, e)
if raw_data is None:
return None, None
if tool in _RAW_DATA_TOOLS:
data = raw_data
if tool[-1] == '#':
content_encoding = 'gzip'
else:
data = convert.tool_proto_to_tool_data(raw_data, tool, tqx)
return data, content_encoding
@wrappers.Request.application
def data_route(self, request):
"""Serves the data for a given run, tool and host (request: XMLHttpRequest)."""
data, content_encoding = self.data_impl(request)
if data is None:
return respond('404 Not Found', 'text/plain', code=404)
return respond(data, 'application/json', content_encoding=content_encoding)
@wrappers.Request.application
def capture_route(self, request):
return self.capture_route_impl(request)
def capture_route_impl(self, request):
"""Runs the client trace for capturing profiling information."""
service_addr = request.args.get('service_addr')
duration = int(request.args.get('duration', '1000'))
is_tpu_name = request.args.get('is_tpu_name') == 'true'
worker_list = request.args.get('worker_list')
num_tracing_attempts = int(request.args.get('num_retry', '0')) + 1
options = None
try:
options = profiler.ProfilerOptions(
host_tracer_level=int(request.args.get('host_tracer_level', '2')),
device_tracer_level=int(request.args.get('device_tracer_level', '1')),
python_tracer_level=int(request.args.get('python_tracer_level', '0')),
)
# For preserving backwards compatibility with TensorFlow 2.3 and older.
if 'delay_ms' in options._fields:
options.delay_ms = int(request.args.get('delay', '0'))
except AttributeError:
logger.warning('ProfilerOptions are available after tensorflow 2.3')
if is_tpu_name:
try:
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
service_addr)
master_grpc_addr = tpu_cluster_resolver.get_master()
except (ImportError, RuntimeError) as err:
return respond({'error': str(err)}, 'application/json', code=200)
except (ValueError, TypeError):
return respond(
{'error': 'no TPUs with the specified names exist.'},
'application/json',
code=200,
)
if not worker_list:
worker_list = get_worker_list(tpu_cluster_resolver)
# TPU cluster resolver always returns port 8470. Replace it with 8466
# on which profiler service is running.
master_ip = master_grpc_addr.replace('grpc://', '').replace(':8470', '')
service_addr = master_ip + ':8466'
# Set the master TPU for streaming trace viewer.
self.master_tpu_unsecure_channel = master_ip
try:
if options:
profiler_client.trace(
service_addr,
self.logdir,
duration,
worker_list,
num_tracing_attempts,
options=options)
else:
profiler_client.trace(
service_addr,
self.logdir,
duration,
worker_list,
num_tracing_attempts,
)
return respond(
{'result': 'Capture profile successfully. Please refresh.'},
'application/json',
)
except tf.errors.UnavailableError:
return respond(
{'error': 'empty trace result.'},
'application/json',
code=200,
)
except Exception as e: # pylint: disable=broad-except
return respond(
{'error': str(e)},
'application/json',
code=200,
)
def start_grpc_stub_if_necessary(self):
# Streaming trace viewer is enabled only when both of these conditions hold:
# 1. the user sets the master_tpu_unsecure_channel flag to the IP address of
# the "master" TPU; gRPC is then used to fetch streaming trace data.
# 2. the logdir is on Google Cloud Storage.
if self.master_tpu_unsecure_channel and self.logdir.startswith('gs://'):
if self.stub is None:
# gRPC and profiler_analysis are only needed to support streaming trace
# viewer in Cloud TPU. This code is unused when data was produced by
# open-source TensorFlow. Only import the libraries when needed.
# pylint: disable=g-import-not-at-top
import grpc
from tensorflow.python.tpu.profiler import profiler_analysis_pb2_grpc
# pylint: enable=g-import-not-at-top
# Workaround the grpc's 4MB message limitation.
gigabyte = 1024 * 1024 * 1024
options = [('grpc.max_message_length', gigabyte),
('grpc.max_send_message_length', gigabyte),
('grpc.max_receive_message_length', gigabyte)]
tpu_profiler_port = self.master_tpu_unsecure_channel + ':8466'
channel = grpc.insecure_channel(tpu_profiler_port, options)
self.stub = profiler_analysis_pb2_grpc.ProfileAnalysisStub(channel)
def _run_dir(self, run):
"""Helper that maps a frontend run name to a profile "run" directory.
The frontend run name consists of the TensorBoard run name (aka the relative
path from the logdir root to the directory containing the data) path-joined
to the Profile plugin's "run" concept (which is a subdirectory of the
plugins/profile directory representing an individual run of the tool). As a
special case, when the TensorBoard run is the logdir root (the run named '.'),
only the Profile plugin "run" name is used, for backwards compatibility.
Args:
run: the frontend run name, as described above, e.g. train/run1.
Returns:
The resolved directory path, e.g. /logdir/train/plugins/profile/run1.
Raises:
RuntimeError: If the run directory is not found.
"""
run = run.rstrip(os.sep)
tb_run_name, profile_run_name = os.path.split(run)
if not tb_run_name:
tb_run_name = '.'
if tb_run_name == '.' and tf.io.gfile.isdir(self.logdir):
tb_run_directory = self.logdir
else:
tb_run_directory = os.path.join(self.logdir, tb_run_name)
if not tf.io.gfile.isdir(tb_run_directory):
raise RuntimeError('No matching run directory for run %s' % run)
plugin_directory = plugin_asset_util.PluginDirectory(
tb_run_directory, PLUGIN_NAME)
return os.path.join(plugin_directory, profile_run_name)
def generate_run_to_tools(self):
"""Generator for pairs of "run name" and a list of tools for that run.
The "run name" here is a "frontend run name" - see _run_dir() for the
definition of a "frontend run name" and how it maps to a directory of
profile data for a specific profile "run". The profile plugin concept of
"run" is different from the normal TensorBoard run; each run in this case
represents a single instance of profile data collection, more similar to a
"step" of data in typical TensorBoard semantics. These runs reside in
subdirectories of the plugins/profile directory within any regular
TensorBoard run directory (defined as a subdirectory of the logdir that
contains at least one tfevents file) or within the logdir root directory
itself (even if it contains no tfevents file and would thus not be
considered a normal TensorBoard run, for backwards compatibility).
Within those "profile run directories", there are files in the directory
that correspond to different profiling tools. The file that contains profile
for a specific tool "x" will have a suffix name TOOLS["x"].
Example:
logs/
plugins/
profile/
run1/
hostA.trace
train/
events.out.tfevents.foo
plugins/
profile/
run1/
hostA.trace
hostB.trace
run2/
hostA.trace
validation/
events.out.tfevents.foo
plugins/
profile/
run1/
hostA.trace
Yields:
A sequence of tuples mapping "frontend run names" to lists of tool names
available for those runs. For the above example, this would be:
("run1", ["trace_viewer"])
("train/run1", ["trace_viewer"])
("train/run2", ["trace_viewer"])
("validation/run1", ["trace_viewer"])
"""
self.start_grpc_stub_if_necessary()
# Create a background context; we may not be in a request.
ctx = RequestContext()
tb_run_names_to_dirs = {
run.run_name: os.path.join(self.logdir, run.run_name)
for run in self.data_provider.list_runs(ctx, experiment_id='')
}
plugin_assets = _plugin_assets(self.logdir, list(tb_run_names_to_dirs),
PLUGIN_NAME)
# Ensure that we also check the root logdir, even if it isn't a recognized
# TensorBoard run (i.e. has no tfevents file directly under it), to remain
# backwards compatible with previous profile plugin behavior. Note that we
# check if logdir is a directory to handle case where it's actually a
# multipart directory spec, which this plugin does not support.
if '.' not in plugin_assets and tf.io.gfile.isdir(self.logdir):
tb_run_names_to_dirs['.'] = self.logdir
plugin_assets['.'] = plugin_asset_util.ListAssets(self.logdir,
PLUGIN_NAME)
for tb_run_name, profile_runs in six.iteritems(plugin_assets):
tb_run_dir = tb_run_names_to_dirs[tb_run_name]
tb_plugin_dir = plugin_asset_util.PluginDirectory(tb_run_dir, PLUGIN_NAME)
for profile_run in profile_runs:
# Remove trailing separator; some filesystem implementations emit this.
profile_run = profile_run.rstrip(os.sep)
if tb_run_name == '.':
frontend_run = profile_run
else:
frontend_run = os.path.join(tb_run_name, profile_run)
profile_run_dir = os.path.join(tb_plugin_dir, profile_run)
if tf.io.gfile.isdir(profile_run_dir):
yield frontend_run, self._get_active_tools(profile_run_dir)
def _get_active_tools(self, profile_run_dir):
try:
filenames = tf.io.gfile.listdir(profile_run_dir)
except tf.errors.NotFoundError as e:
logger.warning('Cannot read asset directory: %s, NotFoundError %s',
profile_run_dir, e)
return []
tools = _get_tools(filenames)
if 'trace_viewer@' in tools:
# The streaming trace viewer always overrides the normal trace viewer.
# The trailing '@' tells tf-profile-dashboard.html and
# tf-trace-viewer.html that the streaming trace viewer should be used.
if self.stub is None:
tools.discard('trace_viewer@')
else:
tools.discard('trace_viewer#')
tools.discard('trace_viewer')
if 'trace_viewer#' in tools:
# use compressed trace
tools.discard('trace_viewer')
# Return sorted list of tools with 'overview_page' at the front.
op = frozenset(['overview_page@', 'overview_page', 'overview_page^'])
return list(tools.intersection(op)) + sorted(tools.difference(op))
| 36.558747
| 108
| 0.672368
|
b85655eed9df524c80f97cdcfd06cc48ca6f1c0e
| 606
|
py
|
Python
|
advent/day03/day03_test.py
|
benjackwhite/adventofcode2017
|
ce29e625cbe11fd5f36cff6b36a879c6a3955581
|
[
"MIT"
] | null | null | null |
advent/day03/day03_test.py
|
benjackwhite/adventofcode2017
|
ce29e625cbe11fd5f36cff6b36a879c6a3955581
|
[
"MIT"
] | null | null | null |
advent/day03/day03_test.py
|
benjackwhite/adventofcode2017
|
ce29e625cbe11fd5f36cff6b36a879c6a3955581
|
[
"MIT"
] | null | null | null |
from day03 import calculate_distance, calculate_next_large_value
target = 325489
def test_calculate_distance():
assert calculate_distance(1) == 0
assert calculate_distance(12) == 3
assert calculate_distance(23) == 2
assert calculate_distance(1024) == 31
assert calculate_distance(target) == 552
def test_calculate_next_large_value():
assert calculate_next_large_value(1) == 2
assert calculate_next_large_value(2) == 4
assert calculate_next_large_value(10) == 11
assert calculate_next_large_value(133) == 142
assert calculate_next_large_value(target) == 330785
| 28.857143
| 64
| 0.759076
|
beaf0c39c4b5ae53960e92b9908f9547c1abe742
| 4,834
|
py
|
Python
|
learning/graph2vec/corpus_parser.py
|
RiS3-Lab/FICS-
|
82c8abef52ca943946b7e82a16998cf67f1d2049
|
[
"Apache-2.0"
] | 37
|
2020-12-04T09:15:50.000Z
|
2022-03-28T13:33:29.000Z
|
learning/graph2vec/corpus_parser.py
|
RiS3-Lab/FICS-
|
82c8abef52ca943946b7e82a16998cf67f1d2049
|
[
"Apache-2.0"
] | 7
|
2020-12-03T08:14:31.000Z
|
2021-11-24T14:14:03.000Z
|
learning/graph2vec/corpus_parser.py
|
RiS3-Lab/FICS-
|
82c8abef52ca943946b7e82a16998cf67f1d2049
|
[
"Apache-2.0"
] | 19
|
2020-12-04T08:43:31.000Z
|
2022-03-28T13:33:27.000Z
|
import logging
from collections import Counter
from random import shuffle
import numpy as np
from utils import get_files
class Corpus(object):
def __init__(self, fnames=None, extn='WL2', max_files=0):
assert fnames is not None, "please specify the corpus folder"
self.fnames = fnames
self.subgraph_index = 0
self.graph_index = 0
self.epoch_flag = 0
self.max_files = max_files
self.graph_ids_for_batch_traversal = []
self.extn = extn
def scan_corpus(self):
subgraphs = []
for fname in self.graph_fname_list:
subgraphs.extend(
[l.split()[0] for l in open(fname)]) # just take the first word of every sentence
subgraphs.append('UNK')
subgraph_to_freq_map = Counter(subgraphs)
del subgraphs
subgraph_to_id_map = {sg: i for i, sg in
enumerate(subgraph_to_freq_map.keys())} # output layer of the skipgram network
self._subgraph_to_freq_map = subgraph_to_freq_map # to be used for negative sampling
self._subgraph_to_id_map = subgraph_to_id_map
self._id_to_subgraph_map = {v: k for k, v in subgraph_to_id_map.items()}
self._subgraphcount = sum(subgraph_to_freq_map.values()) # total num subgraphs in all graphs
self.num_graphs = len(self.graph_fname_list) # doc size
self.num_subgraphs = len(subgraph_to_id_map) # vocab of word size
self.subgraph_id_freq_map_as_list = [] # id of this list is the word id and value is the freq of word with corresponding word id
for i in range(len(self._subgraph_to_freq_map)):
self.subgraph_id_freq_map_as_list.append(self._subgraph_to_freq_map[self._id_to_subgraph_map[i]])
return self._subgraph_to_id_map
def scan_and_load_corpus(self):
self.graph_fname_list = get_files(self.fnames, extn=self.extn, max_files=self.max_files)
self._graph_name_to_id_map = {g: i for i, g in
enumerate(self.graph_fname_list)} # input layer of the skipgram network
self._id_to_graph_name_map = {i: g for g, i in self._graph_name_to_id_map.items()}
subgraph_to_id_map = self.scan_corpus()
logging.info('number of graphs: %d' % self.num_graphs)
logging.info('subgraph vocabulary size: %d' % self.num_subgraphs)
logging.info('total number of subgraphs to be trained: %d' % self._subgraphcount)
self.graph_ids_for_batch_traversal = list(range(self.num_graphs))
shuffle(self.graph_ids_for_batch_traversal)
def generate_batch_from_file(self, batch_size):
target_graph_ids = []
context_subgraph_ids = []
graph_name = self.graph_fname_list[self.graph_ids_for_batch_traversal[self.graph_index]]
graph_contents = open(graph_name).readlines()
while self.subgraph_index >= len(graph_contents):
self.subgraph_index = 0
self.graph_index += 1
if self.graph_index == len(self.graph_fname_list):
self.graph_index = 0
np.random.shuffle(self.graph_ids_for_batch_traversal)
self.epoch_flag = True
graph_name = self.graph_fname_list[self.graph_ids_for_batch_traversal[self.graph_index]]
graph_contents = open(graph_name).readlines()
while len(context_subgraph_ids) < batch_size:
line_id = self.subgraph_index
context_subgraph = graph_contents[line_id].split()[0]
target_graph = graph_name
context_subgraph_ids.append(self._subgraph_to_id_map[context_subgraph])
target_graph_ids.append(self._graph_name_to_id_map[target_graph])
self.subgraph_index += 1
while self.subgraph_index == len(graph_contents):
self.subgraph_index = 0
self.graph_index += 1
if self.graph_index == len(self.graph_fname_list):
self.graph_index = 0
np.random.shuffle(self.graph_ids_for_batch_traversal)
self.epoch_flag = True
graph_name = self.graph_fname_list[self.graph_ids_for_batch_traversal[self.graph_index]]
graph_contents = open(graph_name).readlines()
target_context_pairs = list(zip(target_graph_ids, context_subgraph_ids))
shuffle(target_context_pairs)
target_graph_ids, context_subgraph_ids = zip(*target_context_pairs)
target_graph_ids = np.array(target_graph_ids, dtype=np.int32)
context_subgraph_ids = np.array(context_subgraph_ids, dtype=np.int32)
contextword_outputs = np.reshape(context_subgraph_ids, [len(context_subgraph_ids), 1])
return target_graph_ids, contextword_outputs
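# Minimal usage sketch (illustrative; 'wl_kernel_dir' is a hypothetical path,
# not something defined in this module):
#
#   corpus = Corpus(fnames='wl_kernel_dir', extn='WL2')
#   corpus.scan_and_load_corpus()
#   graph_ids, context_ids = corpus.generate_batch_from_file(batch_size=128)
#
# generate_batch_from_file() returns graph ids shaped (128,) and context
# subgraph ids shaped (128, 1), ready to feed a skipgram-style model.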
| 43.54955
| 137
| 0.672735
|
d6ba9d41d36cd22f3fbe7696457900a00bbeaaab
| 1,198
|
py
|
Python
|
app/core/events.py
|
tuilagio/project1_be
|
8b3760a9d4255d71b91f9a8013ac7d8117513191
|
[
"MIT"
] | null | null | null |
app/core/events.py
|
tuilagio/project1_be
|
8b3760a9d4255d71b91f9a8013ac7d8117513191
|
[
"MIT"
] | null | null | null |
app/core/events.py
|
tuilagio/project1_be
|
8b3760a9d4255d71b91f9a8013ac7d8117513191
|
[
"MIT"
] | null | null | null |
from typing import Callable
from fastapi import FastAPI
from loguru import logger
from app.db.events import close_db_connection, connect_to_db
from app.core.config import settings
from O365 import Account, MSGraphProtocol
CLIENT_ID = '3a9eef7d-ab34-45f1-a9fd-3780564d7a2e'
SECRET_ID = '<your secret id>'
SECRET_VALUE = 'kpiQvG6_4ovz05n4c7Sn8.KOZE0rT.21s_'
credentials = (CLIENT_ID, SECRET_VALUE)
scopes = [
'https://graph.microsoft.com/Mail.ReadWrite',
'https://graph.microsoft.com/Mail.Send',
'https://graph.microsoft.com/Calendars.ReadWrite',
'offline_access'
]
def create_start_app_handler(app: FastAPI) -> Callable: # type: ignore
async def start_app() -> None:
await connect_to_db(app)
# protocol = MSGraphProtocol()
# account = Account(credentials, protocol=protocol)
# if account.authenticate(scopes=scopes):
# print('Authenticated!')
# settings.o365_account = account
# else:
# print("Auth O365 failed!")
return start_app
def create_stop_app_handler(app: FastAPI) -> Callable: # type: ignore
@logger.catch
async def stop_app() -> None:
await close_db_connection(app)
return stop_app
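# Illustrative wiring (a sketch, not part of this module): FastAPI's standard
# event-handler registration, typically done where the application is created.
#
#   app = FastAPI()
#   app.add_event_handler("startup", create_start_app_handler(app))
#   app.add_event_handler("shutdown", create_stop_app_handler(app))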
| 27.227273
| 71
| 0.714524
|
8518aba8fba41aef96c552965bf816e732bc183d
| 3,790
|
py
|
Python
|
MIT/ps4.py
|
mrouhi13/my-mit-python-practice
|
f3b29418576fec54d3f9f55155aa8f2096ad974a
|
[
"MIT"
] | null | null | null |
MIT/ps4.py
|
mrouhi13/my-mit-python-practice
|
f3b29418576fec54d3f9f55155aa8f2096ad974a
|
[
"MIT"
] | null | null | null |
MIT/ps4.py
|
mrouhi13/my-mit-python-practice
|
f3b29418576fec54d3f9f55155aa8f2096ad974a
|
[
"MIT"
] | null | null | null |
# Problem Set 4
# Name:
# Collaborators:
# Time:
#
# Problem 1
#
def nestEggFixed(salary, save, growthRate, years):
"""
- salary: the amount of money you make each year.
- save: the percent of your salary to save in the investment account each
year (an integer between 0 and 100).
- growthRate: the annual percent increase in your investment account (an
integer between 0 and 100).
- years: the number of years to work.
- return: a list whose values are the size of your retirement account at
the end of each year.
"""
# TODO: Your code here.
def testNestEggFixed():
salary = 10000
save = 10
growthRate = 15
years = 5
savingsRecord = nestEggFixed(salary, save, growthRate, years)
print(savingsRecord)
# Output should have values close to:
# [1000.0, 2150.0, 3472.5, 4993.375, 6742.3812499999995]
# TODO: Add more test cases here.
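# One possible solution for Problem 1, kept as a comment so the stub above
# stays a blank template (illustrative; it reproduces the expected test output):
#
#   def nestEggFixed(salary, save, growthRate, years):
#       savingsRecord = []
#       total = 0.0
#       for _ in range(years):
#           total = total * (1 + growthRate / 100.0) + salary * save / 100.0
#           savingsRecord.append(total)
#       return savingsRecord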
#
# Problem 2
#
def nestEggVariable(salary, save, growthRates):
"""
- salary: the amount of money you make each year.
- save: the percent of your salary to save in the investment account each
year (an integer between 0 and 100).
- growthRates: a list of the annual percent increases in your investment
account (integers between 0 and 100).
- return: a list of your retirement account value at the end of each year.
"""
# TODO: Your code here.
def testNestEggVariable():
salary = 10000
save = 10
growthRates = [3, 4, 5, 0, 3]
savingsRecord = nestEggVariable(salary, save, growthRates)
print(savingsRecord)
# Output should have values close to:
# [1000.0, 2040.0, 3142.0, 4142.0, 5266.2600000000002]
# TODO: Add more test cases here.
#
# Problem 3
#
def postRetirement(savings, growthRates, expenses):
"""
- savings: the initial amount of money in your savings account.
- growthRates: a list of the annual percent increases in your investment
account (integers between 0 and 100).
- expenses: the amount of money you plan to spend each year during
retirement.
- return: a list of your retirement account value at the end of each year.
"""
# TODO: Your code here.
def testPostRetirement():
savings = 100000
growthRates = [10, 5, 0, 5, 1]
expenses = 30000
savingsRecord = postRetirement(savings, growthRates, expenses)
print(savingsRecord)
# Output should have values close to:
# [80000.000000000015, 54000.000000000015, 24000.000000000015,
# -4799.9999999999854, -34847.999999999985]
# TODO: Add more test cases here.
#
# Problem 4
#
def findMaxExpenses(salary, save, preRetireGrowthRates, postRetireGrowthRates,
epsilon):
"""
- salary: the amount of money you make each year.
- save: the percent of your salary to save in the investment account each
year (an integer between 0 and 100).
- preRetireGrowthRates: a list of annual growth percentages on investments
while you are still working.
- postRetireGrowthRates: a list of annual growth percentages on investments
while you are retired.
- epsilon: an upper bound on the absolute value of the amount remaining in
the investment fund at the end of retirement.
"""
# TODO: Your code here.
def testFindMaxExpenses():
salary = 10000
save = 10
preRetireGrowthRates = [3, 4, 5, 0, 3]
postRetireGrowthRates = [10, 5, 0, 5, 1]
epsilon = .01
expenses = findMaxExpenses(salary, save, preRetireGrowthRates,
postRetireGrowthRates, epsilon)
print(expenses)
# Output should have a value close to:
# 1229.95548986
# TODO: Add more test cases here.
| 31.583333
| 79
| 0.658311
|
d041291a6ee314b9154fa84097aaca98d711a1c6
| 5,301
|
py
|
Python
|
raspberrypi/files/home/pi/watchdog.py
|
groovenauts/SmartShoppingNavigator
|
510dcea622d81fa40cafee3b30e5975be7712f54
|
[
"MIT"
] | 31
|
2018-05-10T01:51:46.000Z
|
2021-09-02T16:02:03.000Z
|
raspberrypi/files/home/pi/watchdog.py
|
groovenauts/SmartShoppingNavigator
|
510dcea622d81fa40cafee3b30e5975be7712f54
|
[
"MIT"
] | 18
|
2018-05-02T11:35:36.000Z
|
2019-11-27T13:44:14.000Z
|
raspberrypi/files/home/pi/watchdog.py
|
groovenauts/SmartShoppingNavigator
|
510dcea622d81fa40cafee3b30e5975be7712f54
|
[
"MIT"
] | 14
|
2018-05-11T04:40:58.000Z
|
2020-09-11T16:17:08.000Z
|
import sys
import time
import base64
import json
import datetime
import jwt
import requests
import picamera
def create_jwt(project_id, private_key_file, algorithm):
"""Creates a JWT (https://jwt.io) to establish an MQTT connection.
Args:
project_id: The cloud project ID this device belongs to
private_key_file: A path to a file containing either an RSA256 or
ES256 private key.
algorithm: The encryption algorithm to use. Either 'RS256' or 'ES256'
Returns:
A JWT generated from the given project_id and private key, which
expires in 60 minutes (matching the 'exp' claim below). After it expires,
your client will be disconnected, and a new JWT will have to be generated.
Raises:
ValueError: If the private_key_file does not contain a known key.
"""
token = {
# The time that the token was issued at
'iat': datetime.datetime.utcnow(),
# The time the token expires.
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
# The audience field should always be set to the GCP project id.
'aud': project_id
}
# Read the private key file.
with open(private_key_file, 'r') as f:
private_key = f.read()
return jwt.encode(token, private_key, algorithm=algorithm).decode("utf-8")
def capture(camera):
file = "/tmp/image.jpg"
camera.capture(file)
with open(file, "rb") as f:
buf = f.read()
buf = base64.urlsafe_b64encode(buf)
return buf.decode("utf-8")
def upload_image(project_id, location, registry, device, jwt, b64_buf):
headers = {
"Authorization": "Bearer {}".format(jwt),
"Content-Type" : "application/json",
"Cache-Control": "no-cache"
}
url = "https://cloudiotdevice.googleapis.com/v1/projects/{}/locations/{}/registries/{}/devices/{}:publishEvent".format(project_id, location, registry, device)
data = { "binaryData": b64_buf }
data = json.dumps(data).encode("utf-8")
res = requests.post(url, data=data, headers=headers)
print("POST HTTP Code={}".format(res.status_code))
if res.status_code != 200:
print(res.json())
filename = time.strftime("/tmp/failure_image_%Y%m%d_%H%M%S.jpg")
with open(filename, "wb") as f:
f.write(base64.urlsafe_b64decode(b64_buf))
print("Saved failed image to {}".format(filename))
def capture_and_upload(camera, project_id, location, registry, device, private_key):
buf = capture(camera)
jwt = create_jwt(project_id, private_key, "ES256")
upload_image(project_id, location, registry, device, jwt, buf)
def get_config(
version, base_url, project_id, cloud_region, registry_id,
device_id, private_key):
jwt = create_jwt(project_id, private_key, "ES256")
headers = {
'authorization': 'Bearer {}'.format(jwt),
'content-type': 'application/json',
'cache-control': 'no-cache'
}
basepath = '{}/projects/{}/locations/{}/registries/{}/devices/{}/'
template = basepath + 'config?local_version={}'
config_url = template.format(
base_url, project_id, cloud_region, registry_id, device_id, version)
resp = requests.get(config_url, headers=headers)
if (resp.status_code != 200):
print('Error getting config: {}, retrying'.format(resp.status_code))
raise AssertionError('Not OK response: {}'.format(resp.status_code))
return resp
def main(argv):
_, project_id, location, registry, device, private_key = argv
camera = picamera.PiCamera()
camera.resolution = (600, 360)
camera.brightness = 60
camera.hflip = False
camera.vflip = False
version = "0"
config_interval = 1
capture_interval = 60
last_captured_at = time.time()
while True:
res = get_config(
version, "https://cloudiotdevice.googleapis.com/v1",
project_id, location, registry, device, private_key)
res = res.json()
if version != res["version"]:
version = res["version"]
binary = res["binaryData"]
buf = base64.urlsafe_b64decode(binary).decode("utf-8")
print("Configuration update: {}".format(buf))
config = json.loads(buf)
config_interval = config.get("config_interval", 1)
capture_interval = config.get("capture_interval", 60)
camera.hflip = config.get("camera_hflip", False)
camera.vflip = config.get("camera_vflip", False)
camera.brightness = config.get("camera_brightness", 60)
camera.sharpness = config.get("camera_sharpness", 0)
camera.contrast = config.get("camera_contrast", 0)
camera.iso = config.get("camera_iso", 0)
url = config.get("dashboard_url")
if url:
with open("/tmp/dashboard_url", "w") as f:
f.write(url)
if time.time() - last_captured_at > capture_interval:
capture_and_upload(camera, project_id, location, registry, device, private_key)
print("Still image captured.")
last_captured_at = time.time()
time.sleep(config_interval)
main(sys.argv)
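# Example invocation (illustrative; the argument values are placeholders):
#   python3 watchdog.py my-gcp-project us-central1 my-registry my-device \
#       /home/pi/ec_private.pem
# i.e. sys.argv must supply: project id, Cloud IoT location, registry id,
# device id, and the path to the device's ES256 private key.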
| 38.413043
| 162
| 0.627806
|
11ec09bdbe6446924ac6ee3d0175f876e635821e
| 1,266
|
py
|
Python
|
tests/test_calc_batch_size.py
|
enpaul/peewee-plus
|
8076d8cd292e3d7b0f8d0b79abdfa4e623b3bb45
|
[
"MIT"
] | 1
|
2021-11-29T22:11:42.000Z
|
2021-11-29T22:11:42.000Z
|
tests/test_calc_batch_size.py
|
enpaul/peewee-plus
|
8076d8cd292e3d7b0f8d0b79abdfa4e623b3bb45
|
[
"MIT"
] | null | null | null |
tests/test_calc_batch_size.py
|
enpaul/peewee-plus
|
8076d8cd292e3d7b0f8d0b79abdfa4e623b3bb45
|
[
"MIT"
] | null | null | null |
# pylint: disable=redefined-outer-name
# pylint: disable=missing-class-docstring
# pylint: disable=too-few-public-methods
# pylint: disable=unused-import
import peewee
import peewee_plus
from .fixtures import fakedb
def test_sqlite(fakedb):
"""Test the calculation of batch sizes on SQLite"""
class TestModel(peewee.Model):
class Meta:
database = fakedb
data = peewee.IntegerField()
models = [TestModel(item) for item in range(500)]
assert (
peewee_plus.calc_batch_size(models) <= peewee_plus.SQLITE_DEFAULT_VARIABLE_LIMIT
)
assert peewee_plus.calc_batch_size(models) < len(models)
assert peewee_plus.calc_batch_size([]) == 0
def test_non_sqlite():
"""Test the calculation of batch sizes on non-SQLite"""
class TestModel(peewee.Model):
class Meta:
database = peewee.DatabaseProxy()
data = peewee.IntegerField()
# Three is just chosen as an arbitrary multiplier to ensure the value is larger than the
# sqlite variable limit
assert peewee_plus.calc_batch_size(
[
TestModel(item)
for item in range(peewee_plus.SQLITE_DEFAULT_VARIABLE_LIMIT * 3)
]
) == (peewee_plus.SQLITE_DEFAULT_VARIABLE_LIMIT * 3)
| 27.521739
| 92
| 0.687204
|
feb99509e9f05999fa11b14a6dd79d8f5213e3f2
| 3,415
|
py
|
Python
|
ritpytrading/orders.py
|
tjsavage/ritpytrading
|
ead3ec11951b24b74cd8031c490e5c1c19718e4d
|
[
"Apache-2.0"
] | null | null | null |
ritpytrading/orders.py
|
tjsavage/ritpytrading
|
ead3ec11951b24b74cd8031c490e5c1c19718e4d
|
[
"Apache-2.0"
] | null | null | null |
ritpytrading/orders.py
|
tjsavage/ritpytrading
|
ead3ec11951b24b74cd8031c490e5c1c19718e4d
|
[
"Apache-2.0"
] | null | null | null |
'''
order return object attributes
param possible order attributes: JSON formatted
i.e. get_order_response( ses, url_end, param="order_id" )
{
"order_id": 1221,
"period": 1,
"tick": 10,
"trader_id": "trader49",
"ticker": "CRZY",
"type": "LIMIT",
"quantity": 100,
"action": "BUY",
"price": 14.21,
"quantity_filled": 10,
"vwap": 14.21,
"status": "OPEN"
}
'''
# Make sure the RIT client uses the same 9999 port
host_url = 'http://localhost:9999'
base_path = '/v1'
base_url = host_url + base_path
class ApiException(Exception):
""" to print error messages and stop the program when needed """
pass
class Order():
""" order_response is a json obj returned from the API get request """
def __init__(self, order_response):
self.order_id = order_response["order_id"]
self.period = order_response["period"]
self.tick = order_response["tick"]
self.trader_id = order_response["trader_id"]
self.ticker = order_response["ticker"]
self.type = order_response["type"]
self.quantity = order_response["quantity"]
self.action = order_response["action"]
self.price = order_response["price"]
self.quantity_filled = order_response["quantity_filled"]
self.vwap = order_response["vwap"]
self.status = order_response["status"]
def __repr__(self):
return (self.action + '_' + str(self.quantity) + '_'
+ self.ticker + '_' + str(self.price) + '__' + str(self.order_id))
def __eq__(self, other):
return self.__dict__ == other.__dict__
def _get_orders_json(ses, url_end, order_status='OPEN', order_id=None):
""" function requires a requests.Session() object
as the ses argument with a loaded API_KEY
order status can be OPEN, TRANSACTED or CANCELLED
Json return mode is set to 0/Off by default
"""
# to query all orders
if url_end == '/orders':
payload = {'status': order_status}
response = ses.get((base_url + url_end), params=payload)
# to query just one order
elif url_end == '/orders/{}':
response = ses.get((base_url + url_end).format(order_id))
if response.ok:
orders_json = response.json()
# Return orders json output unformatted
return orders_json
raise ApiException('Authorization Error: Please check API key.')
def _orders_response_handle(orders_json, url_end):
if url_end == '/orders/{}':
orders_obj = Order(orders_json)
return orders_obj
if url_end == '/orders':
orders_dict = {(Order(ord)).order_id: Order(ord)
for ord in orders_json}
return orders_dict
def order(ses, orderId, status='OPEN'):
""" status can be OPEN, TRANSACTED or CLOSED
status OPEN by default
returns a Order object of the order class given an order id
"""
return _orders_response_handle(_get_orders_json(
ses, '/orders/{}', status, order_id=orderId), '/orders/{}')
def orders_json(ses, status='OPEN'):
""" returns all the attribs of all orders in a json type list format """
return _get_orders_json(ses, '/orders', status, order_id=None)
def orders_dict(ses, status='OPEN'):
""" returns all the orders as a dict with the order_ids as key """
return _orders_response_handle(_get_orders_json(
ses, '/orders', status, order_id=None), '/orders')
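# Illustrative usage (a sketch; the header name below follows the usual RIT
# API-key convention and is an assumption, not something defined in this module):
#
#   import requests
#   ses = requests.Session()
#   ses.headers.update({'X-API-key': 'YOUR_API_KEY'})
#   open_orders = orders_dict(ses)          # {order_id: Order, ...}
#   one_order = order(ses, orderId=1221)    # a single Order object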
| 34.15
| 82
| 0.648023
|
41dcf5700603ddc8f82c4fc9b14d035b982084b4
| 10,736
|
py
|
Python
|
baselines/her/experiment/config.py
|
flowersteam/curious
|
05f0ce053c3614688c256ec34b08060808e62d81
|
[
"MIT"
] | 27
|
2019-05-30T16:45:34.000Z
|
2022-03-31T06:22:07.000Z
|
baselines/her/experiment/config.py
|
flowersteam/curious
|
05f0ce053c3614688c256ec34b08060808e62d81
|
[
"MIT"
] | 1
|
2019-06-18T18:57:10.000Z
|
2019-08-16T00:17:50.000Z
|
baselines/her/experiment/config.py
|
flowersteam/curious
|
05f0ce053c3614688c256ec34b08060808e62d81
|
[
"MIT"
] | 5
|
2019-08-11T23:29:53.000Z
|
2021-02-24T14:11:58.000Z
|
import numpy as np
import gym
import gym_flowers
from baselines import logger
from baselines.her.ddpg import DDPG
from baselines.her.her import make_sample_her_transitions, make_sample_multi_task_her_transitions
from baselines.her.util import import_function
from baselines.her.replay_buffer import ReplayBuffer
DEFAULT_ENV_PARAMS = {
'FetchReach-v1': {
'n_cycles': 10,
},
}
DEFAULT_PARAMS = {
# env
'max_u': 1., # max absolute value of actions on different coordinates
# ddpg
'layers': 3, # number of layers in the critic/actor networks
'hidden': 256, # number of neurons in each hidden layers
'network_class': 'baselines.her.actor_critic:ActorCritic',
'Q_lr': 0.001, # critic learning rate
'pi_lr': 0.001, # actor learning rate
'buffer_size': int(1E6), # for experience replay
'polyak': 0.95, # polyak averaging coefficient
'action_l2': 1.0, # quadratic penalty on actions (before rescaling by max_u)
'clip_obs': 200.,
'scope': 'ddpg', # can be tweaked for testing
'relative_goals': False,
# training
'n_cycles': 25, # per epoch
'rollout_batch_size': 2, # per mpi thread
'n_batches': 100, # training batches per cycle
'batch_size': 256, # per mpi thread, measured in transitions and reduced to even multiple of chunk_length.
'n_test_rollouts': 5, # number of test rollouts per epoch, each consists of rollout_batch_size rollouts
'test_with_polyak': False, # run test episodes with the target network
# exploration
'random_eps': 0.3, # percentage of time a random action is taken
'noise_eps': 0.2, # std of gaussian noise added to not-completely-random actions as a percentage of max_u
# HER
'her_replay_k': 4, # number of additional goals used for replay, only used if off_policy_data=future
# normalization
'norm_eps': 0.01, # epsilon used for observation normalization
'norm_clip': 5, # normalized observations are cropped to this values
'her_sampling_func': 'baselines.her.her:make_sample_her_transitions',
'queue_length': 200,
}
MULTI_TASK_PARAMS = {
# env
'max_u': 1., # max absolute value of actions on different coordinates
# ddpg
'layers': 3, # number of layers in the critic/actor networks
'hidden': 256, # number of neurons in each hidden layers
'network_class': 'baselines.her.actor_critic:MultiTaskActorCritic',
'Q_lr': 0.001, # critic learning rate
'pi_lr': 0.001, # actor learning rate
'buffer_size': int(1E6), # for experience replay
'polyak': 0.95, # polyak averaging coefficient
'action_l2': 1.0, # quadratic penalty on actions (before rescaling by max_u)
'clip_obs': 200.,
'scope': 'ddpg', # can be tweaked for testing
'relative_goals': False,
# training
'n_cycles': 25, # per epoch
'rollout_batch_size': 2, # per mpi thread
'n_batches': 100, #40, # training batches per cycle
'batch_size': 256, # per mpi thread, measured in transitions and reduced to even multiple of chunk_length.
'n_test_rollouts': 5, # number of test rollouts per epoch, each consists of rollout_batch_size rollouts
'test_with_polyak': False, # run test episodes with the target network
# exploration
'random_eps': 0.3, # percentage of time a random action is taken
'noise_eps': 0.2, # std of gaussian noise added to not-completely-random actions as a percentage of max_u
# HER
'her_replay_k': 4, # number of additional goals used for replay, only used if off_policy_data=future
# normalization
'norm_eps': 0.01, # epsilon used for observation normalization
'norm_clip': 5, # normalized observations are cropped to this values
'her_sampling_func': 'baselines.her.her:make_sample_multi_task_her_transitions',
'queue_length': 300, # length of queue for computation of competence
'eps_task': 0.4 # epsilon greedy parameter for active task choice
}
CACHED_ENVS = {}
def cached_make_env(make_env):
"""
Only creates a new environment from the provided function if one has not already
been created. This is useful here because we need to infer certain properties of
the env, e.g. its observation and action spaces, without any intent of actually
using it.
"""
if make_env not in CACHED_ENVS:
env = make_env()
CACHED_ENVS[make_env] = env
return CACHED_ENVS[make_env]
def prepare_params(kwargs):
# DDPG params
ddpg_params = dict()
env_name = kwargs['env_name']
def make_env():
return gym.make(env_name)
kwargs['make_env'] = make_env
tmp_env = cached_make_env(kwargs['make_env'])
if kwargs['structure'] == 'flat':
tmp_env.unwrapped.set_flat_env()
kwargs['nb_tasks'] = tmp_env.unwrapped.nb_tasks
kwargs['tasks_g_id'] = tmp_env.unwrapped.tasks_g_id
kwargs['tasks_ag_id'] = tmp_env.unwrapped.tasks_ag_id
assert hasattr(tmp_env, '_max_episode_steps')
kwargs['T'] = tmp_env._max_episode_steps
tmp_env.reset()
kwargs['max_u'] = np.array(kwargs['max_u']) if isinstance(kwargs['max_u'], list) else kwargs['max_u']
kwargs['gamma'] = 1. - 1. / kwargs['T']
if 'lr' in kwargs:
kwargs['pi_lr'] = kwargs['lr']
kwargs['Q_lr'] = kwargs['lr']
del kwargs['lr']
for name in ['hidden', 'layers',
'network_class',
'polyak',
'batch_size', 'Q_lr', 'pi_lr',
'norm_eps', 'norm_clip', 'max_u',
'action_l2', 'clip_obs', 'scope', 'relative_goals']:
ddpg_params[name] = kwargs[name]
kwargs['_' + name] = kwargs[name]
del kwargs[name]
kwargs['ddpg_params'] = ddpg_params
# if kwargs['num_cpu'] == 1:
# # make more test rollout when number of cpu is lower (to get a better estimate)
# kwargs['n_test_rollouts'] = 100
return kwargs
def log_params(params, logger=logger):
for key in sorted(params.keys()):
logger.info('{}: {}'.format(key, params[key]))
def configure_her(params):
env = cached_make_env(params['make_env'])
env.reset()
if params['structure'] == 'flat':
env.unwrapped.set_flat_env()
def reward_fun(ag_2, g, task_descr, info): # vectorized
return env.unwrapped.compute_reward(achieved_goal=ag_2, goal=g, task_descr=task_descr, info=info)
# Prepare configuration for HER.
her_params = {
'reward_fun': reward_fun,
'tasks_ag_id': params['tasks_ag_id'],
'tasks_g_id': params['tasks_g_id'],
'goal_replay': params['goal_replay'],
'her_replay_k': params['her_replay_k'],
'task_replay': params['task_replay']
}
her_sampling_func = import_function(params['her_sampling_func'])
sample_her_transitions = her_sampling_func(**her_params)
return sample_her_transitions
def simple_goal_subtract(a, b):
assert a.shape == b.shape
return a - b
def dims_to_shapes(input_dims):
return {key: tuple([val]) if val > 0 else tuple() for key, val in input_dims.items()}
def configure_buffer(dims, params):
T = params['T']
structure = params['structure']
buffer_size = params['buffer_size']
rollout_batch_size = params['rollout_batch_size']
task_replay = params['task_replay']
sample_her_transitions = configure_her(params)
input_shapes = dims_to_shapes(dims)
dimg = dims['g']
dimag = dims['ag']
if structure == 'curious' or structure == 'task_experts':
dimtask_descr = dims['task_descr']
# Configure the replay buffer.
buffer_shapes = {key: (T if key != 'o' else T + 1, *input_shapes[key])
for key, val in input_shapes.items()}
buffer_shapes['g'] = (buffer_shapes['g'][0], dimg)
buffer_shapes['ag'] = (T + 1, dimag)
buffer_size = (buffer_size // rollout_batch_size) * rollout_batch_size
# addition for goal module selection
buffer_shapes['task_descr'] = (buffer_shapes['g'][0], dimtask_descr)
buffer_shapes['change'] = (buffer_shapes['g'][0], dimag)
if 'buffer' in task_replay:
# use several buffers, one per task; the first corresponds to transitions where no outcome moved.
buffers = [ReplayBuffer(buffer_shapes, buffer_size, T, sample_her_transitions) for i in range(params['nb_tasks'] + 1)]
else:
buffers = ReplayBuffer(buffer_shapes, buffer_size, T, sample_her_transitions)
return buffers
def configure_ddpg(dims, params, buffers, reuse=False, use_mpi=True, clip_return=True, t_id=None):
sample_her_transitions = configure_her(params)
# Extract relevant parameters.
gamma = params['gamma']
rollout_batch_size = params['rollout_batch_size']
ddpg_params = params['ddpg_params']
input_dims = dims.copy()
# DDPG agent
env = cached_make_env(params['make_env'])
env.reset()
ddpg_params.update({'input_dims': input_dims, # agent takes an input observations
'T': params['T'],
'clip_pos_returns': True, # clip positive returns
'clip_return': (1. / (1. - gamma)) if clip_return else np.inf, # max abs of return
'rollout_batch_size': rollout_batch_size,
'subtract_goals': simple_goal_subtract,
'sample_transitions': sample_her_transitions,
'gamma': gamma,
'task_replay': params['task_replay'],
'structure': params['structure'],
'tasks_ag_id': params['tasks_ag_id'],
'tasks_g_id': params['tasks_g_id'],
'eps_task': params['eps_task']
})
if t_id is not None:
# give task id to rollout worker in the case of multiple task-experts
ddpg_params.update({'t_id':t_id})
ddpg_params['info'] = {
'env_name': params['env_name'],
}
policy = DDPG(reuse=reuse, **ddpg_params, buffers=buffers, use_mpi=use_mpi)
return policy
def configure_dims(params):
env = cached_make_env(params['make_env'])
info = env.unwrapped.info
dims = {
'o': env.observation_space.spaces['observation'].shape[0],
'u': env.action_space.shape[0],
'g': env.observation_space.spaces['desired_goal'].shape[0],
'ag': env.observation_space.spaces['achieved_goal'].shape[0]
}
# addition in the case of curious structure
dims['task_descr'] = params['nb_tasks']
for key, value in info.items():
value = np.array(value)
if value.ndim == 0:
value = value.reshape(1)
dims['info_{}'.format(key)] = value.shape[0]
return dims
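# Minimal wiring sketch (illustrative only; 'MultiTaskFetchArm-v0' and the extra
# keys below are placeholders/assumptions about what the caller supplies on top
# of MULTI_TASK_PARAMS, not values defined in this file):
#
#   params = dict(MULTI_TASK_PARAMS)
#   params.update(env_name='MultiTaskFetchArm-v0', structure='curious',
#                 task_replay='replay_task_random_buffer', goal_replay='her')
#   params = prepare_params(params)
#   dims = configure_dims(params)
#   buffers = configure_buffer(dims, params)
#   policy = configure_ddpg(dims=dims, params=params, buffers=buffers)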
| 38.898551
| 126
| 0.655086
|
dd4062f2c72b3e52da3618078d5d55541688c8aa
| 1,487
|
py
|
Python
|
observations/r/macro.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 199
|
2017-07-24T01:34:27.000Z
|
2022-01-29T00:50:55.000Z
|
observations/r/macro.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 46
|
2017-09-05T19:27:20.000Z
|
2019-01-07T09:47:26.000Z
|
observations/r/macro.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 45
|
2017-07-26T00:10:44.000Z
|
2022-03-16T20:44:59.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def macro(path):
"""Macroeconomic Data
Selected macroeconomic indicators for Austria, Belgium, Canada, Denmark,
Finland, France, Italy, Japan, the Netherlands, Norway, Sweden, the
United Kingdom, the United States, and West Germany for the period
1966-1990.
A table containing 6 variables ("country", "year", "gdp", "unem",
"capmob", and "trade") and 350 observations.
ICPSR
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `macro.csv`.
Returns:
Tuple of np.ndarray `x_train` with 350 rows and 6 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'macro.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Zelig/macro.csv'
maybe_download_and_extract(path, url,
save_file_name='macro.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
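# Illustrative call (the path is a placeholder); per the docstring above the
# returned array has 350 rows and 6 columns:
#
#   x_train, metadata = macro('~/observations_data')
#   # x_train.shape == (350, 6)
#   # metadata['columns'] lists country, year, gdp, unem, capmob, trade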
| 28.056604
| 74
| 0.68729
|
3f1ee6ecc7b1530c7dbd481e290eddc7cec81af8
| 158,987
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_infra_syslog_cfg.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_infra_syslog_cfg.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_infra_syslog_cfg.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'LogSeverityEnum' : _MetaInfoEnum('LogSeverityEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg',
{
'emergency':'emergency',
'alert':'alert',
'critical':'critical',
'error':'error',
'warning':'warning',
'notice':'notice',
'informational':'informational',
'debug':'debug',
}, 'Cisco-IOS-XR-infra-syslog-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg']),
'FacilityEnum' : _MetaInfoEnum('FacilityEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg',
{
'kern':'kern',
'user':'user',
'mail':'mail',
'daemon':'daemon',
'auth':'auth',
'syslog':'syslog',
'lpr':'lpr',
'news':'news',
'uucp':'uucp',
'cron':'cron',
'authpriv':'authpriv',
'ftp':'ftp',
'local0':'local0',
'local1':'local1',
'local2':'local2',
'local3':'local3',
'local4':'local4',
'local5':'local5',
'local6':'local6',
'local7':'local7',
'sys9':'sys9',
'sys10':'sys10',
'sys11':'sys11',
'sys12':'sys12',
'sys13':'sys13',
'sys14':'sys14',
}, 'Cisco-IOS-XR-infra-syslog-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg']),
'LogCollectFrequencyEnum' : _MetaInfoEnum('LogCollectFrequencyEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg',
{
'weekly':'weekly',
'daily':'daily',
}, 'Cisco-IOS-XR-infra-syslog-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg']),
'LoggingPrecedenceValueEnum' : _MetaInfoEnum('LoggingPrecedenceValueEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg',
{
'routine':'routine',
'priority':'priority',
'immediate':'immediate',
'flash':'flash',
'flash-override':'flash_override',
'critical':'critical',
'internet':'internet',
'network':'network',
}, 'Cisco-IOS-XR-infra-syslog-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg']),
'LoggingTosEnum' : _MetaInfoEnum('LoggingTosEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg',
{
'precedence':'precedence',
'dscp':'dscp',
}, 'Cisco-IOS-XR-infra-syslog-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg']),
'LoggingLevelsEnum' : _MetaInfoEnum('LoggingLevelsEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg',
{
'emergency':'emergency',
'alert':'alert',
'critical':'critical',
'error':'error',
'warning':'warning',
'notice':'notice',
'info':'info',
'debug':'debug',
'disable':'disable',
}, 'Cisco-IOS-XR-infra-syslog-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg']),
'LoggingPrecedenceEnum' : _MetaInfoEnum('LoggingPrecedenceEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg',
{
'precedence':'precedence',
}, 'Cisco-IOS-XR-infra-syslog-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg']),
'LoggingDscpValueEnum' : _MetaInfoEnum('LoggingDscpValueEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg',
{
'default':'default',
'af11':'af11',
'af12':'af12',
'af13':'af13',
'af21':'af21',
'af22':'af22',
'af23':'af23',
'af31':'af31',
'af32':'af32',
'af33':'af33',
'af41':'af41',
'af42':'af42',
'af43':'af43',
'ef':'ef',
'cs1':'cs1',
'cs2':'cs2',
'cs3':'cs3',
'cs4':'cs4',
'cs5':'cs5',
'cs6':'cs6',
'cs7':'cs7',
}, 'Cisco-IOS-XR-infra-syslog-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg']),
'LogMessageSeverityEnum' : _MetaInfoEnum('LogMessageSeverityEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg',
{
'emergency':'emergency',
'alert':'alert',
'critical':'critical',
'error':'error',
'warning':'warning',
'notice':'notice',
'informational':'informational',
'debug':'debug',
}, 'Cisco-IOS-XR-infra-syslog-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg']),
'TimeInfoEnum' : _MetaInfoEnum('TimeInfoEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg',
{
'disable':'disable',
'enable':'enable',
}, 'Cisco-IOS-XR-infra-syslog-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg']),
'LoggingDscpEnum' : _MetaInfoEnum('LoggingDscpEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg',
{
'dscp':'dscp',
}, 'Cisco-IOS-XR-infra-syslog-cfg', _yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg']),
'SyslogService.Timestamps.Log.LogDatetime.LogDatetimeValue' : {
'meta_info' : _MetaInfoClass('SyslogService.Timestamps.Log.LogDatetime.LogDatetimeValue',
False,
[
_MetaInfoClassMember('msec', REFERENCE_ENUM_CLASS, 'TimeInfoEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'TimeInfoEnum',
[], [],
''' Seconds
''',
'msec',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('time-stamp-value', REFERENCE_ENUM_CLASS, 'TimeInfoEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'TimeInfoEnum',
[], [],
''' Time
''',
'time_stamp_value',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('time-zone', REFERENCE_ENUM_CLASS, 'TimeInfoEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'TimeInfoEnum',
[], [],
''' Timezone
''',
'time_zone',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('year', REFERENCE_ENUM_CLASS, 'TimeInfoEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'TimeInfoEnum',
[], [],
''' Year
''',
'year',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'log-datetime-value',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'SyslogService.Timestamps.Log.LogDatetime' : {
'meta_info' : _MetaInfoClass('SyslogService.Timestamps.Log.LogDatetime',
False,
[
_MetaInfoClassMember('log-datetime-value', REFERENCE_CLASS, 'LogDatetimeValue' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'SyslogService.Timestamps.Log.LogDatetime.LogDatetimeValue',
[], [],
''' Set timestamp for log message
''',
'log_datetime_value',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'log-datetime',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'SyslogService.Timestamps.Log' : {
'meta_info' : _MetaInfoClass('SyslogService.Timestamps.Log',
False,
[
_MetaInfoClassMember('log-datetime', REFERENCE_CLASS, 'LogDatetime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'SyslogService.Timestamps.Log.LogDatetime',
[], [],
''' Timestamp with date and time
''',
'log_datetime',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('log-timestamp-disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable timestamp log messages
''',
'log_timestamp_disable',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('log-uptime', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Timestamp with systime uptime
''',
'log_uptime',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'log',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'SyslogService.Timestamps.Debug.DebugDatetime.DatetimeValue' : {
'meta_info' : _MetaInfoClass('SyslogService.Timestamps.Debug.DebugDatetime.DatetimeValue',
False,
[
_MetaInfoClassMember('msec', REFERENCE_ENUM_CLASS, 'TimeInfoEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'TimeInfoEnum',
[], [],
''' Seconds
''',
'msec',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('time-stamp-value', REFERENCE_ENUM_CLASS, 'TimeInfoEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'TimeInfoEnum',
[], [],
''' Time
''',
'time_stamp_value',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('time-zone', REFERENCE_ENUM_CLASS, 'TimeInfoEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'TimeInfoEnum',
[], [],
''' Timezone
''',
'time_zone',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('year', REFERENCE_ENUM_CLASS, 'TimeInfoEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'TimeInfoEnum',
[], [],
''' Year
''',
'year',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'datetime-value',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'SyslogService.Timestamps.Debug.DebugDatetime' : {
'meta_info' : _MetaInfoClass('SyslogService.Timestamps.Debug.DebugDatetime',
False,
[
_MetaInfoClassMember('datetime-value', REFERENCE_CLASS, 'DatetimeValue' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'SyslogService.Timestamps.Debug.DebugDatetime.DatetimeValue',
[], [],
''' Set time format for debug msg
''',
'datetime_value',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'debug-datetime',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'SyslogService.Timestamps.Debug' : {
'meta_info' : _MetaInfoClass('SyslogService.Timestamps.Debug',
False,
[
_MetaInfoClassMember('debug-datetime', REFERENCE_CLASS, 'DebugDatetime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'SyslogService.Timestamps.Debug.DebugDatetime',
[], [],
''' Timestamp with date and time
''',
'debug_datetime',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('debug-timestamp-disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable timestamp debug messages
''',
'debug_timestamp_disable',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('debug-uptime', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Timestamp with systime uptime
''',
'debug_uptime',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'debug',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'SyslogService.Timestamps' : {
'meta_info' : _MetaInfoClass('SyslogService.Timestamps',
False,
[
_MetaInfoClassMember('debug', REFERENCE_CLASS, 'Debug' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'SyslogService.Timestamps.Debug',
[], [],
''' Timestamp debug messages
''',
'debug',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable timestamp debug/log messages
''',
'enable',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('log', REFERENCE_CLASS, 'Log' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'SyslogService.Timestamps.Log',
[], [],
''' Timestamp log messages
''',
'log',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'timestamps',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'SyslogService' : {
'meta_info' : _MetaInfoClass('SyslogService',
False,
[
_MetaInfoClassMember('timestamps', REFERENCE_CLASS, 'Timestamps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'SyslogService.Timestamps',
[], [],
''' Timestamp debug/log messages configuration
''',
'timestamps',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'syslog-service',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.MonitorLogging.MonitorDiscriminator' : {
'meta_info' : _MetaInfoClass('Syslog.MonitorLogging.MonitorDiscriminator',
False,
[
_MetaInfoClassMember('match1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set monitor logging match1 discriminator
''',
'match1',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('match2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set monitor logging match2 discriminator
''',
'match2',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('match3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set monitor logging match3 discriminator
''',
'match3',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set monitor logging no-match1 discriminator
''',
'nomatch1',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set monitor logging no-match2 discriminator
''',
'nomatch2',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set monitor logging no-match3 discriminator
''',
'nomatch3',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'monitor-discriminator',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.MonitorLogging' : {
'meta_info' : _MetaInfoClass('Syslog.MonitorLogging',
False,
[
_MetaInfoClassMember('logging-level', REFERENCE_ENUM_CLASS, 'LoggingLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingLevelsEnum',
[], [],
''' Monitor Logging Level
''',
'logging_level',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('monitor-discriminator', REFERENCE_CLASS, 'MonitorDiscriminator' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.MonitorLogging.MonitorDiscriminator',
[], [],
''' Set monitor logging discriminators
''',
'monitor_discriminator',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'monitor-logging',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HistoryLogging' : {
'meta_info' : _MetaInfoClass('Syslog.HistoryLogging',
False,
[
_MetaInfoClassMember('history-size', ATTRIBUTE, 'int' , None, None,
[('1', '500')], [],
''' Logging history size
''',
'history_size',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('logging-level', REFERENCE_ENUM_CLASS, 'LoggingLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingLevelsEnum',
[], [],
''' History logging level
''',
'logging_level',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'history-logging',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.LoggingFacilities' : {
'meta_info' : _MetaInfoClass('Syslog.LoggingFacilities',
False,
[
_MetaInfoClassMember('facility-level', REFERENCE_ENUM_CLASS, 'FacilityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'FacilityEnum',
[], [],
''' Facility from which logging is done
''',
'facility_level',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'logging-facilities',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.TrapLogging' : {
'meta_info' : _MetaInfoClass('Syslog.TrapLogging',
False,
[
_MetaInfoClassMember('logging-level', REFERENCE_ENUM_CLASS, 'LoggingLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingLevelsEnum',
[], [],
''' Trap logging level
''',
'logging_level',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'trap-logging',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.BufferedLogging.BufferedDiscriminator' : {
'meta_info' : _MetaInfoClass('Syslog.BufferedLogging.BufferedDiscriminator',
False,
[
_MetaInfoClassMember('match1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set buffered logging match1 discriminator
''',
'match1',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('match2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set buffered logging match2 discriminator
''',
'match2',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('match3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set buffered logging match3 discriminator
''',
'match3',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set buffered logging no-match1 discriminator
''',
'nomatch1',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set buffered logging no-match2 discriminator
''',
'nomatch2',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set buffered logging no-match3 discriminator
''',
'nomatch3',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'buffered-discriminator',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.BufferedLogging' : {
'meta_info' : _MetaInfoClass('Syslog.BufferedLogging',
False,
[
_MetaInfoClassMember('buffer-size', ATTRIBUTE, 'int' , None, None,
[('4096', '4294967295')], [],
''' Logging buffered size
''',
'buffer_size',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('buffered-discriminator', REFERENCE_CLASS, 'BufferedDiscriminator' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.BufferedLogging.BufferedDiscriminator',
[], [],
''' Set buffered logging discriminators
''',
'buffered_discriminator',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('logging-level', REFERENCE_ENUM_CLASS, 'LoggingLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingLevelsEnum',
[], [],
''' Logging level for Buffered logging
''',
'logging_level',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'buffered-logging',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6SeverityPort' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6SeverityPort',
False,
[
_MetaInfoClassMember('port', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Port for the logging host
''',
'port',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('severity', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Severity for the logging host
''',
'severity',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'ipv6-severity-port',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6Discriminator' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6Discriminator',
False,
[
_MetaInfoClassMember('match1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set IPv6 logging match1 discriminator
''',
'match1',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('match2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set IPv6 logging match2 discriminator
''',
'match2',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('match3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set IPv6 logging match3 discriminator
''',
'match3',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set IPv6 logging no-match1 discriminator
''',
'nomatch1',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set IPv6 logging no-match2 discriminator
''',
'nomatch2',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set IPv6 logging no-match3 discriminator
''',
'nomatch3',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'ipv6-discriminator',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6SeverityLevels.Ipv6SeverityLevel' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6SeverityLevels.Ipv6SeverityLevel',
False,
[
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'LogSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LogSeverityEnum',
[], [],
''' Severity for the logging host
''',
'severity',
'Cisco-IOS-XR-infra-syslog-cfg', True),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'ipv6-severity-level',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6SeverityLevels' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6SeverityLevels',
False,
[
_MetaInfoClassMember('ipv6-severity-level', REFERENCE_LIST, 'Ipv6SeverityLevel' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6SeverityLevels.Ipv6SeverityLevel',
[], [],
''' Severity for the logging host
''',
'ipv6_severity_level',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'ipv6-severity-levels',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address of the logging host
''',
'address',
'Cisco-IOS-XR-infra-syslog-cfg', True),
_MetaInfoClassMember('ipv6-discriminator', REFERENCE_CLASS, 'Ipv6Discriminator' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6Discriminator',
[], [],
''' Set IPv6 logging discriminators
''',
'ipv6_discriminator',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('ipv6-severity-levels', REFERENCE_CLASS, 'Ipv6SeverityLevels' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6SeverityLevels',
[], [],
''' Severity container of the logging host
''',
'ipv6_severity_levels',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('ipv6-severity-port', REFERENCE_CLASS, 'Ipv6SeverityPort' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6SeverityPort',
[], [],
''' Severity/Port for the logging host
''',
'ipv6_severity_port',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'ipv6',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Ipv6S' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Ipv6S',
False,
[
_MetaInfoClassMember('ipv6', REFERENCE_LIST, 'Ipv6' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6',
[], [],
''' IPv6 address of the logging host
''',
'ipv6',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'ipv6s',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostNameSeverities.HostNameSeverity' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostNameSeverities.HostNameSeverity',
False,
[
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'LogSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LogSeverityEnum',
[], [],
''' Severity for the logging host
''',
'severity',
'Cisco-IOS-XR-infra-syslog-cfg', True),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'host-name-severity',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostNameSeverities' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostNameSeverities',
False,
[
_MetaInfoClassMember('host-name-severity', REFERENCE_LIST, 'HostNameSeverity' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostNameSeverities.HostNameSeverity',
[], [],
''' Severity for the logging host
''',
'host_name_severity',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'host-name-severities',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostNameDiscriminator' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostNameDiscriminator',
False,
[
_MetaInfoClassMember('match1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set hostname logging match1 discriminator
''',
'match1',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('match2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set hostname logging match2 discriminator
''',
'match2',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('match3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set hostname logging match3 discriminator
''',
'match3',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set hostname logging no-match1
discriminator
''',
'nomatch1',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set hostname logging no-match2
discriminator
''',
'nomatch2',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set hostname logging no-match3
discriminator
''',
'nomatch3',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'host-name-discriminator',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostSeverityPort' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostSeverityPort',
False,
[
_MetaInfoClassMember('port', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Port for the logging host
''',
'port',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('severity', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Severity for the logging host
''',
'severity',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'host-severity-port',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Hosts.Host' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Hosts.Host',
False,
[
_MetaInfoClassMember('host-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name of the logging host
''',
'host_name',
'Cisco-IOS-XR-infra-syslog-cfg', True),
_MetaInfoClassMember('host-name-discriminator', REFERENCE_CLASS, 'HostNameDiscriminator' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostNameDiscriminator',
[], [],
''' Set Hostname logging discriminators
''',
'host_name_discriminator',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('host-name-severities', REFERENCE_CLASS, 'HostNameSeverities' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostNameSeverities',
[], [],
''' Severity container of the logging host
''',
'host_name_severities',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('host-severity-port', REFERENCE_CLASS, 'HostSeverityPort' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostSeverityPort',
[], [],
''' Severity/Port for the logging host
''',
'host_severity_port',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'host',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Hosts' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Hosts',
False,
[
_MetaInfoClassMember('host', REFERENCE_LIST, 'Host' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Hosts.Host',
[], [],
''' Name of the logging host
''',
'host',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'hosts',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4SeverityLevels.Ipv4SeverityLevel' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4SeverityLevels.Ipv4SeverityLevel',
False,
[
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'LogSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LogSeverityEnum',
[], [],
''' Severity for the logging host
''',
'severity',
'Cisco-IOS-XR-infra-syslog-cfg', True),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'ipv4-severity-level',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4SeverityLevels' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4SeverityLevels',
False,
[
_MetaInfoClassMember('ipv4-severity-level', REFERENCE_LIST, 'Ipv4SeverityLevel' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4SeverityLevels.Ipv4SeverityLevel',
[], [],
''' Severity for the logging host
''',
'ipv4_severity_level',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'ipv4-severity-levels',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4SeverityPort' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4SeverityPort',
False,
[
_MetaInfoClassMember('port', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Port for the logging host
''',
'port',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('severity', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Severity for the logging host
''',
'severity',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'ipv4-severity-port',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4Discriminator' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4Discriminator',
False,
[
_MetaInfoClassMember('match1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set IPv4 logging match1 discriminator
''',
'match1',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('match2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set IPv4 logging match2 discriminator
''',
'match2',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('match3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set IPv4 logging match3 discriminator
''',
'match3',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set IPv4 logging no-match1 discriminator
''',
'nomatch1',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set IPv4 logging no-match2 discriminator
''',
'nomatch2',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set IPv4 logging no-match3 discriminator
''',
'nomatch3',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'ipv4-discriminator',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address of the logging host
''',
'address',
'Cisco-IOS-XR-infra-syslog-cfg', True),
_MetaInfoClassMember('ipv4-discriminator', REFERENCE_CLASS, 'Ipv4Discriminator' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4Discriminator',
[], [],
''' Set IPv4 logging discriminators
''',
'ipv4_discriminator',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('ipv4-severity-levels', REFERENCE_CLASS, 'Ipv4SeverityLevels' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4SeverityLevels',
[], [],
''' Severity container of the logging host
''',
'ipv4_severity_levels',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('ipv4-severity-port', REFERENCE_CLASS, 'Ipv4SeverityPort' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4SeverityPort',
[], [],
''' Severity/Port for the logging host
''',
'ipv4_severity_port',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'ipv4',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf.Ipv4S' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf.Ipv4S',
False,
[
_MetaInfoClassMember('ipv4', REFERENCE_LIST, 'Ipv4' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4',
[], [],
''' IPv4 address of the logging host
''',
'ipv4',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'ipv4s',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs.Vrf' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs.Vrf',
False,
[
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Name of the VRF instance
''',
'vrf_name',
'Cisco-IOS-XR-infra-syslog-cfg', True),
_MetaInfoClassMember('hosts', REFERENCE_CLASS, 'Hosts' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Hosts',
[], [],
''' List of the logging host
''',
'hosts',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('ipv4s', REFERENCE_CLASS, 'Ipv4S' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Ipv4S',
[], [],
''' List of the IPv4 logging host
''',
'ipv4s',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('ipv6s', REFERENCE_CLASS, 'Ipv6S' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf.Ipv6S',
[], [],
''' List of the IPv6 logging host
''',
'ipv6s',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'vrf',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer.Vrfs' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer.Vrfs',
False,
[
_MetaInfoClassMember('vrf', REFERENCE_LIST, 'Vrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs.Vrf',
[], [],
''' VRF specific data
''',
'vrf',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'vrfs',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.HostServer' : {
'meta_info' : _MetaInfoClass('Syslog.HostServer',
False,
[
_MetaInfoClassMember('vrfs', REFERENCE_CLASS, 'Vrfs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer.Vrfs',
[], [],
''' VRF table
''',
'vrfs',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'host-server',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.ConsoleLogging.ConsoleDiscriminator' : {
'meta_info' : _MetaInfoClass('Syslog.ConsoleLogging.ConsoleDiscriminator',
False,
[
_MetaInfoClassMember('match1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set console logging match1 discriminator
''',
'match1',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('match2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set console logging match2 discriminator
''',
'match2',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('match3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set console logging match3 discriminator
''',
'match3',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set console logging no-match1 discriminator
''',
'nomatch1',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set console logging no-match2 discriminator
''',
'nomatch2',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set console logging no-match3 discriminator
''',
'nomatch3',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'console-discriminator',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.ConsoleLogging' : {
'meta_info' : _MetaInfoClass('Syslog.ConsoleLogging',
False,
[
_MetaInfoClassMember('console-discriminator', REFERENCE_CLASS, 'ConsoleDiscriminator' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.ConsoleLogging.ConsoleDiscriminator',
[], [],
''' Set console logging discriminators
''',
'console_discriminator',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('logging-level', REFERENCE_ENUM_CLASS, 'LoggingLevelsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingLevelsEnum',
[], [],
''' Console logging level
''',
'logging_level',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'console-logging',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Files.File.FileSpecification' : {
'meta_info' : _MetaInfoClass('Syslog.Files.File.FileSpecification',
False,
[
_MetaInfoClassMember('max-file-size', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Maximum file size (in KB)
''',
'max_file_size',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('path', ATTRIBUTE, 'str' , None, None,
[], [],
''' File path
''',
'path',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('severity', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Severity of messages
''',
'severity',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'file-specification',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Files.File.FileLogAttributes' : {
'meta_info' : _MetaInfoClass('Syslog.Files.File.FileLogAttributes',
False,
[
_MetaInfoClassMember('max-file-size', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Maximum file size (in KB)
''',
'max_file_size',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('severity', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Severity of messages
''',
'severity',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'file-log-attributes',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Files.File.FileLogDiscriminator' : {
'meta_info' : _MetaInfoClass('Syslog.Files.File.FileLogDiscriminator',
False,
[
_MetaInfoClassMember('match1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set file logging match discriminator 1
''',
'match1',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('match2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set file logging match discriminator 2
''',
'match2',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('match3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set file logging match discriminator 3
''',
'match3',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set file logging no match discriminator 1
''',
'nomatch1',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set file logging no match discriminator 2
''',
'nomatch2',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('nomatch3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Set file logging no match discriminator 3
''',
'nomatch3',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'file-log-discriminator',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Files.File' : {
'meta_info' : _MetaInfoClass('Syslog.Files.File',
False,
[
_MetaInfoClassMember('file-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Name of the file
''',
'file_name',
'Cisco-IOS-XR-infra-syslog-cfg', True),
_MetaInfoClassMember('file-log-attributes', REFERENCE_CLASS, 'FileLogAttributes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Files.File.FileLogAttributes',
[], [],
''' Attributes of the logging file destination
''',
'file_log_attributes',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('file-log-discriminator', REFERENCE_CLASS, 'FileLogDiscriminator' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Files.File.FileLogDiscriminator',
[], [],
''' Set File logging discriminators
''',
'file_log_discriminator',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('file-specification', REFERENCE_CLASS, 'FileSpecification' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Files.File.FileSpecification',
[], [],
''' Specifications of the logging file destination
''',
'file_specification',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'file',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Files' : {
'meta_info' : _MetaInfoClass('Syslog.Files',
False,
[
_MetaInfoClassMember('file', REFERENCE_LIST, 'File' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Files.File',
[], [],
''' Specify File Name
''',
'file',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'files',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Ipv4.Dscp' : {
'meta_info' : _MetaInfoClass('Syslog.Ipv4.Dscp',
False,
[
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'LoggingDscpEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingDscpEnum',
[], [],
''' Logging TOS type DSCP
''',
'type',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('unused', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Unused
''',
'unused',
'Cisco-IOS-XR-infra-syslog-cfg', False, [
_MetaInfoClassMember('unused', REFERENCE_ENUM_CLASS, 'LoggingPrecedenceValueEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingPrecedenceValueEnum',
[], [],
''' Unused
''',
'unused',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('unused', ATTRIBUTE, 'int' , None, None,
[('0', '7')], [],
''' Unused
''',
'unused',
'Cisco-IOS-XR-infra-syslog-cfg', False),
]),
_MetaInfoClassMember('value', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Logging DSCP value
''',
'value',
'Cisco-IOS-XR-infra-syslog-cfg', False, [
_MetaInfoClassMember('value', REFERENCE_ENUM_CLASS, 'LoggingDscpValueEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingDscpValueEnum',
[], [],
''' Logging DSCP value
''',
'value',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('0', '63')], [],
''' Logging DSCP value
''',
'value',
'Cisco-IOS-XR-infra-syslog-cfg', False),
]),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'dscp',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Ipv4.Tos' : {
'meta_info' : _MetaInfoClass('Syslog.Ipv4.Tos',
False,
[
_MetaInfoClassMember('dscp', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Logging DSCP value
''',
'dscp',
'Cisco-IOS-XR-infra-syslog-cfg', False, [
_MetaInfoClassMember('dscp', REFERENCE_ENUM_CLASS, 'LoggingDscpValueEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingDscpValueEnum',
[], [],
''' Logging DSCP value
''',
'dscp',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('dscp', ATTRIBUTE, 'int' , None, None,
[('0', '63')], [],
''' Logging DSCP value
''',
'dscp',
'Cisco-IOS-XR-infra-syslog-cfg', False),
]),
_MetaInfoClassMember('precedence', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Logging precedence value
''',
'precedence',
'Cisco-IOS-XR-infra-syslog-cfg', False, [
_MetaInfoClassMember('precedence', REFERENCE_ENUM_CLASS, 'LoggingPrecedenceValueEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingPrecedenceValueEnum',
[], [],
''' Logging precedence value
''',
'precedence',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('precedence', ATTRIBUTE, 'int' , None, None,
[('0', '7')], [],
''' Logging precedence value
''',
'precedence',
'Cisco-IOS-XR-infra-syslog-cfg', False),
]),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'LoggingTosEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingTosEnum',
[], [],
''' Logging TOS type DSCP or precedence
''',
'type',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'tos',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Ipv4.Precedence' : {
'meta_info' : _MetaInfoClass('Syslog.Ipv4.Precedence',
False,
[
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'LoggingPrecedenceEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingPrecedenceEnum',
[], [],
''' Logging TOS type precedence
''',
'type',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('unused', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Unused
''',
'unused',
'Cisco-IOS-XR-infra-syslog-cfg', False, [
_MetaInfoClassMember('unused', REFERENCE_ENUM_CLASS, 'LoggingDscpValueEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingDscpValueEnum',
[], [],
''' Unused
''',
'unused',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('unused', ATTRIBUTE, 'int' , None, None,
[('0', '63')], [],
''' Unused
''',
'unused',
'Cisco-IOS-XR-infra-syslog-cfg', False),
]),
_MetaInfoClassMember('value', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Logging precedence value
''',
'value',
'Cisco-IOS-XR-infra-syslog-cfg', False, [
_MetaInfoClassMember('value', REFERENCE_ENUM_CLASS, 'LoggingPrecedenceValueEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingPrecedenceValueEnum',
[], [],
''' Logging precedence value
''',
'value',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('0', '7')], [],
''' Logging precedence value
''',
'value',
'Cisco-IOS-XR-infra-syslog-cfg', False),
]),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'precedence',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Ipv4' : {
'meta_info' : _MetaInfoClass('Syslog.Ipv4',
False,
[
_MetaInfoClassMember('dscp', REFERENCE_CLASS, 'Dscp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Ipv4.Dscp',
[], [],
''' DSCP value
''',
'dscp',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('precedence', REFERENCE_CLASS, 'Precedence' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Ipv4.Precedence',
[], [],
''' Precedence value
''',
'precedence',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('tos', REFERENCE_CLASS, 'Tos' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Ipv4.Tos',
[], [],
''' Type of service
''',
'tos',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'ipv4',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Archive' : {
'meta_info' : _MetaInfoClass('Syslog.Archive',
False,
[
_MetaInfoClassMember('device', ATTRIBUTE, 'str' , None, None,
[], [],
''' '/disk0:' or '/disk1:' or '/harddisk:'
''',
'device',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('file-size', ATTRIBUTE, 'int' , None, None,
[('1', '2047')], [],
''' The maximum file size for a single log file.
''',
'file_size',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('frequency', REFERENCE_ENUM_CLASS, 'LogCollectFrequencyEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LogCollectFrequencyEnum',
[], [],
''' The collection interval for logs
''',
'frequency',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('length', ATTRIBUTE, 'int' , None, None,
[('1', '256')], [],
''' The maximum number of weeks of log to maintain
''',
'length',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'LogMessageSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LogMessageSeverityEnum',
[], [],
''' The minimum severity of log messages to archive
''',
'severity',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('size', ATTRIBUTE, 'int' , None, None,
[('1', '2047')], [],
''' The total size of the archive
''',
'size',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('threshold', ATTRIBUTE, 'int' , None, None,
[('1', '99')], [],
''' The size threshold at which a syslog is
generated
''',
'threshold',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'archive',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Ipv6.Dscp' : {
'meta_info' : _MetaInfoClass('Syslog.Ipv6.Dscp',
False,
[
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'LoggingDscpEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingDscpEnum',
[], [],
''' Logging TOS type DSCP
''',
'type',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('unused', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Unused
''',
'unused',
'Cisco-IOS-XR-infra-syslog-cfg', False, [
_MetaInfoClassMember('unused', REFERENCE_ENUM_CLASS, 'LoggingPrecedenceValueEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingPrecedenceValueEnum',
[], [],
''' Unused
''',
'unused',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('unused', ATTRIBUTE, 'int' , None, None,
[('0', '7')], [],
''' Unused
''',
'unused',
'Cisco-IOS-XR-infra-syslog-cfg', False),
]),
_MetaInfoClassMember('value', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Logging DSCP value
''',
'value',
'Cisco-IOS-XR-infra-syslog-cfg', False, [
_MetaInfoClassMember('value', REFERENCE_ENUM_CLASS, 'LoggingDscpValueEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingDscpValueEnum',
[], [],
''' Logging DSCP value
''',
'value',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('0', '63')], [],
''' Logging DSCP value
''',
'value',
'Cisco-IOS-XR-infra-syslog-cfg', False),
]),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'dscp',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Ipv6.TrafficClass' : {
'meta_info' : _MetaInfoClass('Syslog.Ipv6.TrafficClass',
False,
[
_MetaInfoClassMember('dscp', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Logging DSCP value
''',
'dscp',
'Cisco-IOS-XR-infra-syslog-cfg', False, [
_MetaInfoClassMember('dscp', REFERENCE_ENUM_CLASS, 'LoggingDscpValueEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingDscpValueEnum',
[], [],
''' Logging DSCP value
''',
'dscp',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('dscp', ATTRIBUTE, 'int' , None, None,
[('0', '63')], [],
''' Logging DSCP value
''',
'dscp',
'Cisco-IOS-XR-infra-syslog-cfg', False),
]),
_MetaInfoClassMember('precedence', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Logging precedence value
''',
'precedence',
'Cisco-IOS-XR-infra-syslog-cfg', False, [
_MetaInfoClassMember('precedence', REFERENCE_ENUM_CLASS, 'LoggingPrecedenceValueEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingPrecedenceValueEnum',
[], [],
''' Logging precedence value
''',
'precedence',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('precedence', ATTRIBUTE, 'int' , None, None,
[('0', '7')], [],
''' Logging precedence value
''',
'precedence',
'Cisco-IOS-XR-infra-syslog-cfg', False),
]),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'LoggingTosEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingTosEnum',
[], [],
''' Logging TOS type DSCP or precedence
''',
'type',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'traffic-class',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Ipv6.Precedence' : {
'meta_info' : _MetaInfoClass('Syslog.Ipv6.Precedence',
False,
[
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'LoggingPrecedenceEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingPrecedenceEnum',
[], [],
''' Logging TOS type precedence
''',
'type',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('unused', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Unused
''',
'unused',
'Cisco-IOS-XR-infra-syslog-cfg', False, [
_MetaInfoClassMember('unused', REFERENCE_ENUM_CLASS, 'LoggingDscpValueEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingDscpValueEnum',
[], [],
''' Unused
''',
'unused',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('unused', ATTRIBUTE, 'int' , None, None,
[('0', '63')], [],
''' Unused
''',
'unused',
'Cisco-IOS-XR-infra-syslog-cfg', False),
]),
_MetaInfoClassMember('value', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Logging precedence value
''',
'value',
'Cisco-IOS-XR-infra-syslog-cfg', False, [
_MetaInfoClassMember('value', REFERENCE_ENUM_CLASS, 'LoggingPrecedenceValueEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'LoggingPrecedenceValueEnum',
[], [],
''' Logging precedence value
''',
'value',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('0', '7')], [],
''' Logging precedence value
''',
'value',
'Cisco-IOS-XR-infra-syslog-cfg', False),
]),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'precedence',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Ipv6' : {
'meta_info' : _MetaInfoClass('Syslog.Ipv6',
False,
[
_MetaInfoClassMember('dscp', REFERENCE_CLASS, 'Dscp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Ipv6.Dscp',
[], [],
''' DSCP value
''',
'dscp',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('precedence', REFERENCE_CLASS, 'Precedence' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Ipv6.Precedence',
[], [],
''' Precedence value
''',
'precedence',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('traffic-class', REFERENCE_CLASS, 'TrafficClass' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Ipv6.TrafficClass',
[], [],
''' Type of traffic class
''',
'traffic_class',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'ipv6',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.SourceInterfaceTable.SourceInterfaceValues.SourceInterfaceValue.SourceInterfaceVrfs.SourceInterfaceVrf' : {
'meta_info' : _MetaInfoClass('Syslog.SourceInterfaceTable.SourceInterfaceValues.SourceInterfaceValue.SourceInterfaceVrfs.SourceInterfaceVrf',
False,
[
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Name of the VRF instance
''',
'vrf_name',
'Cisco-IOS-XR-infra-syslog-cfg', True),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'source-interface-vrf',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.SourceInterfaceTable.SourceInterfaceValues.SourceInterfaceValue.SourceInterfaceVrfs' : {
'meta_info' : _MetaInfoClass('Syslog.SourceInterfaceTable.SourceInterfaceValues.SourceInterfaceValue.SourceInterfaceVrfs',
False,
[
_MetaInfoClassMember('source-interface-vrf', REFERENCE_LIST, 'SourceInterfaceVrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.SourceInterfaceTable.SourceInterfaceValues.SourceInterfaceValue.SourceInterfaceVrfs.SourceInterfaceVrf',
[], [],
''' Specify VRF for source interface
''',
'source_interface_vrf',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'source-interface-vrfs',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.SourceInterfaceTable.SourceInterfaceValues.SourceInterfaceValue' : {
'meta_info' : _MetaInfoClass('Syslog.SourceInterfaceTable.SourceInterfaceValues.SourceInterfaceValue',
False,
[
_MetaInfoClassMember('src-interface-name-value', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Which Interface
''',
'src_interface_name_value',
'Cisco-IOS-XR-infra-syslog-cfg', True),
_MetaInfoClassMember('source-interface-vrfs', REFERENCE_CLASS, 'SourceInterfaceVrfs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.SourceInterfaceTable.SourceInterfaceValues.SourceInterfaceValue.SourceInterfaceVrfs',
[], [],
''' Configure source interface VRF
''',
'source_interface_vrfs',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'source-interface-value',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.SourceInterfaceTable.SourceInterfaceValues' : {
'meta_info' : _MetaInfoClass('Syslog.SourceInterfaceTable.SourceInterfaceValues',
False,
[
_MetaInfoClassMember('source-interface-value', REFERENCE_LIST, 'SourceInterfaceValue' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.SourceInterfaceTable.SourceInterfaceValues.SourceInterfaceValue',
[], [],
''' Source interface
''',
'source_interface_value',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'source-interface-values',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.SourceInterfaceTable' : {
'meta_info' : _MetaInfoClass('Syslog.SourceInterfaceTable',
False,
[
_MetaInfoClassMember('source-interface-values', REFERENCE_CLASS, 'SourceInterfaceValues' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.SourceInterfaceTable.SourceInterfaceValues',
[], [],
''' Specify interface for source address in logging
transactions
''',
'source_interface_values',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'source-interface-table',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.AlarmLogger' : {
'meta_info' : _MetaInfoClass('Syslog.AlarmLogger',
False,
[
_MetaInfoClassMember('buffer-size', ATTRIBUTE, 'int' , None, None,
[('1024', '1024000')], [],
''' Set size of the local event buffer
''',
'buffer_size',
'Cisco-IOS-XR-infra-alarm-logger-cfg', False),
_MetaInfoClassMember('severity-level', REFERENCE_ENUM_CLASS, 'AlarmLoggerSeverityLevelEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_alarm_logger_datatypes', 'AlarmLoggerSeverityLevelEnum',
[], [],
''' Log all events with equal or higher (lower
level) severity than this
''',
'severity_level',
'Cisco-IOS-XR-infra-alarm-logger-cfg', False),
_MetaInfoClassMember('source-location', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable alarm source location in message text
''',
'source_location',
'Cisco-IOS-XR-infra-alarm-logger-cfg', False),
_MetaInfoClassMember('threshold', ATTRIBUTE, 'int' , None, None,
[('10', '100')], [],
''' Configure threshold (%) for capacity alarm
''',
'threshold',
'Cisco-IOS-XR-infra-alarm-logger-cfg', False),
],
'Cisco-IOS-XR-infra-alarm-logger-cfg',
'alarm-logger',
_yang_ns._namespaces['Cisco-IOS-XR-infra-alarm-logger-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.Definition' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.Definition',
False,
[
_MetaInfoClassMember('category-name-entry1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Root message category name
''',
'category_name_entry1',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('category-name-entry10', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message category name
''',
'category_name_entry10',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('category-name-entry2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message category name
''',
'category_name_entry2',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('category-name-entry3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message category name
''',
'category_name_entry3',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('category-name-entry4', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message category name
''',
'category_name_entry4',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('category-name-entry5', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message category name
''',
'category_name_entry5',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('category-name-entry6', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message category name
''',
'category_name_entry6',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('category-name-entry7', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message category name
''',
'category_name_entry7',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('category-name-entry8', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message category name
''',
'category_name_entry8',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('category-name-entry9', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message category name
''',
'category_name_entry9',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('group-name-entry1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Root message group name
''',
'group_name_entry1',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('group-name-entry10', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message group name
''',
'group_name_entry10',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('group-name-entry2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message group name
''',
'group_name_entry2',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('group-name-entry3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message group name
''',
'group_name_entry3',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('group-name-entry4', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message group name
''',
'group_name_entry4',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('group-name-entry5', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message group name
''',
'group_name_entry5',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('group-name-entry6', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message group name
''',
'group_name_entry6',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('group-name-entry7', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message group name
''',
'group_name_entry7',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('group-name-entry8', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message group name
''',
'group_name_entry8',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('group-name-entry9', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message group name
''',
'group_name_entry9',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('message-code-entry1', ATTRIBUTE, 'str' , None, None,
[], [],
''' Root message code
''',
'message_code_entry1',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('message-code-entry10', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message code
''',
'message_code_entry10',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('message-code-entry2', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message code
''',
'message_code_entry2',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('message-code-entry3', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message code
''',
'message_code_entry3',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('message-code-entry4', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message code
''',
'message_code_entry4',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('message-code-entry5', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message code
''',
'message_code_entry5',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('message-code-entry6', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message code
''',
'message_code_entry6',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('message-code-entry7', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message code
''',
'message_code_entry7',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('message-code-entry8', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message code
''',
'message_code_entry8',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('message-code-entry9', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message code
''',
'message_code_entry9',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('timeout', ATTRIBUTE, 'int' , None, None,
[('1', '7200000')], [],
''' Timeout (time the rule is to be active) in
milliseconds
''',
'timeout',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'definition',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.NonStateful.NonRootCauses.NonRootCause' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.NonStateful.NonRootCauses.NonRootCause',
False,
[
_MetaInfoClassMember('category', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message category
''',
'category',
'Cisco-IOS-XR-infra-correlator-cfg', True),
_MetaInfoClassMember('group', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message group
''',
'group',
'Cisco-IOS-XR-infra-correlator-cfg', True),
_MetaInfoClassMember('message-code', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message code
''',
'message_code',
'Cisco-IOS-XR-infra-correlator-cfg', True),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'non-root-cause',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.NonStateful.NonRootCauses' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.NonStateful.NonRootCauses',
False,
[
_MetaInfoClassMember('non-root-cause', REFERENCE_LIST, 'NonRootCause' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.NonStateful.NonRootCauses.NonRootCause',
[], [],
''' A non-rootcause
''',
'non_root_cause',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'non-root-causes',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.NonStateful.RootCause' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.NonStateful.RootCause',
False,
[
_MetaInfoClassMember('category', ATTRIBUTE, 'str' , None, None,
[], [],
''' Root message category
''',
'category',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('group', ATTRIBUTE, 'str' , None, None,
[], [],
''' Root message group
''',
'group',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('message-code', ATTRIBUTE, 'str' , None, None,
[], [],
''' Root message code
''',
'message_code',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'root-cause',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.NonStateful' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.NonStateful',
False,
[
_MetaInfoClassMember('context-correlation', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable correlation on alarm context
''',
'context_correlation',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('non-root-causes', REFERENCE_CLASS, 'NonRootCauses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.NonStateful.NonRootCauses',
[], [],
''' Table of configured non-rootcause
''',
'non_root_causes',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('root-cause', REFERENCE_CLASS, 'RootCause' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.NonStateful.RootCause',
[], [],
''' The root cause
''',
'root_cause',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('timeout', ATTRIBUTE, 'int' , None, None,
[('1', '7200000')], [],
''' Timeout (time to wait for active correlation) in
milliseconds
''',
'timeout',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('timeout-root-cause', ATTRIBUTE, 'int' , None, None,
[('1', '7200000')], [],
''' Rootcause Timeout (time to wait for rootcause)
in milliseconds
''',
'timeout_root_cause',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'non-stateful',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.Stateful.NonRootCauses.NonRootCause' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.Stateful.NonRootCauses.NonRootCause',
False,
[
_MetaInfoClassMember('category', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message category
''',
'category',
'Cisco-IOS-XR-infra-correlator-cfg', True),
_MetaInfoClassMember('group', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message group
''',
'group',
'Cisco-IOS-XR-infra-correlator-cfg', True),
_MetaInfoClassMember('message-code', ATTRIBUTE, 'str' , None, None,
[], [],
''' Correlated message code
''',
'message_code',
'Cisco-IOS-XR-infra-correlator-cfg', True),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'non-root-cause',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.Stateful.NonRootCauses' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.Stateful.NonRootCauses',
False,
[
_MetaInfoClassMember('non-root-cause', REFERENCE_LIST, 'NonRootCause' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.Stateful.NonRootCauses.NonRootCause',
[], [],
''' A non-rootcause
''',
'non_root_cause',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'non-root-causes',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.Stateful.RootCause' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.Stateful.RootCause',
False,
[
_MetaInfoClassMember('category', ATTRIBUTE, 'str' , None, None,
[], [],
''' Root message category
''',
'category',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('group', ATTRIBUTE, 'str' , None, None,
[], [],
''' Root message group
''',
'group',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('message-code', ATTRIBUTE, 'str' , None, None,
[], [],
''' Root message code
''',
'message_code',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'root-cause',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.Stateful' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.Stateful',
False,
[
_MetaInfoClassMember('context-correlation', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable correlation on alarm context
''',
'context_correlation',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('non-root-causes', REFERENCE_CLASS, 'NonRootCauses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.Stateful.NonRootCauses',
[], [],
''' Table of configured non-rootcause
''',
'non_root_causes',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('reissue', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable reissue of non-bistate alarms on
rootcause alarm clear
''',
'reissue',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('reparent', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable reparent of alarm on rootcause alarm
clear
''',
'reparent',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('root-cause', REFERENCE_CLASS, 'RootCause' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.Stateful.RootCause',
[], [],
''' The root cause
''',
'root_cause',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('timeout', ATTRIBUTE, 'int' , None, None,
[('1', '7200000')], [],
''' Timeout (time to wait for active correlation) in
milliseconds
''',
'timeout',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('timeout-root-cause', ATTRIBUTE, 'int' , None, None,
[('1', '7200000')], [],
''' Rootcause Timeout (time to wait for rootcause)
in milliseconds
''',
'timeout_root_cause',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'stateful',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.ApplyTo.Contexts' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.ApplyTo.Contexts',
False,
[
_MetaInfoClassMember('context', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' One or more context names
''',
'context',
'Cisco-IOS-XR-infra-correlator-cfg', False, max_elements=32),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'contexts',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.ApplyTo.Locations' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.ApplyTo.Locations',
False,
[
_MetaInfoClassMember('location', REFERENCE_LEAFLIST, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' One or more Locations
''',
'location',
'Cisco-IOS-XR-infra-correlator-cfg', False, max_elements=32),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'locations',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.ApplyTo' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.ApplyTo',
False,
[
_MetaInfoClassMember('all-of-router', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Apply the rule to all of the router
''',
'all_of_router',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('contexts', REFERENCE_CLASS, 'Contexts' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.ApplyTo.Contexts',
[], [],
''' Apply rule to a specified list of contexts,
e.g. interfaces
''',
'contexts',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('locations', REFERENCE_CLASS, 'Locations' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.ApplyTo.Locations',
[], [],
''' Apply rule to a specified list of Locations
''',
'locations',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'apply-to',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.AppliedTo.Contexts.Context' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.AppliedTo.Contexts.Context',
False,
[
_MetaInfoClassMember('context', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Context
''',
'context',
'Cisco-IOS-XR-infra-correlator-cfg', True),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'context',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.AppliedTo.Contexts' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.AppliedTo.Contexts',
False,
[
_MetaInfoClassMember('context', REFERENCE_LIST, 'Context' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.AppliedTo.Contexts.Context',
[], [],
''' A context
''',
'context',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'contexts',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.AppliedTo.Locations.Location' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.AppliedTo.Locations.Location',
False,
[
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' Location
''',
'location',
'Cisco-IOS-XR-infra-correlator-cfg', True),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'location',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.AppliedTo.Locations' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.AppliedTo.Locations',
False,
[
_MetaInfoClassMember('location', REFERENCE_LIST, 'Location' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.AppliedTo.Locations.Location',
[], [],
''' A location
''',
'location',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'locations',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule.AppliedTo' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule.AppliedTo',
False,
[
_MetaInfoClassMember('all', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Apply to all of the router
''',
'all',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('contexts', REFERENCE_CLASS, 'Contexts' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.AppliedTo.Contexts',
[], [],
''' Table of configured contexts to apply
''',
'contexts',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('locations', REFERENCE_CLASS, 'Locations' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.AppliedTo.Locations',
[], [],
''' Table of configured locations to apply
''',
'locations',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'applied-to',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules.Rule' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules.Rule',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Rule name
''',
'name',
'Cisco-IOS-XR-infra-correlator-cfg', True),
_MetaInfoClassMember('applied-to', REFERENCE_CLASS, 'AppliedTo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.AppliedTo',
[], [],
''' Applied to the Rule or Ruleset
''',
'applied_to',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('apply-to', REFERENCE_CLASS, 'ApplyTo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.ApplyTo',
[], [],
''' Apply the Rules
''',
'apply_to',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('definition', REFERENCE_CLASS, 'Definition' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.Definition',
[], [],
''' Configure a specified correlation rule
''',
'definition',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('non-stateful', REFERENCE_CLASS, 'NonStateful' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.NonStateful',
[], [],
''' The Non-Stateful Rule Type
''',
'non_stateful',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('stateful', REFERENCE_CLASS, 'Stateful' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule.Stateful',
[], [],
''' The Stateful Rule Type
''',
'stateful',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'rule',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.Rules' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.Rules',
False,
[
_MetaInfoClassMember('rule', REFERENCE_LIST, 'Rule' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules.Rule',
[], [],
''' Rule name
''',
'rule',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'rules',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.RuleSets.RuleSet.Rulenames.Rulename' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.RuleSets.RuleSet.Rulenames.Rulename',
False,
[
_MetaInfoClassMember('rulename', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Rule name
''',
'rulename',
'Cisco-IOS-XR-infra-correlator-cfg', True),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'rulename',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.RuleSets.RuleSet.Rulenames' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.RuleSets.RuleSet.Rulenames',
False,
[
_MetaInfoClassMember('rulename', REFERENCE_LIST, 'Rulename' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.RuleSets.RuleSet.Rulenames.Rulename',
[], [],
''' A rulename
''',
'rulename',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'rulenames',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Contexts.Context' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Contexts.Context',
False,
[
_MetaInfoClassMember('context', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Context
''',
'context',
'Cisco-IOS-XR-infra-correlator-cfg', True),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'context',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Contexts' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Contexts',
False,
[
_MetaInfoClassMember('context', REFERENCE_LIST, 'Context' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Contexts.Context',
[], [],
''' A context
''',
'context',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'contexts',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Locations.Location' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Locations.Location',
False,
[
_MetaInfoClassMember('location', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' Location
''',
'location',
'Cisco-IOS-XR-infra-correlator-cfg', True),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'location',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Locations' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Locations',
False,
[
_MetaInfoClassMember('location', REFERENCE_LIST, 'Location' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Locations.Location',
[], [],
''' A location
''',
'location',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'locations',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.RuleSets.RuleSet.AppliedTo' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.RuleSets.RuleSet.AppliedTo',
False,
[
_MetaInfoClassMember('all', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Apply to all of the router
''',
'all',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('contexts', REFERENCE_CLASS, 'Contexts' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Contexts',
[], [],
''' Table of configured contexts to apply
''',
'contexts',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('locations', REFERENCE_CLASS, 'Locations' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Locations',
[], [],
''' Table of configured locations to apply
''',
'locations',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'applied-to',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.RuleSets.RuleSet' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.RuleSets.RuleSet',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Ruleset name
''',
'name',
'Cisco-IOS-XR-infra-correlator-cfg', True),
_MetaInfoClassMember('applied-to', REFERENCE_CLASS, 'AppliedTo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.RuleSets.RuleSet.AppliedTo',
[], [],
''' Applied to the Rule or Ruleset
''',
'applied_to',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('rulenames', REFERENCE_CLASS, 'Rulenames' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.RuleSets.RuleSet.Rulenames',
[], [],
''' Table of configured rulenames
''',
'rulenames',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'rule-set',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator.RuleSets' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator.RuleSets',
False,
[
_MetaInfoClassMember('rule-set', REFERENCE_LIST, 'RuleSet' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.RuleSets.RuleSet',
[], [],
''' Ruleset name
''',
'rule_set',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'rule-sets',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Correlator' : {
'meta_info' : _MetaInfoClass('Syslog.Correlator',
False,
[
_MetaInfoClassMember('buffer-size', ATTRIBUTE, 'int' , None, None,
[('1024', '52428800')], [],
''' Configure size of the correlator buffer
''',
'buffer_size',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('rule-sets', REFERENCE_CLASS, 'RuleSets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.RuleSets',
[], [],
''' Table of configured rulesets
''',
'rule_sets',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('rules', REFERENCE_CLASS, 'Rules' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator.Rules',
[], [],
''' Table of configured rules
''',
'rules',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'correlator',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Suppression.Rules.Rule.AppliedTo.Sources.Source' : {
'meta_info' : _MetaInfoClass('Syslog.Suppression.Rules.Rule.AppliedTo.Sources.Source',
False,
[
_MetaInfoClassMember('source', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' Source
''',
'source',
'Cisco-IOS-XR-infra-correlator-cfg', True),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'source',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Suppression.Rules.Rule.AppliedTo.Sources' : {
'meta_info' : _MetaInfoClass('Syslog.Suppression.Rules.Rule.AppliedTo.Sources',
False,
[
_MetaInfoClassMember('source', REFERENCE_LIST, 'Source' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Suppression.Rules.Rule.AppliedTo.Sources.Source',
[], [],
''' An alarm source
''',
'source',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'sources',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Suppression.Rules.Rule.AppliedTo' : {
'meta_info' : _MetaInfoClass('Syslog.Suppression.Rules.Rule.AppliedTo',
False,
[
_MetaInfoClassMember('all', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Apply to all of the router
''',
'all',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('sources', REFERENCE_CLASS, 'Sources' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Suppression.Rules.Rule.AppliedTo.Sources',
[], [],
''' Table of configured sources to apply
''',
'sources',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'applied-to',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Suppression.Rules.Rule.AlarmCauses.AlarmCause' : {
'meta_info' : _MetaInfoClass('Syslog.Suppression.Rules.Rule.AlarmCauses.AlarmCause',
False,
[
_MetaInfoClassMember('category', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Category
''',
'category',
'Cisco-IOS-XR-infra-correlator-cfg', True),
_MetaInfoClassMember('code', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Code
''',
'code',
'Cisco-IOS-XR-infra-correlator-cfg', True),
_MetaInfoClassMember('group', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Group
''',
'group',
'Cisco-IOS-XR-infra-correlator-cfg', True),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'alarm-cause',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Suppression.Rules.Rule.AlarmCauses' : {
'meta_info' : _MetaInfoClass('Syslog.Suppression.Rules.Rule.AlarmCauses',
False,
[
_MetaInfoClassMember('alarm-cause', REFERENCE_LIST, 'AlarmCause' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Suppression.Rules.Rule.AlarmCauses.AlarmCause',
[], [],
''' Category, Group and Code of alarm/syslog to
be suppressed
''',
'alarm_cause',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'alarm-causes',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Suppression.Rules.Rule' : {
'meta_info' : _MetaInfoClass('Syslog.Suppression.Rules.Rule',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Rule name
''',
'name',
'Cisco-IOS-XR-infra-correlator-cfg', True),
_MetaInfoClassMember('alarm-causes', REFERENCE_CLASS, 'AlarmCauses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Suppression.Rules.Rule.AlarmCauses',
[], [],
''' Causes of alarms to be suppressed
''',
'alarm_causes',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('all-alarms', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Suppress all alarms
''',
'all_alarms',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('applied-to', REFERENCE_CLASS, 'AppliedTo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Suppression.Rules.Rule.AppliedTo',
[], [],
''' Applied to the Rule
''',
'applied_to',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'rule',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Suppression.Rules' : {
'meta_info' : _MetaInfoClass('Syslog.Suppression.Rules',
False,
[
_MetaInfoClassMember('rule', REFERENCE_LIST, 'Rule' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Suppression.Rules.Rule',
[], [],
''' Rule name
''',
'rule',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'rules',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog.Suppression' : {
'meta_info' : _MetaInfoClass('Syslog.Suppression',
False,
[
_MetaInfoClassMember('rules', REFERENCE_CLASS, 'Rules' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Suppression.Rules',
[], [],
''' Table of configured rules
''',
'rules',
'Cisco-IOS-XR-infra-correlator-cfg', False),
],
'Cisco-IOS-XR-infra-correlator-cfg',
'suppression',
_yang_ns._namespaces['Cisco-IOS-XR-infra-correlator-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
'Syslog' : {
'meta_info' : _MetaInfoClass('Syslog',
False,
[
_MetaInfoClassMember('alarm-logger', REFERENCE_CLASS, 'AlarmLogger' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.AlarmLogger',
[], [],
''' Alarm Logger Properties
''',
'alarm_logger',
'Cisco-IOS-XR-infra-alarm-logger-cfg', False),
_MetaInfoClassMember('archive', REFERENCE_CLASS, 'Archive' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Archive',
[], [],
''' Archive attributes configuration
''',
'archive',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('buffered-logging', REFERENCE_CLASS, 'BufferedLogging' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.BufferedLogging',
[], [],
''' Set buffered logging parameters
''',
'buffered_logging',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('console-logging', REFERENCE_CLASS, 'ConsoleLogging' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.ConsoleLogging',
[], [],
''' Set console logging
''',
'console_logging',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('correlator', REFERENCE_CLASS, 'Correlator' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Correlator',
[], [],
''' Configure properties of the event correlator
''',
'correlator',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('enable-console-logging', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enabled or disabled
''',
'enable_console_logging',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('files', REFERENCE_CLASS, 'Files' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Files',
[], [],
''' Configure logging file destination
''',
'files',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('history-logging', REFERENCE_CLASS, 'HistoryLogging' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HistoryLogging',
[], [],
''' Set history logging
''',
'history_logging',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('host-name-prefix', ATTRIBUTE, 'str' , None, None,
[], [],
''' Hostname prefix to add on msgs to servers
''',
'host_name_prefix',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('host-server', REFERENCE_CLASS, 'HostServer' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.HostServer',
[], [],
''' Configure logging host
''',
'host_server',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('ipv4', REFERENCE_CLASS, 'Ipv4' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Ipv4',
[], [],
''' Syslog TOS bit for outgoing messages
''',
'ipv4',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('ipv6', REFERENCE_CLASS, 'Ipv6' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Ipv6',
[], [],
''' Syslog traffic class bit for outgoing messages
''',
'ipv6',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('local-log-file-size', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Set size of the local log file
''',
'local_log_file_size',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('logging-facilities', REFERENCE_CLASS, 'LoggingFacilities' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.LoggingFacilities',
[], [],
''' Modify message logging facilities
''',
'logging_facilities',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('monitor-logging', REFERENCE_CLASS, 'MonitorLogging' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.MonitorLogging',
[], [],
''' Set monitor logging
''',
'monitor_logging',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('source-interface-table', REFERENCE_CLASS, 'SourceInterfaceTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.SourceInterfaceTable',
[], [],
''' Configure source interface
''',
'source_interface_table',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('suppress-duplicates', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Suppress consecutive duplicate messages
''',
'suppress_duplicates',
'Cisco-IOS-XR-infra-syslog-cfg', False),
_MetaInfoClassMember('suppression', REFERENCE_CLASS, 'Suppression' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.Suppression',
[], [],
''' Configure properties of the syslog/alarm
suppression
''',
'suppression',
'Cisco-IOS-XR-infra-correlator-cfg', False),
_MetaInfoClassMember('trap-logging', REFERENCE_CLASS, 'TrapLogging' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg', 'Syslog.TrapLogging',
[], [],
''' Set trap logging
''',
'trap_logging',
'Cisco-IOS-XR-infra-syslog-cfg', False),
],
'Cisco-IOS-XR-infra-syslog-cfg',
'syslog',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_cfg'
),
},
}
_meta_table['SyslogService.Timestamps.Log.LogDatetime.LogDatetimeValue']['meta_info'].parent =_meta_table['SyslogService.Timestamps.Log.LogDatetime']['meta_info']
_meta_table['SyslogService.Timestamps.Log.LogDatetime']['meta_info'].parent =_meta_table['SyslogService.Timestamps.Log']['meta_info']
_meta_table['SyslogService.Timestamps.Debug.DebugDatetime.DatetimeValue']['meta_info'].parent =_meta_table['SyslogService.Timestamps.Debug.DebugDatetime']['meta_info']
_meta_table['SyslogService.Timestamps.Debug.DebugDatetime']['meta_info'].parent =_meta_table['SyslogService.Timestamps.Debug']['meta_info']
_meta_table['SyslogService.Timestamps.Log']['meta_info'].parent =_meta_table['SyslogService.Timestamps']['meta_info']
_meta_table['SyslogService.Timestamps.Debug']['meta_info'].parent =_meta_table['SyslogService.Timestamps']['meta_info']
_meta_table['SyslogService.Timestamps']['meta_info'].parent =_meta_table['SyslogService']['meta_info']
_meta_table['Syslog.MonitorLogging.MonitorDiscriminator']['meta_info'].parent =_meta_table['Syslog.MonitorLogging']['meta_info']
_meta_table['Syslog.BufferedLogging.BufferedDiscriminator']['meta_info'].parent =_meta_table['Syslog.BufferedLogging']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6SeverityLevels.Ipv6SeverityLevel']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6SeverityLevels']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6SeverityPort']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6Discriminator']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6.Ipv6SeverityLevels']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv6S.Ipv6']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv6S']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostNameSeverities.HostNameSeverity']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostNameSeverities']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostNameSeverities']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Hosts.Host']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostNameDiscriminator']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Hosts.Host']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Hosts.Host.HostSeverityPort']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Hosts.Host']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Hosts.Host']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Hosts']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4SeverityLevels.Ipv4SeverityLevel']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4SeverityLevels']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4SeverityLevels']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4SeverityPort']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4.Ipv4Discriminator']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv4S.Ipv4']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv4S']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv6S']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Hosts']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf.Ipv4S']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs.Vrf']['meta_info']
_meta_table['Syslog.HostServer.Vrfs.Vrf']['meta_info'].parent =_meta_table['Syslog.HostServer.Vrfs']['meta_info']
_meta_table['Syslog.HostServer.Vrfs']['meta_info'].parent =_meta_table['Syslog.HostServer']['meta_info']
_meta_table['Syslog.ConsoleLogging.ConsoleDiscriminator']['meta_info'].parent =_meta_table['Syslog.ConsoleLogging']['meta_info']
_meta_table['Syslog.Files.File.FileSpecification']['meta_info'].parent =_meta_table['Syslog.Files.File']['meta_info']
_meta_table['Syslog.Files.File.FileLogAttributes']['meta_info'].parent =_meta_table['Syslog.Files.File']['meta_info']
_meta_table['Syslog.Files.File.FileLogDiscriminator']['meta_info'].parent =_meta_table['Syslog.Files.File']['meta_info']
_meta_table['Syslog.Files.File']['meta_info'].parent =_meta_table['Syslog.Files']['meta_info']
_meta_table['Syslog.Ipv4.Dscp']['meta_info'].parent =_meta_table['Syslog.Ipv4']['meta_info']
_meta_table['Syslog.Ipv4.Tos']['meta_info'].parent =_meta_table['Syslog.Ipv4']['meta_info']
_meta_table['Syslog.Ipv4.Precedence']['meta_info'].parent =_meta_table['Syslog.Ipv4']['meta_info']
_meta_table['Syslog.Ipv6.Dscp']['meta_info'].parent =_meta_table['Syslog.Ipv6']['meta_info']
_meta_table['Syslog.Ipv6.TrafficClass']['meta_info'].parent =_meta_table['Syslog.Ipv6']['meta_info']
_meta_table['Syslog.Ipv6.Precedence']['meta_info'].parent =_meta_table['Syslog.Ipv6']['meta_info']
_meta_table['Syslog.SourceInterfaceTable.SourceInterfaceValues.SourceInterfaceValue.SourceInterfaceVrfs.SourceInterfaceVrf']['meta_info'].parent =_meta_table['Syslog.SourceInterfaceTable.SourceInterfaceValues.SourceInterfaceValue.SourceInterfaceVrfs']['meta_info']
_meta_table['Syslog.SourceInterfaceTable.SourceInterfaceValues.SourceInterfaceValue.SourceInterfaceVrfs']['meta_info'].parent =_meta_table['Syslog.SourceInterfaceTable.SourceInterfaceValues.SourceInterfaceValue']['meta_info']
_meta_table['Syslog.SourceInterfaceTable.SourceInterfaceValues.SourceInterfaceValue']['meta_info'].parent =_meta_table['Syslog.SourceInterfaceTable.SourceInterfaceValues']['meta_info']
_meta_table['Syslog.SourceInterfaceTable.SourceInterfaceValues']['meta_info'].parent =_meta_table['Syslog.SourceInterfaceTable']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.NonStateful.NonRootCauses.NonRootCause']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule.NonStateful.NonRootCauses']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.NonStateful.NonRootCauses']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule.NonStateful']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.NonStateful.RootCause']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule.NonStateful']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.Stateful.NonRootCauses.NonRootCause']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule.Stateful.NonRootCauses']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.Stateful.NonRootCauses']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule.Stateful']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.Stateful.RootCause']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule.Stateful']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.ApplyTo.Contexts']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule.ApplyTo']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.ApplyTo.Locations']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule.ApplyTo']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.AppliedTo.Contexts.Context']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule.AppliedTo.Contexts']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.AppliedTo.Locations.Location']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule.AppliedTo.Locations']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.AppliedTo.Contexts']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule.AppliedTo']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.AppliedTo.Locations']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule.AppliedTo']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.Definition']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.NonStateful']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.Stateful']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.ApplyTo']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule.AppliedTo']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules.Rule']['meta_info']
_meta_table['Syslog.Correlator.Rules.Rule']['meta_info'].parent =_meta_table['Syslog.Correlator.Rules']['meta_info']
_meta_table['Syslog.Correlator.RuleSets.RuleSet.Rulenames.Rulename']['meta_info'].parent =_meta_table['Syslog.Correlator.RuleSets.RuleSet.Rulenames']['meta_info']
_meta_table['Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Contexts.Context']['meta_info'].parent =_meta_table['Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Contexts']['meta_info']
_meta_table['Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Locations.Location']['meta_info'].parent =_meta_table['Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Locations']['meta_info']
_meta_table['Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Contexts']['meta_info'].parent =_meta_table['Syslog.Correlator.RuleSets.RuleSet.AppliedTo']['meta_info']
_meta_table['Syslog.Correlator.RuleSets.RuleSet.AppliedTo.Locations']['meta_info'].parent =_meta_table['Syslog.Correlator.RuleSets.RuleSet.AppliedTo']['meta_info']
_meta_table['Syslog.Correlator.RuleSets.RuleSet.Rulenames']['meta_info'].parent =_meta_table['Syslog.Correlator.RuleSets.RuleSet']['meta_info']
_meta_table['Syslog.Correlator.RuleSets.RuleSet.AppliedTo']['meta_info'].parent =_meta_table['Syslog.Correlator.RuleSets.RuleSet']['meta_info']
_meta_table['Syslog.Correlator.RuleSets.RuleSet']['meta_info'].parent =_meta_table['Syslog.Correlator.RuleSets']['meta_info']
_meta_table['Syslog.Correlator.Rules']['meta_info'].parent =_meta_table['Syslog.Correlator']['meta_info']
_meta_table['Syslog.Correlator.RuleSets']['meta_info'].parent =_meta_table['Syslog.Correlator']['meta_info']
_meta_table['Syslog.Suppression.Rules.Rule.AppliedTo.Sources.Source']['meta_info'].parent =_meta_table['Syslog.Suppression.Rules.Rule.AppliedTo.Sources']['meta_info']
_meta_table['Syslog.Suppression.Rules.Rule.AppliedTo.Sources']['meta_info'].parent =_meta_table['Syslog.Suppression.Rules.Rule.AppliedTo']['meta_info']
_meta_table['Syslog.Suppression.Rules.Rule.AlarmCauses.AlarmCause']['meta_info'].parent =_meta_table['Syslog.Suppression.Rules.Rule.AlarmCauses']['meta_info']
_meta_table['Syslog.Suppression.Rules.Rule.AppliedTo']['meta_info'].parent =_meta_table['Syslog.Suppression.Rules.Rule']['meta_info']
_meta_table['Syslog.Suppression.Rules.Rule.AlarmCauses']['meta_info'].parent =_meta_table['Syslog.Suppression.Rules.Rule']['meta_info']
_meta_table['Syslog.Suppression.Rules.Rule']['meta_info'].parent =_meta_table['Syslog.Suppression.Rules']['meta_info']
_meta_table['Syslog.Suppression.Rules']['meta_info'].parent =_meta_table['Syslog.Suppression']['meta_info']
_meta_table['Syslog.MonitorLogging']['meta_info'].parent =_meta_table['Syslog']['meta_info']
_meta_table['Syslog.HistoryLogging']['meta_info'].parent =_meta_table['Syslog']['meta_info']
_meta_table['Syslog.LoggingFacilities']['meta_info'].parent =_meta_table['Syslog']['meta_info']
_meta_table['Syslog.TrapLogging']['meta_info'].parent =_meta_table['Syslog']['meta_info']
_meta_table['Syslog.BufferedLogging']['meta_info'].parent =_meta_table['Syslog']['meta_info']
_meta_table['Syslog.HostServer']['meta_info'].parent =_meta_table['Syslog']['meta_info']
_meta_table['Syslog.ConsoleLogging']['meta_info'].parent =_meta_table['Syslog']['meta_info']
_meta_table['Syslog.Files']['meta_info'].parent =_meta_table['Syslog']['meta_info']
_meta_table['Syslog.Ipv4']['meta_info'].parent =_meta_table['Syslog']['meta_info']
_meta_table['Syslog.Archive']['meta_info'].parent =_meta_table['Syslog']['meta_info']
_meta_table['Syslog.Ipv6']['meta_info'].parent =_meta_table['Syslog']['meta_info']
_meta_table['Syslog.SourceInterfaceTable']['meta_info'].parent =_meta_table['Syslog']['meta_info']
_meta_table['Syslog.AlarmLogger']['meta_info'].parent =_meta_table['Syslog']['meta_info']
_meta_table['Syslog.Correlator']['meta_info'].parent =_meta_table['Syslog']['meta_info']
_meta_table['Syslog.Suppression']['meta_info'].parent =_meta_table['Syslog']['meta_info']
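

# --- Illustrative note (not part of the generated YDK meta file) ---
# A minimal sketch, assuming only the `_meta_table` structure and the `parent`
# attributes assigned above, of how the containment hierarchy could be walked;
# the helper name is hypothetical and added here purely for illustration.
def _meta_parent_chain(entry_name):
    """Return the chain of meta_info objects from `entry_name` up to the model root."""
    info = _meta_table[entry_name]['meta_info']
    chain = [info]
    while getattr(info, 'parent', None) is not None:
        info = info.parent
        chain.append(info)
    return chain
# e.g. _meta_parent_chain('Syslog.Correlator.Rules.Rule') walks Rule -> Rules -> Correlator -> Syslog.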
avg_line_length: 50.424041 | max_line_length: 266 | alphanum_fraction: 0.52498
hexsha: 2599b659a00a03060626c3dacf71789a4baa1878 | size: 2,072 | ext: py | lang: Python
max_stars_repo_path: fixture/group.py | max_stars_repo_name: rgurevych/ironpython_training | max_stars_repo_head_hexsha: e6752699874cbcfb5db7962682b07c7a76bfcfc8 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: fixture/group.py | max_issues_repo_name: rgurevych/ironpython_training | max_issues_repo_head_hexsha: e6752699874cbcfb5db7962682b07c7a76bfcfc8 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: fixture/group.py | max_forks_repo_name: rgurevych/ironpython_training | max_forks_repo_head_hexsha: e6752699874cbcfb5db7962682b07c7a76bfcfc8 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
import clr
import sys
import os.path
import time
project_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(project_dir, "TestStack.White.0.13.3\\lib\\net40\\"))
sys.path.append(os.path.join(project_dir, "Castle.Core.3.3.0\\lib\\net40-client\\"))
clr.AddReferenceByName("TestStack.White")
clr.AddReferenceByName("UIAutomationTypes, Version=3.0.0.0, Culture=neutral, PublicKeyToken=31bf3856ad364e35")
from TestStack.White.InputDevices import Keyboard
from TestStack.White.WindowsAPI import KeyboardInput
from TestStack.White.UIItems.Finders import *
from System.Windows.Automation import *


def add_new_group(main_window, group_name):
    """Create a new group with the given name via the Group editor modal window."""
    modal = open_groups_editor(main_window)
    modal.Get(SearchCriteria.ByAutomationId("uxNewAddressButton")).Click()
    modal.Get(SearchCriteria.ByControlType(ControlType.Edit)).Enter(group_name)
    Keyboard.Instance.PressSpecialKey(KeyboardInput.SpecialKeys.RETURN)
    close_groups_editor(modal)


def delete_group(main_window, index):
    """Delete the group at the given index in the group tree, choosing the 'delete all' option."""
    modal = open_groups_editor(main_window)
    tree = modal.Get(SearchCriteria.ByAutomationId("uxAddressTreeView"))
    root = tree.Nodes[0]
    root.Nodes[index].Select()
    modal.Get(SearchCriteria.ByAutomationId("uxDeleteAddressButton")).Click()
    deletion_modal = modal.ModalWindow("Delete group")
    deletion_modal.Get(SearchCriteria.ByAutomationId("uxDeleteAllRadioButton")).Click()
    deletion_modal.Get(SearchCriteria.ByAutomationId("uxOKAddressButton")).Click()
    close_groups_editor(modal)


def open_groups_editor(main_window):
    """Open the Group editor modal window and return it."""
    main_window.Get(SearchCriteria.ByAutomationId("groupButton")).Click()
    modal = main_window.ModalWindow("Group editor")
    return modal


def close_groups_editor(modal):
    """Close the Group editor modal window."""
    modal.Get(SearchCriteria.ByAutomationId("uxCloseAddressButton")).Click()


def get_group_list(main_window):
    """Return the list of group names shown in the Group editor tree."""
    modal = open_groups_editor(main_window)
    tree = modal.Get(SearchCriteria.ByAutomationId("uxAddressTreeView"))
    groups_list = [node.Text for node in tree.Nodes[0].Nodes]
    close_groups_editor(modal)
    return groups_list
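

# --- Illustrative usage sketch (not part of the original fixture module) ---
# A minimal, hedged example of how these helpers might be driven; the
# executable name and main window title below are assumptions made purely for
# illustration and are not taken from the fixture itself.
if __name__ == "__main__":
    from TestStack.White import Application

    application = Application.Launch("AddressBook.exe")   # hypothetical executable
    main_window = application.GetWindow("Address book")   # hypothetical window title
    add_new_group(main_window, "Friends")
    assert "Friends" in get_group_list(main_window)
    delete_group(main_window, 0)
    application.Close()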
avg_line_length: 40.627451 | max_line_length: 110 | alphanum_fraction: 0.785714
hexsha: 1906625496881413ffa4ac665c543099cab6fee8 | size: 23,258 | ext: py | lang: Python
max_stars_repo_path: catkin_tools/jobs/catkin.py | max_stars_repo_name: ruvu/catkin_tools | max_stars_repo_head_hexsha: cbd9ffc497a1880026c88ce3b24d390f238d8835 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: catkin_tools/jobs/catkin.py | max_issues_repo_name: ruvu/catkin_tools | max_issues_repo_head_hexsha: cbd9ffc497a1880026c88ce3b24d390f238d8835 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: catkin_tools/jobs/catkin.py | max_forks_repo_name: ruvu/catkin_tools | max_forks_repo_head_hexsha: cbd9ffc497a1880026c88ce3b24d390f238d8835 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import csv
import os

try:
    from md5 import md5
except ImportError:
    from hashlib import md5

from catkin_tools.argument_parsing import handle_make_arguments
from catkin_tools.common import mkdir_p
from catkin_tools.execution.jobs import Job
from catkin_tools.execution.stages import CommandStage
from catkin_tools.execution.stages import FunctionStage
from .commands.cmake import CMAKE_EXEC
from .commands.cmake import CMakeIOBufferProtocol
from .commands.cmake import CMakeMakeIOBufferProtocol
from .commands.cmake import get_installed_files
from .commands.make import MAKE_EXEC
from .utils import copyfiles
from .utils import loadenv
from .utils import makedirs
from .utils import require_command
from .utils import rmfiles
def get_prebuild_package(build_space_abs, devel_space_abs, force):
"""This generates a minimal Catkin package used to generate Catkin
environment setup files in a merged devel space.
:param build_space_abs: The path to a merged build space
:param devel_space_abs: The path to a merged devel space
:param force: Overwrite files if they exist
:returns: source directory path
"""
# Get the path to the prebuild package
prebuild_path = os.path.join(build_space_abs, 'catkin_tools_prebuild')
if not os.path.exists(prebuild_path):
mkdir_p(prebuild_path)
# Create CMakeLists.txt file
cmakelists_txt_path = os.path.join(prebuild_path, 'CMakeLists.txt')
if force or not os.path.exists(cmakelists_txt_path):
with open(cmakelists_txt_path, 'wb') as cmakelists_txt:
cmakelists_txt.write(SETUP_PREBUILD_CMAKELISTS_TEMPLATE.encode('utf-8'))
# Create package.xml file
package_xml_path = os.path.join(prebuild_path, 'package.xml')
if force or not os.path.exists(package_xml_path):
with open(package_xml_path, 'wb') as package_xml:
package_xml.write(SETUP_PREBUILD_PACKAGE_XML_TEMPLATE.encode('utf-8'))
# Create the build directory for this package
mkdir_p(os.path.join(build_space_abs, 'catkin_tools_prebuild'))
return prebuild_path
def clean_linked_files(
logger,
event_queue,
metadata_path,
files_that_collide,
files_to_clean,
dry_run):
"""Removes a list of files and adjusts collison counts for colliding files.
This function synchronizes access to the devel collisions file.
:param devel_space_abs: absolute path to merged devel space
:param files_that_collide: list of absolute paths to files that collide
:param files_to_clean: list of absolute paths to files to clean
"""
# Get paths
devel_collisions_file_path = os.path.join(metadata_path, 'devel_collisions.txt')
# Map from dest files to number of collisions
dest_collisions = dict()
# Load destination collisions file
if os.path.exists(devel_collisions_file_path):
with open(devel_collisions_file_path, 'r') as collisions_file:
collisions_reader = csv.reader(collisions_file, delimiter=' ', quotechar='"')
dest_collisions = dict([(path, int(count)) for path, count in collisions_reader])
# Add collisions
for dest_file in files_that_collide:
if dest_file in dest_collisions:
dest_collisions[dest_file] += 1
else:
dest_collisions[dest_file] = 1
# Remove files that no longer collide
for dest_file in files_to_clean:
# Get the collisions
n_collisions = dest_collisions.get(dest_file, 0)
# Check collisions
if n_collisions == 0:
logger.out('Unlinking: {}'.format(dest_file))
# Remove this link
if not dry_run:
if os.path.exists(dest_file):
try:
os.unlink(dest_file)
except OSError:
logger.err('Could not unlink: {}'.format(dest_file))
raise
# Remove any non-empty directories containing this file
try:
os.removedirs(os.path.split(dest_file)[0])
except OSError:
pass
else:
                    logger.out('Already unlinked: {}'.format(dest_file))
# Update collisions
if n_collisions > 1:
# Decrement the dest collisions dict
dest_collisions[dest_file] -= 1
elif n_collisions == 1:
# Remove it from the dest collisions dict
del dest_collisions[dest_file]
# Load destination collisions file
if not dry_run:
with open(devel_collisions_file_path, 'w') as collisions_file:
collisions_writer = csv.writer(collisions_file, delimiter=' ', quotechar='"')
for dest_file, count in dest_collisions.items():
collisions_writer.writerow([dest_file, count])
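# Illustrative note (not part of the original catkin_tools source): the
# devel_collisions.txt file maintained above is a space-delimited CSV mapping
# each linked destination path to the number of packages that still provide
# it, for example:
#
#   /ws/devel/lib/libfoo.so 2
#   "/ws/devel/share/a file with spaces" 1
#
# A destination file is only unlinked once no package claims it any more.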
def unlink_devel_products(
logger,
event_queue,
devel_space_abs,
private_devel_path,
metadata_path,
package_metadata_path,
dry_run):
"""
Remove all files listed in the devel manifest for the given package, as
well as any empty directories containing those files.
:param devel_space_abs: Path to a merged devel space.
:param private_devel_path: Path to the private devel space
:param devel_manifest_path: Path to the directory containing the package's
catkin_tools metadata
"""
# Check paths
if not os.path.exists(private_devel_path):
logger.err('Warning: No private devel path found at `{}`'.format(private_devel_path))
return 0
devel_manifest_file_path = os.path.join(package_metadata_path, DEVEL_MANIFEST_FILENAME)
if not os.path.exists(devel_manifest_file_path):
logger.err('Error: No devel manifest found at `{}`'.format(devel_manifest_file_path))
return 1
# List of files to clean
files_to_clean = []
# Read in devel_manifest.txt
with open(devel_manifest_file_path, 'r') as devel_manifest:
devel_manifest.readline()
manifest_reader = csv.reader(devel_manifest, delimiter=' ', quotechar='"')
# Remove all listed symlinks and empty directories
for source_file, dest_file in manifest_reader:
if not os.path.exists(dest_file):
logger.err("Warning: Dest file doesn't exist, so it can't be removed: " + dest_file)
elif not os.path.islink(dest_file):
logger.err("Error: Dest file isn't a symbolic link: " + dest_file)
return -1
elif False and os.path.realpath(dest_file) != source_file:
logger.err("Error: Dest file isn't a symbolic link to the expected file: " + dest_file)
return -1
else:
# Clean the file or decrement the collision count
files_to_clean.append(dest_file)
    # Remove all listed symlinks and empty directories which have been removed
# after this build, and update the collision file
clean_linked_files(logger, event_queue, metadata_path, [], files_to_clean, dry_run)
return 0
def link_devel_products(
logger, event_queue,
package,
package_path,
devel_manifest_path,
source_devel_path,
dest_devel_path,
metadata_path,
prebuild):
"""Link files from an isolated devel space into a merged one.
This creates directories and symlinks in a merged devel space to a
package's linked devel space.
"""
# Create the devel manifest path if necessary
mkdir_p(devel_manifest_path)
# Construct manifest file path
devel_manifest_file_path = os.path.join(devel_manifest_path, DEVEL_MANIFEST_FILENAME)
# Pair of source/dest files or directories
products = list()
# List of files to clean
files_to_clean = []
# List of files that collide
files_that_collide = []
# Select the blacklist
blacklist = DEVEL_LINK_PREBUILD_BLACKLIST if prebuild else DEVEL_LINK_BLACKLIST
# Gather all of the files in the devel space
for source_path, dirs, files in os.walk(source_devel_path):
# compute destination path
dest_path = os.path.join(dest_devel_path, os.path.relpath(source_path, source_devel_path))
# create directories in the destination develspace
for dirname in dirs:
source_dir = os.path.join(source_path, dirname)
dest_dir = os.path.join(dest_path, dirname)
if os.path.islink(source_dir):
# Store the source/dest pair
products.append((source_dir, dest_dir))
if os.path.exists(dest_dir):
if os.path.realpath(dest_dir) != os.path.realpath(source_dir):
files_that_collide.append(dest_dir)
else:
logger.out('Linked: ({}, {})'.format(source_dir, dest_dir))
else:
# Create a symlink
logger.out('Symlinking %s' % (dest_dir))
try:
os.symlink(source_dir, dest_dir)
except OSError:
logger.err('Could not create symlink `{}` referencing `{}`'.format(dest_dir, source_dir))
raise
else:
if not os.path.exists(dest_dir):
# Create the dest directory if it doesn't exist
os.mkdir(dest_dir)
elif not os.path.isdir(dest_dir):
logger.err('Error: Cannot create directory: {}'.format(dest_dir))
return -1
# create symbolic links from the source to the dest
for filename in files:
# Don't link files on the blacklist unless this is a prebuild package
if os.path.relpath(os.path.join(source_path, filename), source_devel_path) in blacklist:
continue
source_file = os.path.join(source_path, filename)
dest_file = os.path.join(dest_path, filename)
# Store the source/dest pair
products.append((source_file, dest_file))
# Check if the symlink exists
if os.path.exists(dest_file):
if os.path.realpath(dest_file) != os.path.realpath(source_file):
# Compute hashes for colliding files
source_hash = md5(open(os.path.realpath(source_file), "rb").read()).hexdigest()
dest_hash = md5(open(os.path.realpath(dest_file), "rb").read()).hexdigest()
# If the link links to a different file, report a warning and increment
# the collision counter for this path
if dest_hash != source_hash:
logger.err('Warning: Cannot symlink from %s to existing file %s' % (source_file, dest_file))
logger.err('Warning: Source hash: {}'.format(source_hash))
logger.err('Warning: Dest hash: {}'.format(dest_hash))
# Increment link collision counter
files_that_collide.append(dest_file)
else:
logger.out('Linked: ({}, {})'.format(source_file, dest_file))
else:
# Create the symlink
logger.out('Symlinking %s' % (dest_file))
try:
os.symlink(source_file, dest_file)
except OSError:
logger.err('Could not create symlink `{}` referencing `{}`'.format(dest_file, source_file))
raise
# Load the old list of symlinked files for this package
if os.path.exists(devel_manifest_file_path):
with open(devel_manifest_file_path, 'r') as devel_manifest:
manifest_reader = csv.reader(devel_manifest, delimiter=' ', quotechar='"')
# Skip the package source directory
devel_manifest.readline()
# Read the previously-generated products
for source_file, dest_file in manifest_reader:
# print('Checking (%s, %s)' % (source_file, dest_file))
if (source_file, dest_file) not in products:
# Clean the file or decrement the collision count
logger.out('Cleaning: (%s, %s)' % (source_file, dest_file))
files_to_clean.append(dest_file)
# Remove all listed symlinks and empty directories which have been removed
# after this build, and update the collision file
try:
clean_linked_files(logger, event_queue, metadata_path, files_that_collide, files_to_clean, dry_run=False)
except: # noqa: E722
# Silencing E722 here since we immediately re-raise the exception.
logger.err('Could not clean linked files.')
raise
# Save the list of symlinked files
with open(devel_manifest_file_path, 'w') as devel_manifest:
# Write the path to the package source directory
devel_manifest.write('%s\n' % package_path)
# Write all the products
manifest_writer = csv.writer(devel_manifest, delimiter=' ', quotechar='"')
for source_file, dest_file in products:
manifest_writer.writerow([source_file, dest_file])
return 0
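# Illustrative note (not part of the original catkin_tools source): the
# devel_manifest.txt written by link_devel_products() starts with the package
# source directory and is followed by one space-delimited source/dest pair per
# linked product, for example:
#
#   /ws/src/my_package
#   /ws/devel/.private/my_package/lib/libbar.so /ws/devel/lib/libbar.so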
def create_catkin_build_job(context, package, package_path, dependencies, force_cmake, pre_clean, prebuild=False):
"""Job class for building catkin packages"""
# Package source space path
pkg_dir = os.path.join(context.source_space_abs, package_path)
# Package build space path
build_space = context.package_build_space(package)
# Package devel space path
devel_space = context.package_devel_space(package)
# Package install space path
install_space = context.package_install_space(package)
# Package metadata path
metadata_path = context.package_metadata_path(package)
# Environment dictionary for the job, which will be built
# up by the executions in the loadenv stage.
job_env = dict(os.environ)
# Create job stages
stages = []
# Load environment for job.
stages.append(FunctionStage(
'loadenv',
loadenv,
locked_resource=None if context.isolate_install else 'installspace',
job_env=job_env,
package=package,
context=context
))
# Create package build space
stages.append(FunctionStage(
'mkdir',
makedirs,
path=build_space
))
# Create package metadata dir
stages.append(FunctionStage(
'mkdir',
makedirs,
path=metadata_path
))
# Copy source manifest
stages.append(FunctionStage(
'cache-manifest',
copyfiles,
source_paths=[os.path.join(context.source_space_abs, package_path, 'package.xml')],
dest_path=os.path.join(metadata_path, 'package.xml')
))
# Only run CMake if the Makefile doesn't exist or if --force-cmake is given
# TODO: This would need to be different with `cmake --build`
makefile_path = os.path.join(build_space, 'Makefile')
if not os.path.isfile(makefile_path) or force_cmake:
require_command('cmake', CMAKE_EXEC)
# CMake command
stages.append(CommandStage(
'cmake',
[
CMAKE_EXEC,
pkg_dir,
'--no-warn-unused-cli',
'-DCATKIN_DEVEL_PREFIX=' + devel_space,
'-DCMAKE_INSTALL_PREFIX=' + install_space
] + context.cmake_args,
cwd=build_space,
logger_factory=CMakeIOBufferProtocol.factory_factory(pkg_dir),
occupy_job=True
))
else:
# Check buildsystem command
stages.append(CommandStage(
'check',
[MAKE_EXEC, 'cmake_check_build_system'],
cwd=build_space,
logger_factory=CMakeIOBufferProtocol.factory_factory(pkg_dir),
occupy_job=True
))
# Filter make arguments
make_args = handle_make_arguments(
context.make_args +
context.catkin_make_args)
# Pre-clean command
if pre_clean:
# TODO: Remove target args from `make_args`
stages.append(CommandStage(
'preclean',
[MAKE_EXEC, 'clean'] + make_args,
cwd=build_space,
))
require_command('make', MAKE_EXEC)
# Make command
stages.append(CommandStage(
'make',
[MAKE_EXEC] + make_args,
cwd=build_space,
logger_factory=CMakeMakeIOBufferProtocol.factory
))
# Symlink command if using a linked develspace
if context.link_devel:
stages.append(FunctionStage(
'symlink',
link_devel_products,
locked_resource='symlink-collisions-file',
package=package,
package_path=package_path,
devel_manifest_path=context.package_metadata_path(package),
source_devel_path=context.package_devel_space(package),
dest_devel_path=context.devel_space_abs,
metadata_path=context.metadata_path(),
prebuild=prebuild
))
# Make install command, if installing
if context.install:
stages.append(CommandStage(
'install',
[MAKE_EXEC, 'install'],
cwd=build_space,
logger_factory=CMakeMakeIOBufferProtocol.factory,
locked_resource=None if context.isolate_install else 'installspace'
))
return Job(
jid=package.name,
deps=dependencies,
env=job_env,
stages=stages)
def create_catkin_clean_job(
context,
package,
package_path,
dependencies,
dry_run,
clean_build,
clean_devel,
clean_install):
"""Generate a Job that cleans a catkin package"""
stages = []
# Package build space path
build_space = context.package_build_space(package)
# Package metadata path
metadata_path = context.package_metadata_path(package)
# Environment dictionary for the job, empty for a clean job
job_env = {}
# Remove installed files
if clean_install:
installed_files = get_installed_files(context.package_metadata_path(package))
stages.append(FunctionStage(
'cleaninstall',
rmfiles,
paths=sorted(installed_files),
remove_empty=True,
empty_root=context.install_space_abs,
dry_run=dry_run))
# Remove products in develspace
if clean_devel:
if context.merge_devel:
# Remove build targets from devel space
stages.append(CommandStage(
'clean',
[MAKE_EXEC, 'clean'],
cwd=build_space,
))
elif context.link_devel:
# Remove symlinked products
stages.append(FunctionStage(
'unlink',
unlink_devel_products,
locked_resource='symlink-collisions-file',
devel_space_abs=context.devel_space_abs,
private_devel_path=context.package_private_devel_path(package),
metadata_path=context.metadata_path(),
package_metadata_path=context.package_metadata_path(package),
dry_run=dry_run
))
# Remove devel space
stages.append(FunctionStage(
'rmdevel',
rmfiles,
paths=[context.package_private_devel_path(package)],
dry_run=dry_run))
elif context.isolate_devel:
# Remove devel space
stages.append(FunctionStage(
'rmdevel',
rmfiles,
paths=[context.package_devel_space(package)],
dry_run=dry_run))
# Remove build space
if clean_build:
stages.append(FunctionStage(
'rmbuild',
rmfiles,
paths=[build_space],
dry_run=dry_run))
# Remove cached metadata
if clean_build and clean_devel and clean_install:
stages.append(FunctionStage(
'rmmetadata',
rmfiles,
paths=[metadata_path],
dry_run=dry_run))
return Job(
jid=package.name,
deps=dependencies,
env=job_env,
stages=stages)
description = dict(
build_type='catkin',
description="Builds a catkin package.",
create_build_job=create_catkin_build_job,
create_clean_job=create_catkin_clean_job
)
DEVEL_MANIFEST_FILENAME = 'devel_manifest.txt'
# List of files which shouldn't be copied
DEVEL_LINK_PREBUILD_BLACKLIST = [
'.catkin',
'.rosinstall',
]
DEVEL_LINK_BLACKLIST = DEVEL_LINK_PREBUILD_BLACKLIST + [
os.path.join('etc', 'catkin', 'profile.d', '05.catkin_make.bash'),
os.path.join('etc', 'catkin', 'profile.d', '05.catkin_make_isolated.bash'),
os.path.join('etc', 'catkin', 'profile.d', '05.catkin-test-results.sh'),
'env.sh',
'setup.bash',
'setup.zsh',
'setup.sh',
'local_setup.bash',
'local_setup.zsh',
'local_setup.sh',
'_setup_util.py',
]
# CMakeLists.txt for prebuild package
SETUP_PREBUILD_CMAKELISTS_TEMPLATE = """\
cmake_minimum_required(VERSION 2.8.12)
project(catkin_tools_prebuild)
find_package(catkin QUIET)
if(catkin_FOUND)
catkin_package()
else()
# Generate an error here which is more helpful than the normal one generated by CMake.
# TODO: It's possible that we could just do this silently, instead.
message(FATAL_ERROR
"The catkin CMake module was not found, but it is required to build a linked workspace.\
To resolve this, please do one of the following, and try building again.
1. Source the setup.sh file from an existing catkin workspace:
source SETUP_FILE
2. Extend another catkin workspace's result (install or devel) space:
catkin config --extend RESULT_SPACE
3. Set `catkin_DIR` to the directory containing `catkin-config.cmake`:
catkin config --cmake-args -Dcatkin_DIR=CATKIN_CMAKE_CONFIG_PATH
4. Add the catkin source package to your workspace's source space:
cd SOURCE_SPACE && git clone https://github.com/ros/catkin.git")
endif()
"""
# package.xml file for prebuild package
SETUP_PREBUILD_PACKAGE_XML_TEMPLATE = """\
<package>
<name>catkin_tools_prebuild</name>
<description>
This package is used to generate catkin setup files.
</description>
<version>0.0.0</version>
<license>BSD</license>
<maintainer email="jbo@jhu.edu">jbohren</maintainer>
<buildtool_depend>catkin</buildtool_depend>
</package>
"""
| 35.671779
| 116
| 0.640038
|
8338449aaa0a3ef843f18411081b6bb836b92542
| 31,164
|
py
|
Python
|
Lib/asyncio/proactor_events.py
|
chexca/cpython
|
cfc6ce4d40f2f01314b7e283fb972a7bb3ed3faa
|
[
"CNRI-Python-GPL-Compatible"
] | 4
|
2017-12-11T08:20:15.000Z
|
2022-03-08T09:35:04.000Z
|
Lib/asyncio/proactor_events.py
|
chexca/cpython
|
cfc6ce4d40f2f01314b7e283fb972a7bb3ed3faa
|
[
"CNRI-Python-GPL-Compatible"
] | 4
|
2020-03-13T22:24:05.000Z
|
2020-03-19T15:08:18.000Z
|
Lib/asyncio/proactor_events.py
|
chexca/cpython
|
cfc6ce4d40f2f01314b7e283fb972a7bb3ed3faa
|
[
"CNRI-Python-GPL-Compatible"
] | 5
|
2018-12-29T15:43:57.000Z
|
2020-12-14T15:29:43.000Z
|
"""Event loop using a proactor and related classes.
A proactor is a "notify-on-completion" multiplexer. Currently a
proactor is only implemented on Windows with IOCP.
"""
__all__ = 'BaseProactorEventLoop',
import io
import os
import socket
import warnings
import signal
import threading
import collections
from . import base_events
from . import constants
from . import futures
from . import exceptions
from . import protocols
from . import sslproto
from . import transports
from . import trsock
from .log import logger
def _set_socket_extra(transport, sock):
transport._extra['socket'] = trsock.TransportSocket(sock)
try:
transport._extra['sockname'] = sock.getsockname()
except socket.error:
if transport._loop.get_debug():
logger.warning(
"getsockname() failed on %r", sock, exc_info=True)
if 'peername' not in transport._extra:
try:
transport._extra['peername'] = sock.getpeername()
except socket.error:
# UDP sockets may not have a peer name
transport._extra['peername'] = None
class _ProactorBasePipeTransport(transports._FlowControlMixin,
transports.BaseTransport):
"""Base class for pipe and socket transports."""
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
super().__init__(extra, loop)
self._set_extra(sock)
self._sock = sock
self.set_protocol(protocol)
self._server = server
self._buffer = None # None or bytearray.
self._read_fut = None
self._write_fut = None
self._pending_write = 0
self._conn_lost = 0
self._closing = False # Set when close() called.
self._eof_written = False
if self._server is not None:
self._server._attach()
self._loop.call_soon(self._protocol.connection_made, self)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(futures._set_result_unless_cancelled,
waiter, None)
def __repr__(self):
info = [self.__class__.__name__]
if self._sock is None:
info.append('closed')
elif self._closing:
info.append('closing')
if self._sock is not None:
info.append(f'fd={self._sock.fileno()}')
if self._read_fut is not None:
info.append(f'read={self._read_fut!r}')
if self._write_fut is not None:
info.append(f'write={self._write_fut!r}')
if self._buffer:
info.append(f'write_bufsize={len(self._buffer)}')
if self._eof_written:
info.append('EOF written')
return '<{}>'.format(' '.join(info))
def _set_extra(self, sock):
self._extra['pipe'] = sock
def set_protocol(self, protocol):
self._protocol = protocol
def get_protocol(self):
return self._protocol
def is_closing(self):
return self._closing
def close(self):
if self._closing:
return
self._closing = True
self._conn_lost += 1
if not self._buffer and self._write_fut is None:
self._loop.call_soon(self._call_connection_lost, None)
if self._read_fut is not None:
self._read_fut.cancel()
self._read_fut = None
def __del__(self, _warn=warnings.warn):
if self._sock is not None:
_warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
self.close()
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
try:
if isinstance(exc, OSError):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
finally:
self._force_close(exc)
def _force_close(self, exc):
if self._empty_waiter is not None and not self._empty_waiter.done():
if exc is None:
self._empty_waiter.set_result(None)
else:
self._empty_waiter.set_exception(exc)
if self._closing:
return
self._closing = True
self._conn_lost += 1
if self._write_fut:
self._write_fut.cancel()
self._write_fut = None
if self._read_fut:
self._read_fut.cancel()
self._read_fut = None
self._pending_write = 0
self._buffer = None
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
# XXX If there is a pending overlapped read on the other
# end then it may fail with ERROR_NETNAME_DELETED if we
# just close our end. First calling shutdown() seems to
# cure it, but maybe using DisconnectEx() would be better.
if hasattr(self._sock, 'shutdown'):
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
self._sock = None
server = self._server
if server is not None:
server._detach()
self._server = None
def get_write_buffer_size(self):
size = self._pending_write
if self._buffer is not None:
size += len(self._buffer)
return size
class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
transports.ReadTransport):
"""Transport for read pipes."""
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
self._pending_data = None
self._paused = True
super().__init__(loop, sock, protocol, waiter, extra, server)
self._loop.call_soon(self._loop_reading)
self._paused = False
def is_reading(self):
return not self._paused and not self._closing
def pause_reading(self):
if self._closing or self._paused:
return
self._paused = True
        # bpo-33694: Don't cancel self._read_fut because cancelling an
        # overlapped WSASend() silently loses data with the current proactor
        # implementation.
#
# If CancelIoEx() fails with ERROR_NOT_FOUND, it means that WSASend()
# completed (even if HasOverlappedIoCompleted() returns 0), but
# Overlapped.cancel() currently silently ignores the ERROR_NOT_FOUND
        # error. Once the overlapped is ignored, the IOCP loop will ignore the
# completion I/O event and so not read the result of the overlapped
# WSARecv().
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
def resume_reading(self):
if self._closing or not self._paused:
return
self._paused = False
if self._read_fut is None:
self._loop.call_soon(self._loop_reading, None)
data = self._pending_data
self._pending_data = None
if data is not None:
            # Call the protocol method after calling _loop_reading(),
# since the protocol can decide to pause reading again.
self._loop.call_soon(self._data_received, data)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _eof_received(self):
if self._loop.get_debug():
logger.debug("%r received EOF", self)
try:
keep_open = self._protocol.eof_received()
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
self._fatal_error(
exc, 'Fatal error: protocol.eof_received() call failed.')
return
if not keep_open:
self.close()
def _data_received(self, data):
if self._paused:
# Don't call any protocol method while reading is paused.
# The protocol will be called on resume_reading().
assert self._pending_data is None
self._pending_data = data
return
if not data:
self._eof_received()
return
if isinstance(self._protocol, protocols.BufferedProtocol):
try:
protocols._feed_data_to_buffered_proto(self._protocol, data)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
self._fatal_error(exc,
'Fatal error: protocol.buffer_updated() '
'call failed.')
return
else:
self._protocol.data_received(data)
def _loop_reading(self, fut=None):
data = None
try:
if fut is not None:
assert self._read_fut is fut or (self._read_fut is None and
self._closing)
self._read_fut = None
if fut.done():
# deliver data later in "finally" clause
data = fut.result()
else:
# the future will be replaced by next proactor.recv call
fut.cancel()
if self._closing:
# since close() has been called we ignore any read data
data = None
return
if data == b'':
# we got end-of-file so no need to reschedule a new read
return
# bpo-33694: buffer_updated() has currently no fast path because of
# a data loss issue caused by overlapped WSASend() cancellation.
if not self._paused:
# reschedule a new read
self._read_fut = self._loop._proactor.recv(self._sock, 32768)
except ConnectionAbortedError as exc:
if not self._closing:
self._fatal_error(exc, 'Fatal read error on pipe transport')
elif self._loop.get_debug():
logger.debug("Read error on pipe transport while closing",
exc_info=True)
except ConnectionResetError as exc:
self._force_close(exc)
except OSError as exc:
self._fatal_error(exc, 'Fatal read error on pipe transport')
except exceptions.CancelledError:
if not self._closing:
raise
else:
if not self._paused:
self._read_fut.add_done_callback(self._loop_reading)
finally:
if data is not None:
self._data_received(data)
class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
transports.WriteTransport):
"""Transport for write pipes."""
_start_tls_compatible = True
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self._empty_waiter = None
def write(self, data):
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError(
f"data argument must be a bytes-like object, "
f"not {type(data).__name__}")
if self._eof_written:
raise RuntimeError('write_eof() already called')
if self._empty_waiter is not None:
raise RuntimeError('unable to write; sendfile is in progress')
if not data:
return
if self._conn_lost:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
# Observable states:
# 1. IDLE: _write_fut and _buffer both None
# 2. WRITING: _write_fut set; _buffer None
# 3. BACKED UP: _write_fut set; _buffer a bytearray
# We always copy the data, so the caller can't modify it
# while we're still waiting for the I/O to happen.
if self._write_fut is None: # IDLE -> WRITING
assert self._buffer is None
# Pass a copy, except if it's already immutable.
self._loop_writing(data=bytes(data))
elif not self._buffer: # WRITING -> BACKED UP
# Make a mutable copy which we can extend.
self._buffer = bytearray(data)
self._maybe_pause_protocol()
else: # BACKED UP
# Append to buffer (also copies).
self._buffer.extend(data)
self._maybe_pause_protocol()
def _loop_writing(self, f=None, data=None):
try:
if f is not None and self._write_fut is None and self._closing:
# XXX most likely self._force_close() has been called, and
# it has set self._write_fut to None.
return
assert f is self._write_fut
self._write_fut = None
self._pending_write = 0
if f:
f.result()
if data is None:
data = self._buffer
self._buffer = None
if not data:
if self._closing:
self._loop.call_soon(self._call_connection_lost, None)
if self._eof_written:
self._sock.shutdown(socket.SHUT_WR)
# Now that we've reduced the buffer size, tell the
# protocol to resume writing if it was paused. Note that
# we do this last since the callback is called immediately
# and it may add more data to the buffer (even causing the
# protocol to be paused again).
self._maybe_resume_protocol()
else:
self._write_fut = self._loop._proactor.send(self._sock, data)
if not self._write_fut.done():
assert self._pending_write == 0
self._pending_write = len(data)
self._write_fut.add_done_callback(self._loop_writing)
self._maybe_pause_protocol()
else:
self._write_fut.add_done_callback(self._loop_writing)
if self._empty_waiter is not None and self._write_fut is None:
self._empty_waiter.set_result(None)
except ConnectionResetError as exc:
self._force_close(exc)
except OSError as exc:
self._fatal_error(exc, 'Fatal write error on pipe transport')
def can_write_eof(self):
return True
def write_eof(self):
self.close()
def abort(self):
self._force_close(None)
def _make_empty_waiter(self):
if self._empty_waiter is not None:
raise RuntimeError("Empty waiter is already set")
self._empty_waiter = self._loop.create_future()
if self._write_fut is None:
self._empty_waiter.set_result(None)
return self._empty_waiter
def _reset_empty_waiter(self):
self._empty_waiter = None
class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self._read_fut = self._loop._proactor.recv(self._sock, 16)
self._read_fut.add_done_callback(self._pipe_closed)
def _pipe_closed(self, fut):
if fut.cancelled():
# the transport has been closed
return
assert fut.result() == b''
if self._closing:
assert self._read_fut is None
return
assert fut is self._read_fut, (fut, self._read_fut)
self._read_fut = None
if self._write_fut is not None:
self._force_close(BrokenPipeError())
else:
self.close()
class _ProactorDatagramTransport(_ProactorBasePipeTransport):
max_size = 256 * 1024
def __init__(self, loop, sock, protocol, address=None,
waiter=None, extra=None):
self._address = address
self._empty_waiter = None
# We don't need to call _protocol.connection_made() since our base
# constructor does it for us.
super().__init__(loop, sock, protocol, waiter=waiter, extra=extra)
# The base constructor sets _buffer = None, so we set it here
self._buffer = collections.deque()
self._loop.call_soon(self._loop_reading)
def _set_extra(self, sock):
_set_socket_extra(self, sock)
def get_write_buffer_size(self):
return sum(len(data) for data, _ in self._buffer)
def abort(self):
self._force_close(None)
def sendto(self, data, addr=None):
if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError(
                f'data argument must be a bytes-like object, '
                f'not {type(data).__name__}')
if not data:
return
if self._address is not None and addr not in (None, self._address):
raise ValueError(
f'Invalid address: must be None or {self._address}')
if self._conn_lost and self._address:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.sendto() raised exception.')
self._conn_lost += 1
return
# Ensure that what we buffer is immutable.
self._buffer.append((bytes(data), addr))
if self._write_fut is None:
# No current write operations are active, kick one off
self._loop_writing()
# else: A write operation is already kicked off
self._maybe_pause_protocol()
def _loop_writing(self, fut=None):
try:
if self._conn_lost:
return
assert fut is self._write_fut
self._write_fut = None
if fut:
# We are in a _loop_writing() done callback, get the result
fut.result()
if not self._buffer or (self._conn_lost and self._address):
# The connection has been closed
if self._closing:
self._loop.call_soon(self._call_connection_lost, None)
return
data, addr = self._buffer.popleft()
if self._address is not None:
self._write_fut = self._loop._proactor.send(self._sock,
data)
else:
self._write_fut = self._loop._proactor.sendto(self._sock,
data,
addr=addr)
except OSError as exc:
self._protocol.error_received(exc)
except Exception as exc:
self._fatal_error(exc, 'Fatal write error on datagram transport')
else:
self._write_fut.add_done_callback(self._loop_writing)
self._maybe_resume_protocol()
def _loop_reading(self, fut=None):
data = None
try:
if self._conn_lost:
return
assert self._read_fut is fut or (self._read_fut is None and
self._closing)
self._read_fut = None
if fut is not None:
res = fut.result()
if self._closing:
# since close() has been called we ignore any read data
data = None
return
if self._address is not None:
data, addr = res, self._address
else:
data, addr = res
if self._conn_lost:
return
if self._address is not None:
self._read_fut = self._loop._proactor.recv(self._sock,
self.max_size)
else:
self._read_fut = self._loop._proactor.recvfrom(self._sock,
self.max_size)
except OSError as exc:
self._protocol.error_received(exc)
except exceptions.CancelledError:
if not self._closing:
raise
else:
if self._read_fut is not None:
self._read_fut.add_done_callback(self._loop_reading)
finally:
if data:
self._protocol.datagram_received(data, addr)
class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport,
_ProactorBaseWritePipeTransport,
transports.Transport):
"""Transport for duplex pipes."""
def can_write_eof(self):
return False
def write_eof(self):
raise NotImplementedError
class _ProactorSocketTransport(_ProactorReadPipeTransport,
_ProactorBaseWritePipeTransport,
transports.Transport):
"""Transport for connected sockets."""
_sendfile_compatible = constants._SendfileMode.TRY_NATIVE
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
super().__init__(loop, sock, protocol, waiter, extra, server)
base_events._set_nodelay(sock)
def _set_extra(self, sock):
_set_socket_extra(self, sock)
def can_write_eof(self):
return True
def write_eof(self):
if self._closing or self._eof_written:
return
self._eof_written = True
if self._write_fut is None:
self._sock.shutdown(socket.SHUT_WR)
class BaseProactorEventLoop(base_events.BaseEventLoop):
def __init__(self, proactor):
super().__init__()
logger.debug('Using proactor: %s', proactor.__class__.__name__)
self._proactor = proactor
self._selector = proactor # convenient alias
self._self_reading_future = None
self._accept_futures = {} # socket file descriptor => Future
proactor.set_loop(self)
self._make_self_pipe()
if threading.current_thread() is threading.main_thread():
# wakeup fd can only be installed to a file descriptor from the main thread
signal.set_wakeup_fd(self._csock.fileno())
def _make_socket_transport(self, sock, protocol, waiter=None,
extra=None, server=None):
return _ProactorSocketTransport(self, sock, protocol, waiter,
extra, server)
def _make_ssl_transport(
self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None,
ssl_handshake_timeout=None):
ssl_protocol = sslproto.SSLProtocol(
self, protocol, sslcontext, waiter,
server_side, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
_ProactorSocketTransport(self, rawsock, ssl_protocol,
extra=extra, server=server)
return ssl_protocol._app_transport
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
return _ProactorDatagramTransport(self, sock, protocol, address,
waiter, extra)
def _make_duplex_pipe_transport(self, sock, protocol, waiter=None,
extra=None):
return _ProactorDuplexPipeTransport(self,
sock, protocol, waiter, extra)
def _make_read_pipe_transport(self, sock, protocol, waiter=None,
extra=None):
return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra)
def _make_write_pipe_transport(self, sock, protocol, waiter=None,
extra=None):
# We want connection_lost() to be called when other end closes
return _ProactorWritePipeTransport(self,
sock, protocol, waiter, extra)
def close(self):
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self.is_closed():
return
if threading.current_thread() is threading.main_thread():
signal.set_wakeup_fd(-1)
# Call these methods before closing the event loop (before calling
# BaseEventLoop.close), because they can schedule callbacks with
# call_soon(), which is forbidden when the event loop is closed.
self._stop_accept_futures()
self._close_self_pipe()
self._proactor.close()
self._proactor = None
self._selector = None
# Close the event loop
super().close()
async def sock_recv(self, sock, n):
return await self._proactor.recv(sock, n)
async def sock_recv_into(self, sock, buf):
return await self._proactor.recv_into(sock, buf)
async def sock_sendall(self, sock, data):
return await self._proactor.send(sock, data)
async def sock_connect(self, sock, address):
return await self._proactor.connect(sock, address)
async def sock_accept(self, sock):
return await self._proactor.accept(sock)
async def _sock_sendfile_native(self, sock, file, offset, count):
try:
fileno = file.fileno()
except (AttributeError, io.UnsupportedOperation) as err:
raise exceptions.SendfileNotAvailableError("not a regular file")
try:
fsize = os.fstat(fileno).st_size
except OSError:
raise exceptions.SendfileNotAvailableError("not a regular file")
blocksize = count if count else fsize
if not blocksize:
return 0 # empty file
blocksize = min(blocksize, 0xffff_ffff)
end_pos = min(offset + count, fsize) if count else fsize
offset = min(offset, fsize)
total_sent = 0
try:
while True:
blocksize = min(end_pos - offset, blocksize)
if blocksize <= 0:
return total_sent
await self._proactor.sendfile(sock, file, offset, blocksize)
offset += blocksize
total_sent += blocksize
finally:
if total_sent > 0:
file.seek(offset)
async def _sendfile_native(self, transp, file, offset, count):
resume_reading = transp.is_reading()
transp.pause_reading()
await transp._make_empty_waiter()
try:
return await self.sock_sendfile(transp._sock, file, offset, count,
fallback=False)
finally:
transp._reset_empty_waiter()
if resume_reading:
transp.resume_reading()
def _close_self_pipe(self):
if self._self_reading_future is not None:
self._self_reading_future.cancel()
self._self_reading_future = None
self._ssock.close()
self._ssock = None
self._csock.close()
self._csock = None
self._internal_fds -= 1
def _make_self_pipe(self):
# A self-socket, really. :-)
self._ssock, self._csock = socket.socketpair()
self._ssock.setblocking(False)
self._csock.setblocking(False)
self._internal_fds += 1
def _loop_self_reading(self, f=None):
try:
if f is not None:
f.result() # may raise
f = self._proactor.recv(self._ssock, 4096)
except exceptions.CancelledError:
# _close_self_pipe() has been called, stop waiting for data
return
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
self.call_exception_handler({
'message': 'Error on reading from the event loop self pipe',
'exception': exc,
'loop': self,
})
else:
self._self_reading_future = f
f.add_done_callback(self._loop_self_reading)
def _write_to_self(self):
try:
self._csock.send(b'\0')
except OSError:
if self._debug:
logger.debug("Fail to write a null byte into the "
"self-pipe socket",
exc_info=True)
def _start_serving(self, protocol_factory, sock,
sslcontext=None, server=None, backlog=100,
ssl_handshake_timeout=None):
def loop(f=None):
try:
if f is not None:
conn, addr = f.result()
if self._debug:
logger.debug("%r got a new connection from %r: %r",
server, addr, conn)
protocol = protocol_factory()
if sslcontext is not None:
self._make_ssl_transport(
conn, protocol, sslcontext, server_side=True,
extra={'peername': addr}, server=server,
ssl_handshake_timeout=ssl_handshake_timeout)
else:
self._make_socket_transport(
conn, protocol,
extra={'peername': addr}, server=server)
if self.is_closed():
return
f = self._proactor.accept(sock)
except OSError as exc:
if sock.fileno() != -1:
self.call_exception_handler({
'message': 'Accept failed on a socket',
'exception': exc,
'socket': trsock.TransportSocket(sock),
})
sock.close()
elif self._debug:
logger.debug("Accept failed on socket %r",
sock, exc_info=True)
except exceptions.CancelledError:
sock.close()
else:
self._accept_futures[sock.fileno()] = f
f.add_done_callback(loop)
self.call_soon(loop)
def _process_events(self, event_list):
# Events are processed in the IocpProactor._poll() method
pass
def _stop_accept_futures(self):
for future in self._accept_futures.values():
future.cancel()
self._accept_futures.clear()
def _stop_serving(self, sock):
future = self._accept_futures.pop(sock.fileno(), None)
if future:
future.cancel()
self._proactor._stop_serving(sock)
sock.close()
| 36.577465
| 87
| 0.569054
|
0088e39753c85f3a93566693289c33cd78e72505
| 944
|
py
|
Python
|
app.py
|
pltzr/devops-bosch-app
|
5940ef317c7c9e6d05a37eafa429d0f32d6d1f52
|
[
"MIT"
] | null | null | null |
app.py
|
pltzr/devops-bosch-app
|
5940ef317c7c9e6d05a37eafa429d0f32d6d1f52
|
[
"MIT"
] | null | null | null |
app.py
|
pltzr/devops-bosch-app
|
5940ef317c7c9e6d05a37eafa429d0f32d6d1f52
|
[
"MIT"
] | null | null | null |
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
import os
app = Flask(__name__)
# The model name and path are read from environment variables. In production,
# the model is stored in Blob storage, which is "mounted" to the App Service instance.
model_name = os.environ.get("MODEL_NAME")
model_path = os.environ.get("MODEL_PATH")
model = pickle.load(open(model_path + model_name, 'rb'))
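# Illustrative note (assumption, not part of the original app): for local
# testing the same variables could be exported before starting the server,
# e.g.
#
#   export MODEL_PATH=./models/
#   export MODEL_NAME=model.pkl
#   python app.py
#
# On Azure App Service they would be configured as application settings.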
@app.route('/', methods=['GET'])
def home():
return ("To try out this application, please send a POST request to /results. The exact \
requirements can be found inside the Postman collection in the repository!")
@app.route('/results',methods=['POST'])
def results():
data = request.get_json(force=True)
prediction = model.predict([np.array(list(data.values()))])
output = prediction[0]
return jsonify(output)
if __name__ == "__main__":
app.run(debug=True)
| 29.5
| 93
| 0.715042
|
4e429877fdf24eb54a9520d835536c47bd5ce3eb
| 5,940
|
py
|
Python
|
test/torchaudio_unittest/backend/sox_io/torchscript_test.py
|
majabojarska/audio
|
765fde0873ee52be9c1db1fdc7fddedb721f4d0c
|
[
"BSD-2-Clause"
] | null | null | null |
test/torchaudio_unittest/backend/sox_io/torchscript_test.py
|
majabojarska/audio
|
765fde0873ee52be9c1db1fdc7fddedb721f4d0c
|
[
"BSD-2-Clause"
] | null | null | null |
test/torchaudio_unittest/backend/sox_io/torchscript_test.py
|
majabojarska/audio
|
765fde0873ee52be9c1db1fdc7fddedb721f4d0c
|
[
"BSD-2-Clause"
] | null | null | null |
import itertools
from typing import Optional
import torch
import torchaudio
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
skipIfNoExec,
skipIfNoExtension,
get_wav_data,
save_wav,
load_wav,
sox_utils,
)
from .common import (
name_func,
get_enc_params,
)
def py_info_func(filepath: str) -> torchaudio.backend.sox_io_backend.AudioMetaData:
return torchaudio.info(filepath)
def py_load_func(filepath: str, normalize: bool, channels_first: bool):
return torchaudio.load(
filepath, normalize=normalize, channels_first=channels_first)
def py_save_func(
filepath: str,
tensor: torch.Tensor,
sample_rate: int,
channels_first: bool = True,
compression: Optional[float] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
):
torchaudio.save(
filepath, tensor, sample_rate, channels_first,
compression, None, encoding, bits_per_sample)
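# Illustrative note (not part of the original test file): each test below uses
# the same TorchScript round-trip pattern -- compile a helper with
# torch.jit.script, serialize it with .save(), reload it with torch.jit.load,
# and assert that the scripted function matches the eager Python result, e.g.:
#
#   scripted = torch.jit.script(py_info_func)
#   scripted.save("info_func.zip")
#   ts_info_func = torch.jit.load("info_func.zip")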
@skipIfNoExec('sox')
@skipIfNoExtension
class SoxIO(TempDirMixin, TorchaudioTestCase):
"""TorchScript-ability Test suite for `sox_io_backend`"""
backend = 'sox_io'
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2],
)), name_func=name_func)
def test_info_wav(self, dtype, sample_rate, num_channels):
"""`sox_io_backend.info` is torchscript-able and returns the same result"""
audio_path = self.get_temp_path(f'{dtype}_{sample_rate}_{num_channels}.wav')
data = get_wav_data(dtype, num_channels, normalize=False, num_frames=1 * sample_rate)
save_wav(audio_path, data, sample_rate)
script_path = self.get_temp_path('info_func.zip')
torch.jit.script(py_info_func).save(script_path)
ts_info_func = torch.jit.load(script_path)
py_info = py_info_func(audio_path)
ts_info = ts_info_func(audio_path)
assert py_info.sample_rate == ts_info.sample_rate
assert py_info.num_frames == ts_info.num_frames
assert py_info.num_channels == ts_info.num_channels
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2],
[False, True],
[False, True],
)), name_func=name_func)
def test_load_wav(self, dtype, sample_rate, num_channels, normalize, channels_first):
"""`sox_io_backend.load` is torchscript-able and returns the same result"""
audio_path = self.get_temp_path(f'test_load_{dtype}_{sample_rate}_{num_channels}_{normalize}.wav')
data = get_wav_data(dtype, num_channels, normalize=False, num_frames=1 * sample_rate)
save_wav(audio_path, data, sample_rate)
script_path = self.get_temp_path('load_func.zip')
torch.jit.script(py_load_func).save(script_path)
ts_load_func = torch.jit.load(script_path)
py_data, py_sr = py_load_func(
audio_path, normalize=normalize, channels_first=channels_first)
ts_data, ts_sr = ts_load_func(
audio_path, normalize=normalize, channels_first=channels_first)
self.assertEqual(py_sr, ts_sr)
self.assertEqual(py_data, ts_data)
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2],
)), name_func=name_func)
def test_save_wav(self, dtype, sample_rate, num_channels):
script_path = self.get_temp_path('save_func.zip')
torch.jit.script(py_save_func).save(script_path)
ts_save_func = torch.jit.load(script_path)
expected = get_wav_data(dtype, num_channels, normalize=False)
py_path = self.get_temp_path(f'test_save_py_{dtype}_{sample_rate}_{num_channels}.wav')
ts_path = self.get_temp_path(f'test_save_ts_{dtype}_{sample_rate}_{num_channels}.wav')
enc, bps = get_enc_params(dtype)
py_save_func(py_path, expected, sample_rate, True, None, enc, bps)
ts_save_func(ts_path, expected, sample_rate, True, None, enc, bps)
py_data, py_sr = load_wav(py_path, normalize=False)
ts_data, ts_sr = load_wav(ts_path, normalize=False)
self.assertEqual(sample_rate, py_sr)
self.assertEqual(sample_rate, ts_sr)
self.assertEqual(expected, py_data)
self.assertEqual(expected, ts_data)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
list(range(9)),
)), name_func=name_func)
def test_save_flac(self, sample_rate, num_channels, compression_level):
script_path = self.get_temp_path('save_func.zip')
torch.jit.script(py_save_func).save(script_path)
ts_save_func = torch.jit.load(script_path)
expected = get_wav_data('float32', num_channels)
py_path = self.get_temp_path(f'test_save_py_{sample_rate}_{num_channels}_{compression_level}.flac')
ts_path = self.get_temp_path(f'test_save_ts_{sample_rate}_{num_channels}_{compression_level}.flac')
py_save_func(py_path, expected, sample_rate, True, compression_level, None, None)
ts_save_func(ts_path, expected, sample_rate, True, compression_level, None, None)
        # Convert to 32-bit because the flac file has 24-bit depth, which scipy cannot handle.
py_path_wav = f'{py_path}.wav'
ts_path_wav = f'{ts_path}.wav'
sox_utils.convert_audio_file(py_path, py_path_wav, bit_depth=32)
sox_utils.convert_audio_file(ts_path, ts_path_wav, bit_depth=32)
py_data, py_sr = load_wav(py_path_wav, normalize=True)
ts_data, ts_sr = load_wav(ts_path_wav, normalize=True)
self.assertEqual(sample_rate, py_sr)
self.assertEqual(sample_rate, ts_sr)
self.assertEqual(expected, py_data)
self.assertEqual(expected, ts_data)
| 38.076923
| 107
| 0.691077
|
b0efbbcdd16f6901beded3da95826e5e70307dad
| 6,971
|
py
|
Python
|
tests/test_bbclib_assets_v2.py
|
ks91/py-bbclib
|
67bf402445cb44f99cdbd3a1efde8ec0515815d9
|
[
"Apache-2.0"
] | null | null | null |
tests/test_bbclib_assets_v2.py
|
ks91/py-bbclib
|
67bf402445cb44f99cdbd3a1efde8ec0515815d9
|
[
"Apache-2.0"
] | 8
|
2019-07-10T00:58:53.000Z
|
2021-08-25T04:57:06.000Z
|
tests/test_bbclib_assets_v2.py
|
ks91/py-bbclib
|
67bf402445cb44f99cdbd3a1efde8ec0515815d9
|
[
"Apache-2.0"
] | 3
|
2019-07-09T14:45:51.000Z
|
2021-08-04T09:58:27.000Z
|
# -*- coding: utf-8 -*-
import binascii
import sys
sys.path.extend(["../"])
import bbclib
from bbclib import BBcTransaction, BBcRelation, KeyPair, BBcAssetRaw, BBcAssetHash, configure_id_length_all
ID_LEN = 32
configure_id_length_all(ID_LEN)
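# Illustrative note (assumption, not part of the original test): calling
# configure_id_length_all(32) makes py-bbclib use 32-byte identifiers
# throughout, and the explicit [:ID_LEN] slices below keep the test IDs
# consistent with that setting.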
user_id = bbclib.get_new_id("user_id_test1")[:ID_LEN]
user_id2 = bbclib.get_new_id("user_id_test2")[:ID_LEN]
domain_id = bbclib.get_new_id("testdomain")
asset_group_id = bbclib.get_new_id("asset_group_1")[:ID_LEN]
transaction1_id = bbclib.get_new_id("transaction_1")[:ID_LEN]
transaction2_id = bbclib.get_new_id("transaction_2")[:ID_LEN]
keypair1 = KeyPair()
keypair1.generate()
keypair2 = KeyPair()
keypair2.generate()
asset_content = b'abcdefg'
print("\n")
print("private_key:", binascii.b2a_hex(keypair1.private_key))
print("private_key(pem):\n", keypair1.get_private_key_in_pem())
print("public_key:", binascii.b2a_hex(keypair1.public_key))
class TestBBcLibAssetsV2(object):
def test_00_keypair(self):
print("\n-----", sys._getframe().f_code.co_name, "-----")
global keypair1
kp = KeyPair(pubkey=keypair1.public_key)
assert kp.public_key
def test_01_transaction_with_relation_and_witness_and_proof(self):
print("\n-----", sys._getframe().f_code.co_name, "-----")
transaction1 = bbclib.make_transaction(relation_num=1, witness=True)
transaction1.version = 2
bbclib.add_relation_asset(transaction1, relation_idx=0, asset_group_id=asset_group_id,
user_id=user_id, asset_body=b'ccccc')
rtn1 = BBcRelation(asset_group_id=asset_group_id)
rtn2 = BBcRelation(asset_group_id=asset_group_id)
transaction1.add(relation=[rtn1, rtn2])
asid = bbclib.get_new_id("assetraw1")[:ID_LEN]
asset_raw = BBcAssetRaw(asset_id=asid, asset_body=b'1234567890abcdefg')
rtn1.add(asset_raw=asset_raw)
ash = [bbclib.get_new_id("assethash%d"%i)[:ID_LEN] for i in range(1, 4)]
asset_hash = BBcAssetHash(asset_ids=ash)
rtn2.add(asset_hash=asset_hash)
bbclib.add_relation_pointer(transaction1, relation_idx=0, ref_transaction_id=bbclib.get_new_id("dummy1"))
bbclib.add_relation_pointer(transaction1, relation_idx=1, ref_transaction_id=bbclib.get_new_id("dummy2"))
bbclib.add_relation_pointer(transaction1, relation_idx=2, ref_transaction_id=bbclib.get_new_id("dummy3"))
transaction1.witness.add_witness(user_id=user_id)
transaction1.witness.add_witness(user_id=user_id2)
sig = transaction1.sign(private_key=keypair2.private_key, public_key=keypair2.public_key)
if sig is None:
print(bbclib.error_text)
assert sig
transaction1.witness.add_signature(user_id=user_id2, signature=sig)
sig = transaction1.sign(private_key=keypair1.private_key, public_key=keypair1.public_key)
if sig is None:
print(bbclib.error_text)
assert sig
transaction1.witness.add_signature(user_id=user_id, signature=sig)
digest = transaction1.digest()
dat = transaction1.pack()
print("Digest:", binascii.b2a_hex(digest))
print("Serialized data:", binascii.b2a_hex(dat))
print(transaction1)
transaction_tmp = BBcTransaction()
transaction_tmp.unpack(dat)
transaction1 = transaction_tmp
print(transaction1)
assert transaction1.relations[1].asset_raw is not None
assert transaction1.relations[1].asset_raw.asset_id == asid
assert transaction1.relations[2].asset_hash is not None
for i, h in enumerate(transaction1.relations[2].asset_hash.asset_ids):
assert ash[i] == h
digest = transaction1.digest()
ret = transaction1.signatures[0].verify(digest)
print("Proof result:", ret)
if not ret:
print(bbclib.error_text)
assert ret
def test_02_create_transaction_by_utility(self):
print("\n-----", sys._getframe().f_code.co_name, "-----")
transaction1 = bbclib.make_transaction(relation_num=3, witness=True, version=3)
ash = [bbclib.get_new_id("assethash%d"%i)[:ID_LEN] for i in range(7)]
bbclib.add_relation_asset(transaction1, relation_idx=0, asset_group_id=asset_group_id, user_id=user_id, asset_body=b'ccccc')
bbclib.add_relation_asset_raw(transaction1, relation_idx=1, asset_group_id=asset_group_id, asset_id=ash[0])
bbclib.add_relation_asset_hash(transaction1, relation_idx=2, asset_group_id=asset_group_id, asset_ids=ash[1:3])
rtn3 = bbclib.make_relation_with_asset(asset_group_id, user_id, asset_body=b'xxxxxx')
rtn4 = bbclib.make_relation_with_asset_raw(asset_group_id, asset_id=ash[4], asset_body=b'yyyyyy')
rtn5 = bbclib.make_relation_with_asset_hash(asset_group_id, asset_ids=ash[5:])
transaction1.add(relation=[rtn3, rtn4, rtn5])
bbclib.add_relation_pointer(transaction1, relation_idx=0, ref_transaction_id=bbclib.get_new_id("dummy1"))
bbclib.add_relation_pointer(transaction1, relation_idx=1, ref_transaction_id=bbclib.get_new_id("dummy2"))
bbclib.add_relation_pointer(transaction1, relation_idx=2, ref_transaction_id=bbclib.get_new_id("dummy3"))
transaction1.witness.add_witness(user_id=user_id)
transaction1.witness.add_witness(user_id=user_id2)
sig = transaction1.sign(private_key=keypair2.private_key, public_key=keypair2.public_key)
if sig is None:
print(bbclib.error_text)
assert sig
transaction1.witness.add_signature(user_id=user_id2, signature=sig)
sig = transaction1.sign(private_key=keypair1.private_key, public_key=keypair1.public_key)
if sig is None:
print(bbclib.error_text)
assert sig
transaction1.witness.add_signature(user_id=user_id, signature=sig)
digest = transaction1.digest()
dat = transaction1.pack()
print("Digest:", binascii.b2a_hex(digest))
print("Serialized data:", binascii.b2a_hex(dat))
print(transaction1)
transaction_tmp = BBcTransaction()
transaction_tmp.unpack(dat)
transaction1 = transaction_tmp
print(transaction1)
assert transaction1.relations[1].asset_raw is not None
assert transaction1.relations[1].asset_raw.asset_id == ash[0]
assert transaction1.relations[2].asset_hash is not None
for i, h in enumerate(transaction1.relations[2].asset_hash.asset_ids):
assert ash[i+1] == h
assert transaction1.relations[3].asset.asset_body == b'xxxxxx'
assert transaction1.relations[4].asset_raw.asset_id == ash[4]
assert len(transaction1.relations[5].asset_hash.asset_ids) == 2
digest = transaction1.digest()
ret = transaction1.signatures[0].verify(digest)
print("Proof result:", ret)
if not ret:
print(bbclib.error_text)
assert ret
| 44.401274
| 132
| 0.702482
|
e3837a8033216d187e554f5a9f8e501979350467
| 9,075
|
py
|
Python
|
2_deep_codes/3_Siamese_triplet/Evaluate_embedding_space.py
|
bghojogh/Quantile-Quantile-Embedding
|
5daff878a838f6dbeb04cc0b15da2ad66ab9796c
|
[
"MIT"
] | 1
|
2020-09-19T17:50:50.000Z
|
2020-09-19T17:50:50.000Z
|
2_deep_codes/3_Siamese_triplet/Evaluate_embedding_space.py
|
bghojogh/Quantile-Quantile-Embedding
|
5daff878a838f6dbeb04cc0b15da2ad66ab9796c
|
[
"MIT"
] | null | null | null |
2_deep_codes/3_Siamese_triplet/Evaluate_embedding_space.py
|
bghojogh/Quantile-Quantile-Embedding
|
5daff878a838f6dbeb04cc0b15da2ad66ab9796c
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import os
import numpy as np
import umap
import matplotlib.pyplot as plt
import dataset_characteristics
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import pickle
import itertools
class Evaluate_embedding_space():
def __init__(self, checkpoint_dir, model_dir_):
self.checkpoint_dir = checkpoint_dir
self.model_dir_ = model_dir_
self.batch_size = 32
self.n_samples = 100
self.feature_space_dimension = 128
def embed_the_data(self, X, labels, siamese, path_save_embeddings_of_test_data):
print("Embedding the data....")
saver_ = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
succesful_load, latest_epoch = self.load_network_model(saver_=saver_, session_=sess, checkpoint_dir=self.checkpoint_dir,
model_dir_=self.model_dir_)
assert (succesful_load == True)
X = self.normalize_images(X)
test_feed_dict = {
siamese.x1: X
}
embedding = sess.run(siamese.o1, feed_dict=test_feed_dict)
if not os.path.exists(path_save_embeddings_of_test_data+"numpy\\"):
os.makedirs(path_save_embeddings_of_test_data+"numpy\\")
np.save(path_save_embeddings_of_test_data+"numpy\\embedding.npy", embedding)
np.save(path_save_embeddings_of_test_data+"numpy\\labels.npy", labels)
if not os.path.exists(path_save_embeddings_of_test_data+"plots\\"):
os.makedirs(path_save_embeddings_of_test_data+"plots\\")
# plt.figure(200)
plt = self.plot_embedding_of_points(embedding=embedding, labels=labels, n_samples_plot=2000)
plt.savefig(path_save_embeddings_of_test_data+"plots\\" + 'embedding.png')
plt.clf()
plt.close()
return embedding, labels
def normalize_images(self, X_batch):
# also see normalize_images() method in Utils.py
X_batch = X_batch * (1. / 255) - 0.5
return X_batch
def plot_embedding_of_points(self, embedding, labels, n_samples_plot=None):
n_samples = embedding.shape[0]
        if n_samples_plot is not None:
indices_to_plot = np.random.choice(range(n_samples), min(n_samples_plot, n_samples), replace=False)
embedding_sampled = embedding[indices_to_plot, :]
else:
indices_to_plot = [i for i in range(n_samples)]
embedding_sampled = embedding
if embedding.shape[1] == 2:
pass
else:
embedding_sampled = umap.UMAP(n_neighbors=500).fit_transform(embedding_sampled)
n_points = embedding.shape[0]
# n_points_sampled = embedding_sampled.shape[0]
labels_sampled = labels[indices_to_plot]
_, ax = plt.subplots(1, figsize=(14, 10))
classes = dataset_characteristics.get_class_names()
n_classes = len(classes)
plt.scatter(embedding_sampled[:, 0], embedding_sampled[:, 1], s=10, c=labels_sampled, cmap='Spectral',
alpha=1.0)
# plt.setp(ax, xticks=[], yticks=[])
cbar = plt.colorbar(boundaries=np.arange(n_classes + 1) - 0.5)
cbar.set_ticks(np.arange(n_classes))
cbar.set_ticklabels(classes)
return plt
def load_network_model(self, saver_, session_, checkpoint_dir, model_dir_):
# https://stackoverflow.com/questions/33759623/tensorflow-how-to-save-restore-a-model
print(" [*] Reading checkpoints...")
checkpoint_dir = os.path.join(checkpoint_dir, model_dir_)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
saver_.restore(session_, os.path.join(checkpoint_dir, ckpt_name))
print(" [*] Success to read {}".format(ckpt_name))
latest_epoch = int(ckpt_name.split("-")[-1])
return True, latest_epoch
else:
print(" [*] Failed to find a checkpoint")
return False, 0
def classify_with_1NN(self, embedding, labels, path_to_save):
print("KNN on embedding data....")
unique_labels = np.unique(labels)
n_classes = len(unique_labels)
neigh = KNeighborsClassifier(n_neighbors=2) #--> it includes itself too
neigh.fit(embedding, labels)
y_pred = neigh.predict(embedding)
accuracy_test = accuracy_score(y_true=labels, y_pred=y_pred)
conf_matrix_test = confusion_matrix(y_true=labels, y_pred=y_pred)
self.save_np_array_to_txt(variable=np.asarray(accuracy_test), name_of_variable="accuracy_test", path_to_save=path_to_save)
self.save_variable(variable=accuracy_test, name_of_variable="accuracy_test", path_to_save=path_to_save)
# self.plot_confusion_matrix(confusion_matrix=conf_matrix_test, class_names=[str(class_index+1) for class_index in range(n_classes)],
# normalize=True, cmap="gray_r", path_to_save=path_to_save, name="test")
self.plot_confusion_matrix(confusion_matrix=conf_matrix_test, class_names=[str(class_index) for class_index in range(n_classes)],
normalize=True, cmap="gray_r", path_to_save=path_to_save, name="test")
def plot_confusion_matrix(self, confusion_matrix, class_names, normalize=False, cmap="gray", path_to_save="./", name="temp"):
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
confusion_matrix = confusion_matrix.astype('float') / confusion_matrix.sum(axis=1)[:, np.newaxis]
# print("Normalized confusion matrix")
else:
pass
# print('Confusion matrix, without normalization')
# print(cm)
plt.imshow(confusion_matrix, interpolation='nearest', cmap=cmap)
# plt.colorbar()
tick_marks = np.arange(len(class_names))
# plt.xticks(tick_marks, class_names, rotation=45)
plt.xticks(tick_marks, class_names, rotation=0)
plt.yticks(tick_marks, class_names)
# tick_marks = np.arange(len(class_names) - 1)
# plt.yticks(tick_marks, class_names[1:])
fmt = '.2f' if normalize else 'd'
thresh = confusion_matrix.max() / 2.
for i, j in itertools.product(range(confusion_matrix.shape[0]), range(confusion_matrix.shape[1])):
plt.text(j, i, format(confusion_matrix[i, j], fmt),
horizontalalignment="center",
color="white" if confusion_matrix[i, j] > thresh else "black")
# plt.ylabel('True label')
# plt.xlabel('Predicted label')
plt.ylabel('true distortion type')
plt.xlabel('predicted distortion type')
n_classes = len(class_names)
plt.ylim([n_classes - 0.5, -0.5])
plt.tight_layout()
# plt.show()
plt.savefig(path_to_save + name + ".png")
plt.clf()
plt.close()
def save_variable(self, variable, name_of_variable, path_to_save='./'):
# https://stackoverflow.com/questions/6568007/how-do-i-save-and-restore-multiple-variables-in-python
if not os.path.exists(path_to_save): # https://stackoverflow.com/questions/273192/how-can-i-create-a-directory-if-it-does-not-exist
os.makedirs(path_to_save)
file_address = path_to_save + name_of_variable + '.pckl'
f = open(file_address, 'wb')
pickle.dump(variable, f)
f.close()
def load_variable(self, name_of_variable, path='./'):
# https://stackoverflow.com/questions/6568007/how-do-i-save-and-restore-multiple-variables-in-python
file_address = path + name_of_variable + '.pckl'
f = open(file_address, 'rb')
variable = pickle.load(f)
f.close()
return variable
def save_np_array_to_txt(self, variable, name_of_variable, path_to_save='./'):
if type(variable) is list:
variable = np.asarray(variable)
# https://stackoverflow.com/questions/22821460/numpy-save-2d-array-to-text-file/22822701
if not os.path.exists(
path_to_save): # https://stackoverflow.com/questions/273192/how-can-i-create-a-directory-if-it-does-not-exist
os.makedirs(path_to_save)
file_address = path_to_save + name_of_variable + '.txt'
np.set_printoptions(threshold=np.inf, linewidth=np.inf) # turn off summarization, line-wrapping
with open(file_address, 'w') as f:
f.write(np.array2string(variable, separator=', '))
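# --- Usage sketch (editor's addition, not part of the original script) ---
# Minimal illustration of how the evaluator above is meant to be driven.
# The names X_test, y_test and siamese_network, as well as the paths, are
# hypothetical placeholders: the code that builds the Siamese triplet graph
# lives elsewhere in the repository and is not shown in this file.
if __name__ == "__main__":
    evaluator = Evaluate_embedding_space(checkpoint_dir=".\\checkpoints\\",
                                         model_dir_="siamese_triplet")
    # embedding, labels = evaluator.embed_the_data(
    #     X=X_test, labels=y_test, siamese=siamese_network,
    #     path_save_embeddings_of_test_data=".\\results\\embeddings\\")
    # evaluator.classify_with_1NN(embedding, labels, path_to_save=".\\results\\knn\\")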
| 49.054054
| 162
| 0.654545
|
0c7a65cfca9772a60e5af9785e5e4f9d1c361645
| 26,654
|
py
|
Python
|
gluon/cache.py
|
btreecat/web2py
|
7ec50c25b2eced11af8bd2d17f7fce41cd7d8a10
|
[
"BSD-3-Clause"
] | null | null | null |
gluon/cache.py
|
btreecat/web2py
|
7ec50c25b2eced11af8bd2d17f7fce41cd7d8a10
|
[
"BSD-3-Clause"
] | null | null | null |
gluon/cache.py
|
btreecat/web2py
|
7ec50c25b2eced11af8bd2d17f7fce41cd7d8a10
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Basic caching classes and methods
---------------------------------
- Cache - the generic caching object interfacing with the others
- CacheInRam - provides caching in RAM
- CacheOnDisk - provides caching on disk
Memcache is also available via a different module (see gluon.contrib.memcache)
When web2py is running on Google App Engine,
caching will be provided by the GAE memcache
(see gluon.contrib.gae_memcache)
"""
import time
import thread
import os
import gc
import sys
import logging
import re
import random
import hashlib
import datetime
import tempfile
from gluon import recfile
from gluon import portalocker
from collections import defaultdict
try:
from collections import OrderedDict
except ImportError:
from gluon.contrib.ordereddict import OrderedDict
try:
from gluon import settings
have_settings = True
except ImportError:
have_settings = False
try:
import cPickle as pickle
except:
import pickle
try:
import psutil
HAVE_PSUTIL = True
except ImportError:
HAVE_PSUTIL = False
def remove_oldest_entries(storage, percentage=90):
# compute current memory usage (%)
old_mem = psutil.virtual_memory().percent
# if we have data in storage and utilization exceeds 90%
while storage and old_mem > percentage:
        # remove the oldest entry
storage.popitem(last=False)
# garbage collect
gc.collect(1)
        # compute used memory again
new_mem = psutil.virtual_memory().percent
        # if the used memory did not decrease, stop
        if new_mem >= old_mem: break
        # take a new measurement of memory usage and loop
old_mem = new_mem
logger = logging.getLogger("web2py.cache")
__all__ = ['Cache', 'lazy_cache']
DEFAULT_TIME_EXPIRE = 300
class CacheAbstract(object):
"""
Abstract class for cache implementations.
Main function just provides referenced api documentation.
Use CacheInRam or CacheOnDisk instead which are derived from this class.
Note:
        Michele says: gdbm files contain signatures that are read directly by
        the python gdbm adapter, and the detection code on the python side
        often lags behind them.
        Whenever such a gdbm store is probed by the python adapter, the probe
        fails because the gdbm file version is newer.
Using gdbm directly from C would work, because there is backward
compatibility, but not from python!
The .shelve file is discarded and a new one created (with new
signature) and it works until it is probed again...
The possible consequences are memory leaks and broken sessions.
"""
cache_stats_name = 'web2py_cache_statistics'
max_ram_utilization = None # percent
def __init__(self, request=None):
"""Initializes the object
Args:
request: the global request object
"""
raise NotImplementedError
def __call__(self, key, f,
time_expire=DEFAULT_TIME_EXPIRE):
"""
Tries to retrieve the value corresponding to `key` from the cache if the
object exists and if it did not expire, else it calls the function `f`
        and stores the output in the cache corresponding to `key`. In either
        case, the cached or freshly computed value is returned.
Args:
key(str): the key of the object to be stored or retrieved
f(function): the function whose output is to be cached.
If `f` is `None` the cache is cleared.
time_expire(int): expiration of the cache in seconds.
It's used to compare the current time with the time
when the requested object was last saved in cache. It does not
affect future requests. Setting `time_expire` to 0 or negative
value forces the cache to refresh.
"""
raise NotImplementedError
def clear(self, regex=None):
"""
Clears the cache of all keys that match the provided regular expression.
If no regular expression is provided, it clears all entries in cache.
Args:
regex: if provided, only keys matching the regex will be cleared,
otherwise all keys are cleared.
"""
raise NotImplementedError
def increment(self, key, value=1):
"""
Increments the cached value for the given key by the amount in value
Args:
            key(str): key for the cached object to be incremented
value(int): amount of the increment (defaults to 1, can be negative)
"""
raise NotImplementedError
def _clear(self, storage, regex):
"""
Auxiliary function called by `clear` to search and clear cache entries
"""
r = re.compile(regex)
for key in storage.keys():
if r.match(str(key)):
del storage[key]
return
class CacheInRam(CacheAbstract):
"""
Ram based caching
This is implemented as global (per process, shared by all threads)
dictionary.
A mutex-lock mechanism avoid conflicts.
"""
locker = thread.allocate_lock()
meta_storage = {}
stats = {}
def __init__(self, request=None):
self.initialized = False
self.request = request
self.storage = OrderedDict() if HAVE_PSUTIL else {}
self.app = request.application if request else ''
def initialize(self):
if self.initialized:
return
else:
self.initialized = True
self.locker.acquire()
if not self.app in self.meta_storage:
self.storage = self.meta_storage[self.app] = \
OrderedDict() if HAVE_PSUTIL else {}
self.stats[self.app] = {'hit_total': 0, 'misses': 0}
else:
self.storage = self.meta_storage[self.app]
self.locker.release()
def clear(self, regex=None):
self.initialize()
self.locker.acquire()
storage = self.storage
if regex is None:
storage.clear()
else:
self._clear(storage, regex)
if not self.app in self.stats:
self.stats[self.app] = {'hit_total': 0, 'misses': 0}
self.locker.release()
def __call__(self, key, f,
time_expire=DEFAULT_TIME_EXPIRE,
destroyer=None):
"""
Attention! cache.ram does not copy the cached object.
        It just stores a reference to it. It turns out that deepcopying the
        object has some problems:
        - it would break backward compatibility
        - it would be limiting because people may want to cache live objects
        - it would require deepcopying on both storage and retrieval, which
          would make things slow.
Anyway. You can deepcopy explicitly in the function generating the value
to be cached.
"""
self.initialize()
dt = time_expire
now = time.time()
self.locker.acquire()
item = self.storage.get(key, None)
if item and f is None:
del self.storage[key]
if destroyer:
destroyer(item[1])
self.stats[self.app]['hit_total'] += 1
self.locker.release()
if f is None:
return None
if item and (dt is None or item[0] > now - dt):
return item[1]
elif item and (item[0] < now - dt) and destroyer:
destroyer(item[1])
value = f()
self.locker.acquire()
self.storage[key] = (now, value)
self.stats[self.app]['misses'] += 1
if HAVE_PSUTIL and self.max_ram_utilization!=None and random.random()<0.10:
remove_oldest_entries(self.storage, percentage = self.max_ram_utilization)
self.locker.release()
return value
def increment(self, key, value=1):
self.initialize()
self.locker.acquire()
try:
if key in self.storage:
value = self.storage[key][1] + value
self.storage[key] = (time.time(), value)
except BaseException, e:
self.locker.release()
raise e
self.locker.release()
return value
class CacheOnDisk(CacheAbstract):
"""
Disk based cache
This is implemented as a key value store where each key corresponds to a
single file in disk which is replaced when the value changes.
    Disk cache provides persistence when web2py is started/stopped but it is
    slower than `CacheInRam`
    Values stored in disk cache must be picklable.
"""
class PersistentStorage(object):
"""
        Implements a key-based, thread/process-safe storage on disk.
"""
def __init__(self, folder, file_lock_time_wait=0.1):
self.folder = folder
self.key_filter_in = lambda key: key
self.key_filter_out = lambda key: key
self.file_lock_time_wait = file_lock_time_wait # How long we should wait before retrying to lock a file held by another process
# We still need a mutex for each file as portalocker only blocks other processes
self.file_locks = defaultdict(thread.allocate_lock)
# Make sure we use valid filenames.
if sys.platform == "win32":
import base64
def key_filter_in_windows(key):
"""
                    Windows doesn't allow \ / : * ? " < > | in filenames.
                    To work around this, encode the keys with base32.
"""
return base64.b32encode(key)
def key_filter_out_windows(key):
"""
We need to decode the keys so regex based removal works.
"""
return base64.b32decode(key)
self.key_filter_in = key_filter_in_windows
self.key_filter_out = key_filter_out_windows
def wait_portalock(self, val_file):
"""
Wait for the process file lock.
"""
while True:
try:
portalocker.lock(val_file, portalocker.LOCK_EX)
break
except:
time.sleep(self.file_lock_time_wait)
def acquire(self, key):
self.file_locks[key].acquire()
def release(self, key):
self.file_locks[key].release()
def __setitem__(self, key, value):
key = self.key_filter_in(key)
val_file = recfile.open(key, mode='wb', path=self.folder)
self.wait_portalock(val_file)
pickle.dump(value, val_file, pickle.HIGHEST_PROTOCOL)
val_file.close()
def __getitem__(self, key):
key = self.key_filter_in(key)
try:
val_file = recfile.open(key, mode='rb', path=self.folder)
except IOError:
raise KeyError
self.wait_portalock(val_file)
value = pickle.load(val_file)
val_file.close()
return value
def __contains__(self, key):
key = self.key_filter_in(key)
return (key in self.file_locks) or recfile.exists(key, path=self.folder)
def __delitem__(self, key):
key = self.key_filter_in(key)
try:
recfile.remove(key, path=self.folder)
except IOError:
raise KeyError
def __iter__(self):
for dirpath, dirnames, filenames in os.walk(self.folder):
for filename in filenames:
yield self.key_filter_out(filename)
def safe_apply(self, key, function, default_value=None):
"""
Safely apply a function to the value of a key in storage and set
the return value of the function to it.
Return the result of applying the function.
"""
key = self.key_filter_in(key)
exists = True
try:
val_file = recfile.open(key, mode='r+b', path=self.folder)
except IOError:
exists = False
val_file = recfile.open(key, mode='wb', path=self.folder)
self.wait_portalock(val_file)
if exists:
timestamp, value = pickle.load(val_file)
else:
value = default_value
new_value = function(value)
val_file.seek(0)
pickle.dump((time.time(), new_value), val_file, pickle.HIGHEST_PROTOCOL)
val_file.truncate()
val_file.close()
return new_value
def keys(self):
return list(self.__iter__())
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __init__(self, request=None, folder=None):
self.initialized = False
self.request = request
self.folder = folder
self.storage = None
def initialize(self):
if self.initialized:
return
else:
self.initialized = True
folder = self.folder
request = self.request
        # Let's check whether the cache folder exists; if not,
        # we are going to create it
        folder = os.path.join(folder or request.folder, 'cache')
if not os.path.exists(folder):
os.mkdir(folder)
self.storage = CacheOnDisk.PersistentStorage(folder)
def __call__(self, key, f,
time_expire=DEFAULT_TIME_EXPIRE):
self.initialize()
def inc_hit_total(v):
v['hit_total'] += 1
return v
def inc_misses(v):
v['misses'] += 1
return v
dt = time_expire
self.storage.acquire(key)
self.storage.acquire(CacheAbstract.cache_stats_name)
item = self.storage.get(key)
self.storage.safe_apply(CacheAbstract.cache_stats_name, inc_hit_total,
default_value={'hit_total': 0, 'misses': 0})
if item and f is None:
del self.storage[key]
if f is None:
self.storage.release(CacheAbstract.cache_stats_name)
self.storage.release(key)
return None
now = time.time()
if item and ((dt is None) or (item[0] > now - dt)):
value = item[1]
else:
try:
value = f()
except:
self.storage.release(CacheAbstract.cache_stats_name)
self.storage.release(key)
raise
self.storage[key] = (now, value)
self.storage.safe_apply(CacheAbstract.cache_stats_name, inc_misses,
default_value={'hit_total': 0, 'misses': 0})
self.storage.release(CacheAbstract.cache_stats_name)
self.storage.release(key)
return value
def clear(self, regex=None):
self.initialize()
storage = self.storage
if regex is None:
keys = storage
else:
r = re.compile(regex)
keys = (key for key in storage if r.match(key))
for key in keys:
storage.acquire(key)
try:
del storage[key]
except KeyError:
pass
storage.release(key)
def increment(self, key, value=1):
self.initialize()
self.storage.acquire(key)
value = self.storage.safe_apply(key, lambda x: x + value, default_value=0)
self.storage.release(key)
return value
class CacheAction(object):
def __init__(self, func, key, time_expire, cache, cache_model):
self.__name__ = func.__name__
self.__doc__ = func.__doc__
self.func = func
self.key = key
self.time_expire = time_expire
self.cache = cache
self.cache_model = cache_model
def __call__(self, *a, **b):
if not self.key:
key2 = self.__name__ + ':' + repr(a) + ':' + repr(b)
else:
key2 = self.key.replace('%(name)s', self.__name__)\
.replace('%(args)s', str(a)).replace('%(vars)s', str(b))
cache_model = self.cache_model
if not cache_model or isinstance(cache_model, str):
cache_model = getattr(self.cache, cache_model or 'ram')
return cache_model(key2,
lambda a=a, b=b: self.func(*a, **b),
self.time_expire)
class Cache(object):
"""
Sets up generic caching, creating an instance of both CacheInRam and
CacheOnDisk.
In case of GAE will make use of gluon.contrib.gae_memcache.
- self.ram is an instance of CacheInRam
- self.disk is an instance of CacheOnDisk
"""
autokey = ':%(name)s:%(args)s:%(vars)s'
def __init__(self, request):
"""
Args:
request: the global request object
"""
# GAE will have a special caching
if have_settings and settings.global_settings.web2py_runtime_gae:
from gluon.contrib.gae_memcache import MemcacheClient
self.ram = self.disk = MemcacheClient(request)
else:
# Otherwise use ram (and try also disk)
self.ram = CacheInRam(request)
try:
self.disk = CacheOnDisk(request)
except IOError:
logger.warning('no cache.disk (IOError)')
except AttributeError:
# normally not expected anymore, as GAE has already
# been accounted for
logger.warning('no cache.disk (AttributeError)')
def action(self, time_expire=DEFAULT_TIME_EXPIRE, cache_model=None,
prefix=None, session=False, vars=True, lang=True,
user_agent=False, public=True, valid_statuses=None,
quick=None):
"""Better fit for caching an action
Warning:
Experimental!
Currently only HTTP 1.1 compliant
reference : http://code.google.com/p/doctype-mirror/wiki/ArticleHttpCaching
Args:
time_expire(int): same as @cache
cache_model(str): same as @cache
prefix(str): add a prefix to the calculated key
session(bool): adds response.session_id to the key
vars(bool): adds request.env.query_string
lang(bool): adds T.accepted_language
user_agent(bool or dict): if True, adds is_mobile and is_tablet to the key.
Pass a dict to use all the needed values (uses str(.items()))
(e.g. user_agent=request.user_agent()). Used only if session is
not True
public(bool): if False forces the Cache-Control to be 'private'
            valid_statuses: by default only status codes starting with 1,2,3 will be cached.
                pass an explicit list of statuses for which to turn the cache on
            quick: Session,Vars,Lang,User-agent,Public:
                fast overrides with initials, e.g. 'SVLP' or 'VLP'
"""
from gluon import current
from gluon.http import HTTP
def wrap(func):
def wrapped_f():
if current.request.env.request_method != 'GET':
return func()
if time_expire:
cache_control = 'max-age=%(time_expire)s, s-maxage=%(time_expire)s' % dict(time_expire=time_expire)
if quick:
session_ = True if 'S' in quick else False
vars_ = True if 'V' in quick else False
lang_ = True if 'L' in quick else False
user_agent_ = True if 'U' in quick else False
public_ = True if 'P' in quick else False
else:
session_, vars_, lang_, user_agent_, public_ = session, vars, lang, user_agent, public
if not session_ and public_:
cache_control += ', public'
expires = (current.request.utcnow + datetime.timedelta(seconds=time_expire)).strftime('%a, %d %b %Y %H:%M:%S GMT')
else:
cache_control += ', private'
expires = 'Fri, 01 Jan 1990 00:00:00 GMT'
if cache_model:
#figure out the correct cache key
cache_key = [current.request.env.path_info, current.response.view]
if session_:
cache_key.append(current.response.session_id)
elif user_agent_:
if user_agent_ is True:
cache_key.append("%(is_mobile)s_%(is_tablet)s" % current.request.user_agent())
else:
cache_key.append(str(user_agent_.items()))
if vars_:
cache_key.append(current.request.env.query_string)
if lang_:
cache_key.append(current.T.accepted_language)
cache_key = hashlib.md5('__'.join(cache_key)).hexdigest()
if prefix:
cache_key = prefix + cache_key
try:
#action returns something
rtn = cache_model(cache_key, lambda : func(), time_expire=time_expire)
http, status = None, current.response.status
except HTTP, e:
#action raises HTTP (can still be valid)
rtn = cache_model(cache_key, lambda : e.body, time_expire=time_expire)
http, status = HTTP(e.status, rtn, **e.headers), e.status
else:
#action raised a generic exception
http = None
else:
#no server-cache side involved
try:
#action returns something
rtn = func()
http, status = None, current.response.status
except HTTP, e:
#action raises HTTP (can still be valid)
status = e.status
http = HTTP(e.status, e.body, **e.headers)
else:
#action raised a generic exception
http = None
send_headers = False
if http and isinstance(valid_statuses, list):
if status in valid_statuses:
send_headers = True
elif valid_statuses is None:
if str(status)[0] in '123':
send_headers = True
if send_headers:
headers = {
'Pragma' : None,
'Expires' : expires,
'Cache-Control' : cache_control
}
current.response.headers.update(headers)
if cache_model and not send_headers:
#we cached already the value, but the status is not valid
#so we need to delete the cached value
cache_model(cache_key, None)
if http:
if send_headers:
http.headers.update(current.response.headers)
raise http
return rtn
wrapped_f.__name__ = func.__name__
wrapped_f.__doc__ = func.__doc__
return wrapped_f
return wrap
def __call__(self,
key=None,
time_expire=DEFAULT_TIME_EXPIRE,
cache_model=None):
"""
Decorator function that can be used to cache any function/method.
Args:
key(str) : the key of the object to be store or retrieved
time_expire(int) : expiration of the cache in seconds
`time_expire` is used to compare the current time with the time
when the requested object was last saved in cache.
It does not affect future requests.
Setting `time_expire` to 0 or negative value forces the cache to
refresh.
cache_model(str): can be "ram", "disk" or other (like "memcache").
Defaults to "ram"
When the function `f` is called, web2py tries to retrieve
the value corresponding to `key` from the cache if the
        object exists and if it did not expire, else it calls the function `f`
        and stores the output in the cache corresponding to `key`. In either case
        the output of the function is returned.
Example: ::
@cache('key', 5000, cache.ram)
def f():
return time.ctime()
Note:
If the function `f` is an action, we suggest using
@cache.action instead
"""
def tmp(func, cache=self, cache_model=cache_model):
return CacheAction(func, key, time_expire, self, cache_model)
return tmp
@staticmethod
def with_prefix(cache_model, prefix):
"""
        Allows replacing cache.ram with cache.with_prefix(cache.ram, 'prefix');
        it will add the prefix to all cache keys used.
"""
return lambda key, f, time_expire=DEFAULT_TIME_EXPIRE, prefix=prefix:\
cache_model(prefix + key, f, time_expire)
def lazy_cache(key=None, time_expire=None, cache_model='ram'):
"""
Can be used to cache any function including ones in modules,
as long as the cached function is only called within a web2py request
If a key is not provided, one is generated from the function name
`time_expire` defaults to None (no cache expiration)
If cache_model is "ram" then the model is current.cache.ram, etc.
"""
def decorator(f, key=key, time_expire=time_expire, cache_model=cache_model):
key = key or repr(f)
def g(*c, **d):
from gluon import current
return current.cache(key, time_expire, cache_model)(f)(*c, **d)
g.__name__ = f.__name__
return g
return decorator
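# --- Usage sketch (editor's addition, not part of web2py itself) ---
# A minimal sketch of the caching patterns documented above. It assumes it is
# called from inside a web2py application where a Cache instance is available
# (in a controller this is the global `cache`); the key names and the helper
# function below are hypothetical.
def _cache_usage_example(cache):
    import time as _time

    # decorator form: the body runs at most once every 5 seconds per key
    @cache('slow_timestamp', time_expire=5, cache_model='ram')
    def slow_timestamp():
        return _time.ctime()

    # explicit call form, equivalent to the decorator above
    value = cache.ram('slow_timestamp', lambda: _time.ctime(), time_expire=5)

    # key namespacing via with_prefix, as described in its docstring
    app_ram = Cache.with_prefix(cache.ram, 'myapp:')
    namespaced = app_ram('slow_timestamp', lambda: _time.ctime(), time_expire=5)

    # lazy_cache is meant for module-level functions and resolves
    # current.cache at call time, e.g.:
    #
    #     @lazy_cache('heavy_lookup', time_expire=60, cache_model='ram')
    #     def heavy_lookup(): ...
    return slow_timestamp(), value, namespaced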
| 34.979003
| 139
| 0.569258
|
40009c0ce7cb5ebcbc9a0d3585c2a87f11a471af
| 1,134
|
py
|
Python
|
RecoTauTag/HLTProducers/python/TauRegionalPixelSeedGenerator_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
RecoTauTag/HLTProducers/python/TauRegionalPixelSeedGenerator_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
RecoTauTag/HLTProducers/python/TauRegionalPixelSeedGenerator_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
tauRegionalPixelSeedGenerator = cms.EDProducer("SeedGeneratorFromRegionHitsEDProducer",
OrderedHitsFactoryPSet = cms.PSet(
ComponentName = cms.string('StandardHitPairGenerator'),
SeedingLayers = cms.InputTag('PixelLayerPairs')
),
SeedComparitorPSet = cms.PSet(
ComponentName = cms.string('none')
),
RegionFactoryPSet = cms.PSet(
ComponentName = cms.string('TauRegionalPixelSeedGenerator'),
RegionPSet = cms.PSet(
precise = cms.bool(True),
deltaPhiRegion = cms.double(0.1),
originHalfLength = cms.double(0.2),
originRadius = cms.double(0.2),
deltaEtaRegion = cms.double(0.1),
ptMin = cms.double(5.0),
JetSrc = cms.InputTag("icone5Tau1"),
originZPos = cms.double(0.0),
vertexSrc = cms.InputTag("pixelVertices"),
howToUseMeasurementTracker = cms.string("ForSiStrips"),
measurementTrackerName = cms.InputTag("MeasurementTrackerEvent"),
)
),
TTRHBuilder = cms.string('WithTrackAngle')
)
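# --- Usage sketch (editor's addition, not part of the original cfi) ---
# Hedged illustration of how a cfi fragment like this is typically consumed
# in a CMSSW configuration: import it, clone it with adjusted parameters and
# attach it to the process. The process name and parameter values below are
# hypothetical.
def _example_attach_to_process():
    process = cms.Process("TAUSEEDDEMO")
    process.tauRegionalPixelSeedGenerator = tauRegionalPixelSeedGenerator.clone()
    # tighten the regional pT threshold and point at a different jet collection
    process.tauRegionalPixelSeedGenerator.RegionFactoryPSet.RegionPSet.ptMin = cms.double(10.0)
    process.tauRegionalPixelSeedGenerator.RegionFactoryPSet.RegionPSet.JetSrc = cms.InputTag("hltL2TauJets")
    return process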
| 36.580645
| 87
| 0.636684
|
25ce218b60778f89f1a8ca8993737ceddebc15b8
| 19,658
|
py
|
Python
|
v7/sphinx_roles/sphinx_roles.py
|
pluser/nikola_plugins
|
2a95cc21d996e856b24c151d577939de41a65e6c
|
[
"MIT"
] | null | null | null |
v7/sphinx_roles/sphinx_roles.py
|
pluser/nikola_plugins
|
2a95cc21d996e856b24c151d577939de41a65e6c
|
[
"MIT"
] | null | null | null |
v7/sphinx_roles/sphinx_roles.py
|
pluser/nikola_plugins
|
2a95cc21d996e856b24c151d577939de41a65e6c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright © 2012-2013 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import datetime
import re
from docutils import nodes, utils
from docutils.parsers.rst import Directive, directives, roles
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from docutils.transforms import Transform
from docutils import languages
from nikola.plugin_categories import RestExtension
from nikola.plugins.compile.rest import add_node
class Plugin(RestExtension):
name = "rest_sphinx_roles"
def set_site(self, site):
self.site = site
roles.register_local_role('pep', pep_role)
roles.register_local_role('rfc', rfc_role)
roles.register_local_role('term', term_role)
roles.register_local_role('option', option_role)
roles.register_local_role('ref', ref_role)
# This is copied almost verbatim from Sphinx
generic_docroles = {
'command': nodes.strong,
'dfn': nodes.emphasis,
'kbd': nodes.literal,
'mailheader': nodes.emphasis,
'makevar': nodes.strong,
'manpage': nodes.emphasis,
'mimetype': nodes.emphasis,
'newsgroup': nodes.emphasis,
'program': nodes.strong,
'regexp': nodes.literal,
}
for rolename, nodeclass in generic_docroles.items():
generic = roles.GenericRole(rolename, nodeclass)
role = roles.CustomRole(rolename, generic, {'classes': [rolename]})
roles.register_local_role(rolename, role)
specific_docroles = {
'guilabel': menusel_role,
'menuselection': menusel_role,
'file': emph_literal_role,
'samp': emph_literal_role,
'abbr': abbr_role,
}
for rolename, func in specific_docroles.items():
roles.register_local_role(rolename, func)
# Handle abbr title
add_node(abbreviation, visit_abbreviation, depart_abbreviation)
for name, (base_url, prefix) in self.site.config.get('EXTLINKS', {}).items():
roles.register_local_role(name, make_link_role(base_url, prefix))
directives.register_directive('deprecated', VersionChange)
directives.register_directive('versionadded', VersionChange)
directives.register_directive('versionchanged', VersionChange)
directives.register_directive('centered', Centered)
directives.register_directive('hlist', HList)
directives.register_directive('seealso', SeeAlso)
directives.register_directive('glossary', Glossary)
directives.register_directive('option', Option)
site.rst_transforms.append(Today)
return super(Plugin, self).set_site(site)
# TODO: pep_role and rfc_role are similar enough that they
# should be a generic function called via partial
def pep_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""Enhanced PEP role supporting anchors, for Sphinx compatibility."""
anchor = ''
anchorindex = text.find('#')
if anchorindex > 0:
text, anchor = text[:anchorindex], text[anchorindex:]
try:
pepnum = int(text)
except ValueError:
msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
ref = inliner.document.settings.pep_base_url + 'pep-%04d' % pepnum
sn = nodes.strong('PEP ' + text, 'PEP ' + text)
rn = nodes.reference('', '', internal=False, refuri=ref + anchor,
classes=[name])
rn += sn
return [rn], []
explicit_title_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)
def split_explicit_title(text):
"""Split role content into title and target, if given."""
match = explicit_title_re.match(text)
if match:
return True, match.group(1), match.group(2)
return False, text, text
def rfc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""Enhanced RFC role supporting anchors, for Sphinx compatibility."""
anchor = ''
anchorindex = text.find('#')
if anchorindex > 0:
text, anchor = text[:anchorindex], text[anchorindex:]
try:
rfcnum = int(text)
except ValueError:
        msg = inliner.reporter.error('invalid RFC number %s' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum
sn = nodes.strong('RFC ' + text, 'RFC ' + text)
rn = nodes.reference('', '', internal=False, refuri=ref + anchor,
classes=[name])
rn += sn
return [rn], []
# The code below is based in code from Sphinx
# Copyright (c) 2007-2013 by the Sphinx team (see AUTHORS file).
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
_litvar_re = re.compile('{([^}]+)}')
def emph_literal_role(typ, rawtext, text, lineno, inliner,
options={}, content=[]):
text = utils.unescape(text)
pos = 0
retnode = nodes.literal(role=typ.lower(), classes=[typ])
for m in _litvar_re.finditer(text):
if m.start() > pos:
txt = text[pos:m.start()]
retnode += nodes.Text(txt, txt)
retnode += nodes.emphasis(m.group(1), m.group(1))
pos = m.end()
if pos < len(text):
retnode += nodes.Text(text[pos:], text[pos:])
return [retnode], []
_amp_re = re.compile(r'(?<!&)&(?![&\s])')
def menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
text = utils.unescape(text)
if typ == 'menuselection':
text = text.replace('-->', u'\N{TRIANGULAR BULLET}')
spans = _amp_re.split(text)
node = nodes.emphasis(rawtext=rawtext)
for i, span in enumerate(spans):
span = span.replace('&&', '&')
if i == 0:
if len(span) > 0:
textnode = nodes.Text(span)
node += textnode
continue
accel_node = nodes.inline()
letter_node = nodes.Text(span[0])
accel_node += letter_node
accel_node['classes'].append('accelerator')
node += accel_node
textnode = nodes.Text(span[1:])
node += textnode
node['classes'].append(typ)
return [node], []
def make_link_role(base_url, prefix):
def role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
text = utils.unescape(text)
has_explicit_title, title, part = split_explicit_title(text)
try:
full_url = base_url % part
except (TypeError, ValueError):
inliner.reporter.warning(
'unable to expand %s extlink with base URL %r, please make '
'sure the base contains \'%%s\' exactly once'
% (typ, base_url), line=lineno)
full_url = base_url + part
if not has_explicit_title:
if prefix is None:
title = full_url
else:
title = prefix + part
pnode = nodes.reference(title, title, internal=False, refuri=full_url)
return [pnode], []
return role
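# --- Usage sketch (editor's addition) ---
# make_link_role is driven by the EXTLINKS dict that Plugin.set_site reads
# from the Nikola config: each entry maps a role name to a (base_url, prefix)
# pair, where base_url must contain exactly one '%s' and prefix may be None.
# The concrete URL below is only a hypothetical example.
_EXTLINKS_EXAMPLE = {
    'issue': ('https://github.com/getnikola/nikola/issues/%s', 'issue '),
}
# With such a setting, ``:issue:`1234``` in a post renders as a link titled
# "issue 1234" pointing at .../issues/1234.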
def set_source_info(directive, node):
node.source, node.line = \
directive.state_machine.get_source_and_line(directive.lineno)
# FIXME: needs translations
versionlabels = {
'versionadded': 'New in version %s',
'versionchanged': 'Changed in version %s',
'versionmodified': 'Changed in version %s',
'deprecated': 'Deprecated since version %s',
}
class VersionChange(Directive):
"""
Directive to describe a change/addition/deprecation in a specific version.
"""
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
node = nodes.paragraph()
node['classes'] = ['versionadded']
node.document = self.state.document
set_source_info(self, node)
node['type'] = self.name
node['version'] = self.arguments[0]
text = versionlabels[self.name] % self.arguments[0]
if len(self.arguments) == 2:
inodes, messages = self.state.inline_text(self.arguments[1],
self.lineno + 1)
para = nodes.paragraph(self.arguments[1], '', *inodes)
set_source_info(self, para)
node.append(para)
else:
messages = []
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
if len(node):
if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
content = nodes.inline(node[0].rawsource, translatable=True)
content.source = node[0].source
content.line = node[0].line
content += node[0].children
node[0].replace_self(nodes.paragraph('', '', content))
node[0].insert(0, nodes.inline('', '%s: ' % text,
classes=['versionmodified']))
else:
para = nodes.paragraph('', '', nodes.inline('', '%s.' % text, classes=['versionmodified']))
node.append(para)
language = languages.get_language(self.state.document.settings.language_code,
self.state.document.reporter)
language.labels.update(versionlabels)
return [node] + messages
class Centered(Directive):
"""
Directive to create a centered line of bold text.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
def run(self):
if not self.arguments:
return []
p_node = nodes.paragraph()
p_node['classes'] = ['centered']
strong_node = nodes.strong()
inodes, messages = self.state.inline_text(self.arguments[0],
self.lineno)
strong_node.extend(inodes)
p_node.children.append(strong_node)
return [p_node] + messages
class HList(Directive):
"""
Directive for a list that gets compacted horizontally.
This differs from Sphinx's implementation in that it generates a table
here at the directive level instead of creating a custom node and doing
it on the writer.
"""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'columns': int,
}
def run(self):
ncolumns = self.options.get('columns', 2)
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
if len(node.children) != 1 or not isinstance(node.children[0],
nodes.bullet_list):
return [self.state.document.reporter.warning(
'.. hlist content is not a list', line=self.lineno)]
fulllist = node.children[0]
# create a hlist node where the items are distributed
npercol, nmore = divmod(len(fulllist), ncolumns)
index = 0
table = nodes.table()
tg = nodes.tgroup()
table += tg
row = nodes.row()
tbody = nodes.tbody()
for column in range(ncolumns):
endindex = index + (column < nmore and (npercol + 1) or npercol)
colspec = nodes.colspec()
colspec.attributes['stub'] = 0
colspec.attributes['colwidth'] = 100. / ncolumns
col = nodes.entry()
col += nodes.bullet_list()
col[0] += fulllist.children[index:endindex]
index = endindex
tg += colspec
row += col
tbody += row
tg += tbody
table['classes'].append('hlist')
return [table]
class SeeAlso(BaseAdmonition):
"""
An admonition mentioning things to look at as reference.
"""
node_class = nodes.admonition
def run(self):
"""Minor monkeypatch to set the title and classes right."""
self.arguments = ['See also']
node_list = BaseAdmonition.run(self)
node_list[0]['classes'] = ['admonition', 'seealso']
return node_list
class Glossary(Directive):
has_content = True
def run(self):
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
node[0]['classes'] = ['glossary', 'docutils']
# Set correct IDs for terms
for term in node[0]:
new_id = 'term-' + nodes.make_id(term[0].astext())
term[0]['ids'].append(new_id)
return [node[0]]
def term_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
# FIXME add stylable span inside link
text = utils.unescape(text)
target = '#term-' + nodes.make_id(text)
pnode = nodes.reference(text, text, internal=True, refuri=target)
pnode['classes'] = ['reference']
return [pnode], []
class Option(Directive):
has_content = True
required_arguments = 1
def run(self):
refid = 'cmdoption-arg-' + nodes.make_id(self.arguments[0])
target = nodes.target(names=[refid], ids=[refid])
dl = nodes.definition_list()
dt = nodes.definition_list_item()
term = nodes.term()
term += nodes.literal(self.arguments[0], self.arguments[0], classes=["descname"])
dt += term
definition = nodes.definition()
dt += definition
definition.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, definition)
dl += dt
return [target, dl]
def option_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
# FIXME add stylable span inside link
text = utils.unescape(text)
target = '#cmdoption-arg-' + nodes.make_id(text)
pnode = nodes.reference(text, text, internal=True, refuri=target)
pnode['classes'] = ['reference']
return [pnode], []
_ref_re = re.compile('^(.*)<(.*)>$')
def ref_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
"""Reimplementation of Sphinx's ref role,"""
msg_list = []
match = _ref_re.match(text)
if match is not None:
text = match.groups()[0].strip()
target = '#' + match.groups()[1]
pnode = nodes.reference(text, text, internal=True, refuri=target)
else:
class RefVisitor(nodes.NodeVisitor, object):
text = None
def __init__(self, document, label):
self._label = label
super(RefVisitor, self).__init__(document)
def visit_target(self, node):
if self._label not in node.attributes['ids']:
return
else:
sibs = node.parent.children
next_sib = sibs[sibs.index(node) + 1]
if isinstance(next_sib, nodes.figure): # text has to be the figure caption
self.text = [x for x in next_sib.children if isinstance(x, nodes.caption)][0].astext()
elif isinstance(next_sib, nodes.section): # text has to be the title
self.text = next_sib.attributes['names'][0].title()
def unknown_visit(self, node):
pass
visitor = RefVisitor(inliner.document, text)
inliner.document.walk(visitor)
if visitor.text is None:
msg_list.append(inliner.reporter.error("ref label {} is missing or not immediately before figure or section.".format(text)))
target = '#' + text
pnode = nodes.reference(text, visitor.text, internal=True, refuri=target)
pnode['classes'] = ['reference']
return [pnode], msg_list
_abbr_re = re.compile('\((.*)\)$', re.S)
class abbreviation(nodes.Inline, nodes.TextElement):
"""Node for abbreviations with explanations."""
def visit_abbreviation(self, node):
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
text = utils.unescape(text)
m = _abbr_re.search(text)
if m is None:
return [abbreviation(text, text)], []
abbr = text[:m.start()].strip()
expl = m.group(1)
return [abbreviation(abbr, abbr, explanation=expl)], []
class Today(Transform):
"""
Replace today with the date if it's not defined in the document.
"""
# run before the default Substitutions
default_priority = 210
def apply(self, **kwargs):
# only handle it if not otherwise defined in the document
to_handle = set(['today']) - set(self.document.substitution_defs)
for ref in self.document.traverse(nodes.substitution_reference):
refname = ref['refname']
if refname in to_handle:
txt = datetime.datetime.today().strftime('%x')
node = nodes.Text(txt, txt)
ref.replace_self(node)
| 36.403704
| 136
| 0.623766
|
7eb7abe4f157c7098877b75a9d3d720d3ac9d90b
| 2,578
|
py
|
Python
|
skl2onnx/algebra/type_helper.py
|
ogrisel/sklearn-onnx
|
0afbe295aa3f1abbcea60f582faac31d16bd3ab0
|
[
"Apache-2.0"
] | 1
|
2021-06-11T22:08:57.000Z
|
2021-06-11T22:08:57.000Z
|
skl2onnx/algebra/type_helper.py
|
ogrisel/sklearn-onnx
|
0afbe295aa3f1abbcea60f582faac31d16bd3ab0
|
[
"Apache-2.0"
] | null | null | null |
skl2onnx/algebra/type_helper.py
|
ogrisel/sklearn-onnx
|
0afbe295aa3f1abbcea60f582faac31d16bd3ab0
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from scipy.sparse import coo_matrix
from ..proto import TensorProto, ValueInfoProto
from ..common._topology import Variable
from ..common.data_types import (
_guess_numpy_type,
_guess_type_proto,
BooleanTensorType,
DataType,
DoubleType,
DoubleTensorType,
FloatType,
FloatTensorType,
Int64Type,
Int64TensorType, Int32TensorType,
StringTensorType
)
def _guess_type(given_type):
"""
Returns the proper type of an input.
"""
def _guess_dim(value):
if value == 0:
return None
return value
if isinstance(given_type, (np.ndarray, coo_matrix)):
shape = list(given_type.shape)
if len(shape) == 0:
# a number
return _guess_numpy_type(given_type.dtype, tuple())
shape[0] = None
return _guess_numpy_type(given_type.dtype, shape)
if isinstance(given_type, (FloatTensorType, Int64TensorType,
Int32TensorType, StringTensorType,
BooleanTensorType, DoubleTensorType)):
return given_type
if isinstance(given_type, Variable):
return given_type.type
if isinstance(given_type, DataType):
return given_type
if isinstance(given_type, TensorProto):
return _guess_type_proto(given_type.data_type,
given_type.dims)
if isinstance(given_type, ValueInfoProto):
ttype = given_type.type.tensor_type
dims = [_guess_dim(ttype.shape.dim[i].dim_value)
for i in range(len(ttype.shape.dim))]
return _guess_type_proto(ttype.elem_type, dims)
if isinstance(given_type, np.int64):
return Int64Type()
if isinstance(given_type, np.float32):
return FloatType()
if isinstance(given_type, np.float64):
return DoubleType()
if given_type.__class__.__name__.endswith("Categorical"):
        # pandas Categorical without importing pandas
return Int64TensorType()
raise NotImplementedError(
"Unsupported type '{}'. You may raise an issue "
"at https://github.com/onnx/sklearn-onnx/issues."
"".format(type(given_type)))
def guess_initial_types(X, initial_types):
if X is None and initial_types is None:
raise NotImplementedError("Initial types must be specified.")
if initial_types is None:
if isinstance(X, np.ndarray):
X = X[:1]
gt = _guess_type(X)
initial_types = [('X', gt)]
return initial_types
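# --- Usage sketch (editor's addition, not part of skl2onnx) ---
# Minimal illustration of guess_initial_types, reusing the numpy import from
# the top of this module: a float32 matrix yields a float tensor type whose
# first (batch) dimension is left dynamic.
def _example_guess_initial_types():
    X = np.zeros((10, 4), dtype=np.float32)
    initial_types = guess_initial_types(X, None)
    # roughly [('X', FloatTensorType([None, 4]))]
    return initial_types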
| 33.051282
| 69
| 0.650504
|
f3d0e56e8261686883d1f93f3814ac41caeb08e8
| 6,725
|
py
|
Python
|
gpgLabs/EM/FEM3loop.py
|
AlainPlattner/gpgLabs
|
2423f0f2a845a5e44304da5e683881c65a9e4792
|
[
"MIT"
] | null | null | null |
gpgLabs/EM/FEM3loop.py
|
AlainPlattner/gpgLabs
|
2423f0f2a845a5e44304da5e683881c65a9e4792
|
[
"MIT"
] | null | null | null |
gpgLabs/EM/FEM3loop.py
|
AlainPlattner/gpgLabs
|
2423f0f2a845a5e44304da5e683881c65a9e4792
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import warnings
warnings.filterwarnings('ignore')
from ipywidgets import interactive, IntSlider, widget, FloatText, FloatSlider, Checkbox
def mind(x,y,z,dincl,ddecl,x0,y0,z0,aincl,adecl):
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
z = np.array(z, dtype=float)
x0 = np.array(x0, dtype=float)
y0 = np.array(y0, dtype=float)
z0 = np.array(z0, dtype=float)
dincl = np.array(dincl, dtype=float)
ddecl = np.array(ddecl, dtype=float)
aincl = np.array(aincl, dtype=float)
adecl = np.array(adecl, dtype=float)
di=np.pi*dincl/180.0
dd=np.pi*ddecl/180.0
cx=np.cos(di)*np.cos(dd)
cy=np.cos(di)*np.sin(dd)
cz=np.sin(di)
ai=np.pi*aincl/180.0
ad=np.pi*adecl/180.0
ax=np.cos(ai)*np.cos(ad)
ay=np.cos(ai)*np.sin(ad)
az=np.sin(ai)
# begin the calculation
a=x-x0
b=y-y0
h=z-z0
rt=np.sqrt(a**2.+b**2.+h**2.)**5.
txy=3.*a*b/rt
txz=3.*a*h/rt
tyz=3.*b*h/rt
txx=(2.*a**2.-b**2.-h**2.)/rt
tyy=(2.*b**2.-a**2.-h**2.)/rt
tzz=-(txx+tyy)
bx= (txx*cx+txy*cy+txz*cz)
by= (txy*cx+tyy*cy+tyz*cz)
bz= (txz*cx+tyz*cy+tzz*cz)
return bx*ax+by*ay+bz*az
def fem3loop(L,R,xc,yc,zc,dincl,ddecl,S,ht,f,xmin,xmax,dx,showDataPts=False):
L = np.array(L, dtype=float)
R = np.array(R, dtype=float)
xc = np.array(xc, dtype=float)
yc = np.array(yc, dtype=float)
zc = np.array(zc, dtype=float)
dincl = np.array(dincl, dtype=float)
ddecl = np.array(ddecl, dtype=float)
S = np.array(S, dtype=float)
ht = np.array(ht, dtype=float)
f = np.array(f, dtype=float)
dx = np.array(dx, dtype=float)
ymin = xmin
ymax = xmax
dely = dx
# generate the grid
xp=np.arange(xmin,xmax,dx)
yp=np.arange(ymin,ymax,dely)
[y,x]=np.meshgrid(yp,xp)
z=0.*x-ht
# set up the response arrays
real_response=0.0*x
imag_response=0.0*x
# frequency characteristics
alpha=2.*np.pi*f*L/R
f_factor=(alpha**2.+1j*alpha)/(1+alpha**2.)
amin=0.01
amax=100.
da=4./40.
alf=np.arange(-2.,2.,da)
alf=10.**alf
fre=alf**2./(1.+alf**2.)
fim=alf/(1.+alf**2.)
# simulate anomalies
yt=y-S/2.
yr=y+S/2.
dm=-S/2.
dp= S/2.
M13=mind(0.,dm,0.,90.,0., 0., dp, 0., 90.,0.)
M12=L*mind(x,yt,z,90.,0.,xc,yc,zc,dincl,ddecl)
M23=L*mind(xc,yc,zc,dincl,ddecl,x,yr,z,90.,0.)
c_response=-M12*M23*f_factor/(M13*L)
# scaled to simulate a net volumetric effect
if np.logical_and(dincl==0., ddecl==0.):
real_response=np.real(c_response)*0.
imag_response=np.imag(c_response)*0.
else:
real_response=np.real(c_response)*1000.
imag_response=np.imag(c_response)*1000.
fig, ax = plt.subplots(2,2, figsize = (14,8))
ax[0][0].semilogx(alf,fre,'.-b')
ax[0][0].semilogx(alf,fim,'.--g')
ax[0][0].plot([alpha, alpha],[0., 1.],'-k')
ax[0][0].legend(['Real','Imag'],loc=2)
ax[0][0].set_xlabel('$\\alpha = \\omega L /R$')
ax[0][0].set_ylabel('Frequency Response')
ax[0][0].set_title('Plot 1: EM responses of loop')
ax[0][0].grid(which='major', color = '0.6', linestyle='-',linewidth='0.5')
ax[0][0].grid(which='minor',color='0.6',linestyle='-',linewidth='0.5')
kx = int(np.ceil(xp.size/2.))
ax[0][1].plot(y[kx,:],real_response[kx,:],'.-b') # kx
ax[0][1].plot(y[kx,:],imag_response[kx,:],'.--g')
# ax[0][1].legend(['Real','Imag'],loc=2)
ax[0][1].set_xlabel('Easting')
ax[0][1].set_ylabel('H$_s$/H$_p$')
ax[0][1].set_title('Plot 2: EW cross section along Northing = %1.1f' %(x[kx,0]))
ax[0][1].grid(which='major', color = '0.6', linestyle='-',linewidth='0.5')
ax[0][1].grid(which='minor',color='0.6',linestyle='-',linewidth='0.5')
ax[0][1].set_xlim(np.r_[xmin,xmax])
vminR = real_response.min()
vmaxR = real_response.max()
ax[1][0].plot(np.r_[xp.min(),xp.max()], np.zeros(2), 'k--', lw=1)
clb = plt.colorbar(ax[1][0].imshow(real_response,extent=[xp.min(),xp.max(),yp.min(),yp.max()], vmin = vminR, vmax = vmaxR),ax=ax[1][0])
ax[1][0].set_xlim(np.r_[xmin,xmax])
ax[1][0].set_ylim(np.r_[xmin,xmax])
ax[1][0].set_xlabel('Easting (m)')
ax[1][0].set_ylabel('Northing (m)')
ax[1][0].set_title('Plot 3: Real Component')
# ax[1][0].colorbar()
clb.set_label('H$_s$/H$_p$')
if showDataPts:
XP, YP = np.meshgrid(xp,yp)
ax[1][0].plot(XP,YP,'.',color=[0.2,0.2,0.2])
vminI = imag_response.min()
vmaxI = imag_response.max()
ax[1][1].plot(np.r_[xp.min(),xp.max()], np.zeros(2), 'k--', lw=1)
clb = plt.colorbar(ax[1][1].imshow(imag_response,extent=[xp.min(),xp.max(),yp.min(),yp.max()], vmin = vminI, vmax = vmaxI),ax=ax[1][1])
ax[1][1].set_xlim(np.r_[xmin,xmax])
ax[1][1].set_ylim(np.r_[xmin,xmax])
ax[1][1].set_xlabel('Easting (m)')
ax[1][1].set_ylabel('Northing (m)')
ax[1][1].set_title('Plot 4: Imag Component')
clb.set_label('H$_s$/H$_p$')
if showDataPts:
ax[1][1].plot(XP,YP,'.',color=[0.2,0.2,0.2])
plt.tight_layout()
plt.show()
def interactfem3loop():
S = 4.
ht = 1.
xmin = -10.
xmax = 10.
zmax = 10.
# xmin = lambda dx: -40.*dx
# xmax = lambda dx: 40.*dx
fem3loopwrap = lambda L,R,yc,xc,zc,dincl,ddecl,f,dx,showDataPts: fem3loop(L,R,-yc,xc,zc,dincl,ddecl,S,ht,f,xmin,xmax,dx,showDataPts)
Q = interactive(fem3loopwrap,
L = FloatSlider(min=0.00,max=0.20,step=0.01,value=0.10, continuous_update=False),
R = FloatSlider(min=0.0,max=20000.,step=1000.,value=2000., continuous_update=False),
xc = FloatSlider(min=-10.,max=10.,step=1.,value=0.0, continuous_update=False),
yc = FloatSlider(min=-10.,max=10.,step=1.,value=0.0, continuous_update=False),
zc = FloatSlider(min=0.,max=zmax,step=0.5,value=1., continuous_update=False),
dincl = FloatSlider(
min=-90.,max=90.,step=1.,value=0., continuous_update=False,
description='I'
),
ddecl = FloatSlider(
min=0.,max=180.,step=1.,value=90., continuous_update=False,
description='D'
),
f = FloatSlider(min=10.,max=19990.,step=10.,value=10000., continuous_update=False),
dx = FloatSlider(min=0.25,max=5.,step=0.25,value=0.25, continuous_update=False),
showDataPts = Checkbox(value=False)
)
return Q
if __name__ == '__main__':
L = 0.1
R = 2000
xc = 0.
yc = 0.
zc = 2.
dincl = 0.
ddecl = 90.
S = 4.
ht = 0.
f = 10000.
xmin = -10.
xmax = 10.
dx = 0.25
fem3loop(L,R,xc,yc,zc,dincl,ddecl,S,ht,f,xmin,xmax,dx)
| 28.862661
| 139
| 0.581115
|
832d92d854d28c97d98d7e79994fbd6c92939c90
| 814
|
py
|
Python
|
yatube/yatube/urls.py
|
annrud/post_publishing_platform
|
c065799203c0800ce05d038f3491fb6184b5cbc7
|
[
"MIT"
] | 1
|
2021-11-09T09:40:37.000Z
|
2021-11-09T09:40:37.000Z
|
yatube/yatube/urls.py
|
annrud/post_publishing_platform
|
c065799203c0800ce05d038f3491fb6184b5cbc7
|
[
"MIT"
] | null | null | null |
yatube/yatube/urls.py
|
annrud/post_publishing_platform
|
c065799203c0800ce05d038f3491fb6184b5cbc7
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
handler404 = 'posts.views.page_not_found'
handler500 = 'posts.views.server_error'
urlpatterns = [
path('auth/', include('users.urls')),
path('auth/', include('django.contrib.auth.urls')),
path('admin/', admin.site.urls),
path('about/', include('about.urls', namespace='about')),
path('', include('posts.urls')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += (path("__debug__/", include(debug_toolbar.urls)),)
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
| 33.916667
| 69
| 0.681818
|
a580ff97f41ceaf8a427f21c1385fd26dfa4d308
| 8,987
|
py
|
Python
|
chiplotter.py
|
mrawls/ELCtools
|
7e15bf24c453ed4300c6f19f01cff74c041158b8
|
[
"MIT"
] | 2
|
2018-10-08T04:59:38.000Z
|
2021-01-19T08:10:04.000Z
|
chiplotter.py
|
mrawls/ELCtools
|
7e15bf24c453ed4300c6f19f01cff74c041158b8
|
[
"MIT"
] | 1
|
2021-07-20T16:42:43.000Z
|
2021-07-20T16:42:43.000Z
|
chiplotter.py
|
mrawls/ELCtools
|
7e15bf24c453ed4300c6f19f01cff74c041158b8
|
[
"MIT"
] | 1
|
2018-07-31T14:36:24.000Z
|
2018-07-31T14:36:24.000Z
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter, MaxNLocator
'''
This handy program turns your ELC output file party into something more useful.
--> Makes a plot of chi^2 vs. fit parameters from a markovELC / geneticELC run
(Only parameters in the generation files with the lowest 10k chi2 values are plotted)
--> Spits out the fit parameters with a measure of uncertainty (+ and -) for all
the parameters in both the generation and ELCparm files
Required file: 'generation.all', created by 'cat generation.1* > generation.all'
Required file: 'ELCparm.all', created by 'cat ELCparm.1* > ELCparm.all'
Required file: 'key.ELCparm' (this is automatically generated by ELC)
Required file: 'gridloop.opt' (you need this to run ELC to start with)
NOTE that in the case of demcmc-ELC, substitute demcmc_fitparm.1* for generation.1*,
substitute demcmc_starparm.1* for ELCparm.1*, and also make a chi.all file from chi.1*.
IF THERE ARE TOO MANY FILES, you may need to use these commands:
find . -name "demcmc_fitparm.1*" -print0 | xargs -0 cat > fitparm.all
find . -name "demcmc_starparm.1*" -print0 | xargs -0 cat > starparm.all
find . -name "chi.1*" -print0 | xargs -0 cat > chi.all
The fit parameters in gridloop.opt are reported in the generation files.
Other parameters of interest which follow from these are reported in the ELCparm files.
The plot will be a 4 x 5 grid. If you are fitting more than 20 parameters in gridloop.opt,
the last ones will be omitted.
'''
# Important filename definitions
gridloopfile = '../../RG_ELCmodeling/9246715/demcmc001_jerry/gridloop.opt'
generationfile = '../../RG_ELCmodeling/9246715/demcmc001_jerry/fitparm.all'
parmfile = '../../RG_ELCmodeling/9246715/demcmc001_jerry/starparm.all'
parmkeyfile = '../../RG_ELCmodeling/9246715/demcmc001_jerry/key.ELCparm'
chi2file = '../../RG_ELCmodeling/9246715/demcmc001_jerry/chi.all' # OMIT FOR MARKOVELC
outfile = '../../RG_ELCmodeling/9246715/demcmc001_jerry/chiplotout_WTF.txt'
out = open(outfile, 'w')
gridloop = [line.rstrip('\n') for line in open(gridloopfile)]
nvars = int(gridloop[10]) # reads the number of fit variables from gridloop file
print('Working, please be patient...')
print('Results will be written to {0}'.format(outfile))
# Read in names and limits for fit variables from gridloop file
varnames = []; varlower = []; varupper = []; varlist = []
for i in range(0, nvars):
varnames.append(gridloop[10+i+1].rstrip())
varlimits = gridloop[10+nvars+i+1]
values = varlimits.split()
varlower.append(float(values[0]))
varupper.append(float(values[1]))
# manually include 2 systemic velocity columns and 4 final columns (t0, tconj, ecc, argper)
varnames.append('gamma1'); varnames.append('gamma2'); varnames.append('t0v2')
varnames.append('tconjv2'); varnames.append('ecc'); varnames.append('argper')
varlower.append(-100); varlower.append(-100); varlower.append(0); varlower.append(0)
varlower.append(0); varlower.append(0)
varupper.append(100); varupper.append(100); varupper.append(1000); varupper.append(1000)
varupper.append(1); varupper.append(360)
# Read in chi^2 and parameter values from generation/fitparm file
# The number of cols in the generation/fitparm file varies for markovELC/demcmcELC
try:
varlist_gen = np.loadtxt(generationfile, usecols=(range(1,nvars+8)), dtype=np.float64, unpack=True)
chi2_gen = varlist_gen[0]
varlist_gen = np.delete(varlist_gen, 0, 0)
demcmc = False
except:
varlist_gen = np.loadtxt(generationfile, usecols=(range(0,nvars+5)), dtype=np.float64, unpack=True)
demcmc = True
print('Read in generation/fitparm file')
# Read in chi^2 and parameter values from ELCparm/starparm file
# The number of cols in the ELCparm/starparm file varies for markovELC/demcmcELC
parmkeys = np.loadtxt(parmkeyfile, comments='#', usecols=(1,), dtype={'names':('parmkeys',),'formats':('|S11',)}, unpack=True)
for idx, entry in enumerate(parmkeys): # remove 'index' from parmkeyfile
entry = str(entry)
if ('index' in entry):
        parmkeys = np.delete(parmkeys, idx, axis=0)
# remove 'chi^2' from parmkeyfile
for idx, entry in enumerate(parmkeys):
entry = str(entry)
if ('chi^2' in entry):
parmkeys = np.delete(parmkeys, idx, axis=0)
nparms = len(parmkeys)
if demcmc == False:
varlist_par = np.loadtxt(parmfile, usecols=(range(1,nparms)), dtype=np.float64, unpack=True)
chi2_par = varlist_par[0]
varlist_par = np.delete(varlist_par, 0, 0)
chi2s = chi2_par # higher precision than chi2_gen, but otherwise identical
elif demcmc == True:
varlist_par = np.loadtxt(parmfile, usecols=(range(0,nparms-1)), dtype=np.float64, unpack=True)
chi2s = np.loadtxt(chi2file, usecols=(1,), dtype=np.float64, unpack=True)
print('Read in ELCparm/starparm file (and chi2 file)')
# Ensure the gen/fitparm, ELC/starparm, and chi2 arrays are all the same length
newlength = min(varlist_gen.shape[1], varlist_par.shape[1], chi2s.shape[0]) # find length of shortest array
varlist_gen = varlist_gen[:,0:newlength]
varlist_par = varlist_par[:,0:newlength]
chi2s = chi2s[0:newlength]
# Sort parameter arrays by chi2, and only keep values with the lowest chi2 values
sorted_varlist_gen = []; sorted_varlist_par = []
chi2cutval = 10000
for array in varlist_gen:
sorted_varlist_gen.append(array[np.argsort(chi2s)][:chi2cutval])
for array in varlist_par:
sorted_varlist_par.append(array[np.argsort(chi2s)][:chi2cutval])
sorted_chi2s = chi2s[np.argsort(chi2s)][:chi2cutval]
deltachi = np.min(sorted_chi2s) + 1
print('Sorted by chi2 and kept only the {0} values with lowest chi2 values'.format(chi2cutval))
# Loop over generation file things (fit parameters) to define x-variables for plot
for i in range (0, nvars+5):
if demcmc == False:
i = i + 1
xvalues = sorted_varlist_gen[i-1]
elif demcmc == True:
xvalues = sorted_varlist_gen[i]
##############################
### PLOTTING STUFF IS HERE ###
if i <= 5:
ax = plt.subplot2grid((4,5),(0,i-1))
elif i > 5 and i <= 10:
ax = plt.subplot2grid((4,5),(1,i-5-1))
elif i > 10 and i <= 15:
ax = plt.subplot2grid((4,5),(2,i-10-1))
elif i > 15 and i <= 20:
ax = plt.subplot2grid((4,5),(3,i-15-1))
#elif i > 20 and i <= 25:
# ax = plt.subplot2grid((5,5),(4,i-20-1))
    ax.xaxis.set_major_locator(MaxNLocator(3)) # limit the number of major x-axis ticks
#ax.yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
ax.xaxis.set_major_formatter(FormatStrFormatter('%.2f'))
plt.subplots_adjust(wspace=0, hspace=0.4)
if i % 5 != 1: ax.set_yticklabels(())
plt.plot(xvalues, sorted_chi2s, marker='o', color='k', mec=None, ms=2, ls='None', markevery=10)
plt.xlabel(varnames[i])
#plt.text(xmin*1.1, ymin*1.1, varnames[i-1])
#xmin = np.min(xfilter)
#xmax = np.max(xfilter)
#plt.axis([xmin, xmax, ymin, ymax])
#plt.ylim((plotymin, plotymax))
#plt.axis([np.min(errorxs), np.max(errorxs), plotymin, plotymax])
### PLOTTING STUFF IS HERE ###
##############################
# Print out fit variables from generation file with uncertainties
bestx = xvalues[0]
errorx = []
for idx, (value, chi2) in enumerate(zip(xvalues, chi2s)):
if (chi2 - chi2s[0]) <= deltachi:
errorx.append(value)
errorxplus = np.max(errorx)
errorxminus = np.min(errorx)
print(varnames[i], '\t =', bestx, '+', errorxplus-bestx, '\ -', bestx-errorxminus, file=out)
# Loop over ELCparm file things
# Print out fit variables from ELCparm file with uncertainties
# Two options for different number of cols, as before
if demcmc == False:
for i in range(1, nparms+1):
xvalues = sorted_varlist_par[i-1]
bestx = xvalues[0]
errorx = []
for idx, (value, chi2) in enumerate(zip(xvalues, chi2s)):
if (chi2 - chi2s[0]) <= deltachi:
errorx.append(value)
errorxplus = np.max(errorx)
errorxminus = np.min(errorx)
print(parmkeys[i-1][0], '\t =', bestx, '+', errorxplus-bestx, '\ -', bestx-errorxminus, file=out)
elif demcmc == True:
for i in range(0, nparms-1):
xvalues = sorted_varlist_par[i]
bestx = xvalues[0]
errorx = []
for idx, (value, chi2) in enumerate(zip(xvalues, chi2s)):
if (chi2 - chi2s[0]) <= deltachi:
errorx.append(value)
errorxplus = np.max(errorx)
errorxminus = np.min(errorx)
print(parmkeys[i][0], '\t =', bestx, '+', errorxplus-bestx, '\ -', bestx-errorxminus, file=out)
out.close()
print('Chi2 ranges from {0} to {1}'.format(np.min(chi2s), np.max(chi2s)))
print('There are {0} parameters explicitly being fit in gridloop.'.format(nvars))
print('Error bars assume a delta-chi2 threshold of {0}.'.format(deltachi))
print('Skipping a plot because that is ugly and slow.')
#print('Here comes a plot...')
#plt.show()
| 46.807292
| 126
| 0.687326
|
7b659ab8e3e537a7500d660ae695fbff9d7922af
| 1,362
|
py
|
Python
|
interpolate_map_traffic.py
|
frongk/sa-air
|
4def5f7734a73e6be03ad842e9eb15275c063ab1
|
[
"MIT"
] | null | null | null |
interpolate_map_traffic.py
|
frongk/sa-air
|
4def5f7734a73e6be03ad842e9eb15275c063ab1
|
[
"MIT"
] | null | null | null |
interpolate_map_traffic.py
|
frongk/sa-air
|
4def5f7734a73e6be03ad842e9eb15275c063ab1
|
[
"MIT"
] | null | null | null |
import pandas
import pickle
from tqdm import tqdm
INTERPOLATION_MAPPING_FILE = 'mapping_coordinates/traffic_map_sa_100.pkl'
DATA_FILE = 'data/traffic/traffic_data.csv'
GRID_FILE = 'data/interpolation_grid_100.csv'
GRID_INDEX = ['Index']
DATA_FIELDS = ['Volume', 'Occupancy', 'Speed']
DATA_INDEX = ['AssetNum']
# time filters
WEEKDAY_NAME = 'Weekday'
WEEKDAY_FILTER = 5
TIME_NAME = 'Time'
TIME_FILTER = 7
OUTPUT_DATA_PATH = 'data'
OUTPUT_DATA_SET_NAME = 'mapgen_traffic.csv'
grid = pandas.read_csv(GRID_FILE,index_col = 0)
raw_data = pandas.read_csv(DATA_FILE)
data = raw_data[(raw_data[WEEKDAY_NAME]==WEEKDAY_FILTER) &
(raw_data[TIME_NAME]==TIME_FILTER)]
data.set_index(DATA_INDEX, inplace=True)
mapping_dict = pickle.load(open(INTERPOLATION_MAPPING_FILE,'rb'))
grid_interpolation_output = []
for point in tqdm(grid.index):
weight_set = mapping_dict[point]['weight_vals']
idx_set = mapping_dict[point]['triangle_idx']
values = data[DATA_FIELDS].loc[idx_set].transpose()
output_vals = [point] + list((values*weight_set).sum(axis=1)/sum(weight_set))
grid_interpolation_output.append(output_vals)
out_df = pandas.DataFrame.from_records(grid_interpolation_output, columns=GRID_INDEX + DATA_FIELDS)
out_df.set_index(*GRID_INDEX,inplace=True)
out_df.to_csv(f'{OUTPUT_DATA_PATH}/{OUTPUT_DATA_SET_NAME}.csv')
| 29.608696
| 99
| 0.76138
|
c9ca343323239ef3b9dedcd8e17cb5ca6e578d0a
| 7,388
|
py
|
Python
|
core/core50_inc_finetuning.py
|
akashgokul/core50
|
4afc491db32c3c58e515e5f6d2bd0425b76aedb9
|
[
"CC-BY-4.0"
] | 103
|
2017-05-11T03:54:58.000Z
|
2022-03-14T03:14:32.000Z
|
core/core50_inc_finetuning.py
|
akashgokul/core50
|
4afc491db32c3c58e515e5f6d2bd0425b76aedb9
|
[
"CC-BY-4.0"
] | 5
|
2018-11-19T09:59:38.000Z
|
2022-03-15T11:19:38.000Z
|
core/core50_inc_finetuning.py
|
akashgokul/core50
|
4afc491db32c3c58e515e5f6d2bd0425b76aedb9
|
[
"CC-BY-4.0"
] | 27
|
2017-08-14T13:16:37.000Z
|
2022-02-23T13:34:30.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# Copyright (c) 2017. Vincenzo Lomonaco. All rights reserved. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 28-04-2017 #
# Author: Vincenzo Lomonaco #
# E-mail: vincenzo.lomonaco@unibo.it #
# Website: vincenzolomonaco.com #
################################################################################
"""
This Sacred script can be used for recording all the incremental experiments
on CORe50 in an external DB and an output file. Sacred is also very useful in order
to guarantee the reproducibility of the experiments and to keep track of them
while they are still running.
This main script can be used independently of the underlying Deep Learning
framework and model implementation. It orchestrates each experiment, performing:
1. The connection and data transfer to the sacred observer.
2. On-the-fly creation of the batches train and test filelists for each run.
3. The multi-run training and testing with the IncFtModel wrapper.
"""
# Sacred dependencies
from sacred import Experiment
from sacred.observers import MongoObserver
# Other dependencies
import numpy as np
import os
# Local dependencies
from inc_finetuning import IncFtModel
from create_sI_filelist import create_sI_run_filelist
from create_sII_filelist import create_sII_run_filelist
from create_sIII_filelist import create_sIII_run_filelist
# Create the Sacred experiment
ex = Experiment('core50 incremental finetuning')
# We add the observer (if you don't have a configured DB
# then simply comment out the line below).
# ex.observers.append(MongoObserver.create(db_name='experiments_db'))
@ex.config
def cfg():
""" Default configuration parameters. Overwritten by specific exps
configurations. See the docs for more information about type and
semantics of each parameter. """
name = 'core50 incremental finetuning - sI'
scenario = 1
img_dim = 128
data_path = '/insert/your/path/'
lmdb_bp = '/insert/your/path/'
filelist_bp = '/insert/your/path/sI'
conf_bp = '/insert/your/path/'
snapshots_bp = '/insert/your/path/'
starting_weights = '/insert/your/path/bvlc_reference_caffenet.caffemodel'
batches_num = 8
num_runs = 10
conf_files = {
'solver_filename': conf_bp + 'inc_solver.prototxt',
'net_filename': conf_bp + 'inc_train_val.prototxt'
}
# first batch learning rate
first_batch_lr = 0.001
# in order of importance
lrs = [
0.001,
0.00005,
0.00001
]
num_inc_it = 100
first_batch_it = 2000
test_minibatch_size = 100
stepsize = first_batch_it
weights_mult = 3
# naive if not specified
strategy = 'fromscratch'
fixed_test_set = True
    # random seed
seed = 1
@ex.automain
def main(img_dim, conf_files, data_path, lmdb_bp, filelist_bp, snapshots_bp,
first_batch_lr, lrs, num_inc_it, first_batch_it, test_minibatch_size,
batches_num, starting_weights, stepsize, weights_mult, strategy,
fixed_test_set, num_runs, seed, scenario):
""" Main script which create the train/test filelists for each batch
and run on-the-fly. Then it trains the model continuously via the
IncFtModel class. """
# setting the seed for reproducibility
batches_idx = [str(x).zfill(2) for x in range(batches_num)]
batch_order = [x for x in range(batches_num)]
np.random.seed(seed)
# For each run we operate sequentially
for run in range(num_runs):
run = str(run)
ex.info[run] = {}
        # In scenarios 2 (NC) and 3 (NIC) the first batch stays fixed;
        # otherwise we shuffle everything
if scenario != 1:
inc_batch_order = batch_order[1:]
np.random.shuffle(inc_batch_order)
batch_order = [0] + inc_batch_order
else:
np.random.shuffle(batch_order)
print("----------- Run " + run + " -----------")
print("batches order: ", batch_order)
print("-----------------------------")
# Setting the meta filelists parameters
path = filelist_bp + '_inc/run' + run + '/'
cpath = filelist_bp + '_cum/run' + run + '/'
for x in [path, cpath]:
if not os.path.exists(x):
os.makedirs(x)
if strategy == 'fromscratch':
cum = True
curr_filelist_bp = cpath
else:
cum = False
curr_filelist_bp = path
# Actually creating the filelists depending on
# the scenario sI (NI), sII (NC), sIII (NIC)
if scenario == 1:
create_sI_run_filelist(
dest_bp=path,
dest_cum_bp=cpath,
cumulative=cum,
batch_order=batch_order
)
elif scenario == 2:
create_sII_run_filelist(
dest_bp=path,
dest_cum_bp=cpath,
cumulative=cum,
batch_order=batch_order
)
elif scenario == 3:
create_sIII_run_filelist(
dest_bp=path,
dest_cum_bp=cpath,
cumulative=cum,
batch_order=batch_order
)
else:
print("Error: scenario not known.")
# Create object incFTModel
inc_model = IncFtModel(img_dim, conf_files, data_path, lmdb_bp,
snapshots_bp, first_batch_lr, lrs, num_inc_it,
first_batch_it, test_minibatch_size,
starting_weights, stepsize, weights_mult,
use_lmdb=False, debug=False, strategy=strategy)
# Now we can train it incrementally
for idx in batches_idx:
if fixed_test_set:
test_filelist = curr_filelist_bp + 'test_filelist.txt'
else:
test_filelist = curr_filelist_bp + 'test_batch_' + \
idx + "_filelist.txt"
s, acc, accs = inc_model.train_batch(
curr_filelist_bp + 'train_batch_' + idx
+ "_filelist.txt", test_filelist)
# Printing batch results
print(s)
print("Acc. per class:")
for i, single_acc in enumerate(accs):
print(str(i) + ': ' + str(round(single_acc, 3)).ljust(
10) + "\t", end="")
if (i + 1) % 5 == 0:
print("")
print("----------------------------")
# Saving them on the DB
if 'inc_accuracy' not in ex.info[run].keys():
ex.info[run]['inc_accuracy'] = []
if 'inc_accuracy per class' not in ex.info[run].keys():
ex.info[run]['inc_accuracy per class'] = []
ex.info[run]['inc_accuracy'].append(acc)
ex.info[run]['inc_accuracy per class'].append(accs.tolist())
| 35.349282
| 80
| 0.563617
|
927a3943588dcafd0b1cc437b548c33a92bf9376
| 2,598
|
py
|
Python
|
examples/texture/receiver.py
|
jlai/Python-SpoutGL
|
01003424b9e6d6a07b346698aae1508853409a45
|
[
"BSD-3-Clause"
] | 5
|
2021-12-30T15:03:52.000Z
|
2022-03-08T14:34:39.000Z
|
examples/texture/receiver.py
|
jlai/Python-SpoutGL
|
01003424b9e6d6a07b346698aae1508853409a45
|
[
"BSD-3-Clause"
] | 1
|
2021-12-08T01:41:17.000Z
|
2021-12-08T01:41:17.000Z
|
examples/texture/receiver.py
|
jlai/Python-SpoutGL
|
01003424b9e6d6a07b346698aae1508853409a45
|
[
"BSD-3-Clause"
] | 1
|
2021-11-24T23:12:49.000Z
|
2021-11-24T23:12:49.000Z
|
import SpoutGL
import pygame
from OpenGL.GL import *
DISPLAY_WIDTH = 800
DISPLAY_HEIGHT = 600
SENDER_NAME = "SpoutGL-texture-test"
def setProjection(width, height):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, width, height, 0, 1, -1)
glMatrixMode(GL_MODELVIEW)
def drawSquare(width, height):
glEnable(GL_TEXTURE_2D)
glBegin(GL_QUADS)
glTexCoord(0, 0)
glVertex2f(0, 0)
glTexCoord(1, 0)
glVertex2f(width, 0)
glTexCoord(1, 1)
glVertex2f(width, height)
glTexCoord(0, 1)
glVertex2f(0, height)
glEnd()
glDisable(GL_TEXTURE_2D)
pygame.init()
pygame.display.set_caption('Texture Receiver Example')
pygame.display.set_mode((DISPLAY_WIDTH, DISPLAY_HEIGHT),
pygame.OPENGL | pygame.DOUBLEBUF)
receiveTextureID = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, receiveTextureID)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glCopyTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 0, 0,
DISPLAY_WIDTH, DISPLAY_HEIGHT, 0)
setProjection(DISPLAY_WIDTH, DISPLAY_HEIGHT)
glClearColor(0.0, 0.0, 0.0, 1.0)
with SpoutGL.SpoutReceiver() as receiver:
receiver.setReceiverName(SENDER_NAME)
buffer = None
width = 0
height = 0
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
result = receiver.receiveTexture(
receiveTextureID, GL_TEXTURE_2D, False, 0)
if receiver.isUpdated():
width = receiver.getSenderWidth()
height = receiver.getSenderHeight()
print("Updated")
# Initialize or update texture size
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, receiveTextureID)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width,
height, 0, GL_RGBA, GL_UNSIGNED_BYTE, None)
setProjection(width, height)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# Draw texture
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, receiveTextureID)
drawSquare(width, height)
pygame.display.flip()
# Wait until the next frame is ready
# Wait time is in milliseconds; note that 0 will return immediately
receiver.waitFrameSync(SENDER_NAME, 1000)
| 26.783505
| 75
| 0.680523
|
4a48ad23cddfa21b6765717dad6ea9da9a7a1a3b
| 1,790
|
py
|
Python
|
contrib/stack/topsStack/plotBursts_reference_secondaries.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,133
|
2022-01-07T21:24:57.000Z
|
2022-01-07T21:33:08.000Z
|
contrib/stack/topsStack/plotBursts_reference_secondaries.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 276
|
2019-02-10T07:18:28.000Z
|
2022-03-31T21:45:55.000Z
|
contrib/stack/topsStack/plotBursts_reference_secondaries.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 235
|
2019-02-10T05:00:53.000Z
|
2022-03-18T07:37:24.000Z
|
#!/usr/bin/env python3
#
# Author: David Bekaert
# Copyright 2018
import os
import glob
import sys
import argparse
def createParser():
'''
Create command line parser.
'''
parser = argparse.ArgumentParser(description='Generate all kml files for the reference and secondary slc')
parser.add_argument('-i', '--i', dest='inputdir', type=str, default="secondarys", help='Input directory')
parser.add_argument('-o', '--o', dest='outputdir', type=str, default="kml_slcs", help='Output directory')
return parser
def cmdLineParse(iargs=None):
'''
Command line parser.
'''
parser = createParser()
return parser.parse_args(args = iargs)
def main(iargs=None):
'''
The main driver.
'''
inps = cmdLineParse(iargs)
outputdir = os.path.abspath(inps.outputdir)
inputdir = os.path.abspath(inps.inputdir)
if not os.path.isdir(outputdir):
os.mkdir(outputdir)
    # see if the reference dir also exists
indir = os.path.abspath(os.path.join(inputdir, '..',"reference"))
if os.path.isdir(inputdir):
outfile = os.path.join(outputdir,'reference.kml')
cmd = "plotBursts.py -i " + indir + " -k " + outfile
print("reference date:")
print(cmd)
os.system(cmd)
### Loop over the different date folders
if os.path.isdir(inputdir):
for dirf in glob.glob(os.path.join(inputdir, '2*')):
vals = dirf.split(os.path.sep)
date = vals[-1]
print(date + ":")
infile = os.path.join(inputdir,date)
outfile = os.path.join(outputdir,date + '.kml')
cmd = "plotBursts.py -i " + infile + " -k " + outfile
print(cmd)
os.system(cmd)
if __name__ == '__main__':
main()
| 25.942029
| 110
| 0.604469
|
e72b712207a337cf52cc892447d203e2e642176f
| 16,011
|
py
|
Python
|
log_casp_act/model_176.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_casp_act/model_176.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
log_casp_act/model_176.py
|
LoLab-VU/Bayesian_Inference_of_Network_Dynamics
|
54a5ef7e868be34289836bbbb024a2963c0c9c86
|
[
"MIT"
] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('ParpC')
Monomer('Xiap', ['Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 44000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
| 84.268421
| 614
| 0.812254
|
5952f0e24032f8847a088d92547663f4bee710ad
| 9,087
|
py
|
Python
|
python/hsfs/core/feature_group_api.py
|
robzor92/feature-store-api
|
d1a7c52d9996e2b0d0d42fbee1d687575796d339
|
[
"Apache-2.0"
] | null | null | null |
python/hsfs/core/feature_group_api.py
|
robzor92/feature-store-api
|
d1a7c52d9996e2b0d0d42fbee1d687575796d339
|
[
"Apache-2.0"
] | null | null | null |
python/hsfs/core/feature_group_api.py
|
robzor92/feature-store-api
|
d1a7c52d9996e2b0d0d42fbee1d687575796d339
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from hsfs import client
from hsfs import feature_group, feature_group_commit
from hsfs.core import ingestion_job
class FeatureGroupApi:
CACHED = "cached"
ONDEMAND = "ondemand"
def __init__(self, feature_store_id):
self._feature_store_id = feature_store_id
def save(self, feature_group_instance):
"""Save feature group metadata to the feature store.
:param feature_group_instance: metadata object of feature group to be
saved
:type feature_group_instance: FeatureGroup
:return: updated metadata object of the feature group
:rtype: FeatureGroup
"""
_client = client.get_instance()
path_params = [
"project",
_client._project_id,
"featurestores",
self._feature_store_id,
"featuregroups",
]
headers = {"content-type": "application/json"}
return feature_group_instance.update_from_response_json(
_client._send_request(
"POST",
path_params,
headers=headers,
data=feature_group_instance.json(),
),
)
def get(self, name, version, fg_type):
"""Get the metadata of a feature group with a certain name and version.
:param name: name of the feature group
:type name: str
:param version: version of the feature group
:type version: int
:param fg_type: type of the feature group to return
:type version: string
:return: feature group metadata object
:rtype: FeatureGroup
"""
_client = client.get_instance()
path_params = [
"project",
_client._project_id,
"featurestores",
self._feature_store_id,
"featuregroups",
name,
]
query_params = None if version is None else {"version": version}
json_list = _client._send_request("GET", path_params, query_params)
if fg_type == self.CACHED:
fg_list = feature_group.FeatureGroup.from_response_json(json_list)
else:
fg_list = feature_group.OnDemandFeatureGroup.from_response_json(json_list)
if version is not None:
return fg_list[0]
else:
return fg_list
def delete_content(self, feature_group_instance):
"""Delete the content of a feature group.
This endpoint serves to simulate the overwrite/insert mode.
:param feature_group_instance: metadata object of feature group to clear
the content for
:type feature_group_instance: FeatureGroup
"""
_client = client.get_instance()
path_params = [
"project",
_client._project_id,
"featurestores",
self._feature_store_id,
"featuregroups",
feature_group_instance.id,
"clear",
]
_client._send_request("POST", path_params)
def delete(self, feature_group_instance):
"""Drop a feature group from the feature store.
Drops the metadata and data of a version of a feature group.
:param feature_group_instance: metadata object of feature group
:type feature_group_instance: FeatureGroup
"""
_client = client.get_instance()
path_params = [
"project",
_client._project_id,
"featurestores",
self._feature_store_id,
"featuregroups",
feature_group_instance.id,
]
_client._send_request("DELETE", path_params)
def update_metadata(
self,
feature_group_instance,
feature_group_copy,
query_parameter,
query_parameter_value=True,
):
"""Update the metadata of a feature group.
This only updates description and schema/features. The
`feature_group_copy` is the metadata object sent to the backend, while
`feature_group_instance` is the user object, which is only updated
after a successful REST call.
# Arguments
feature_group_instance: FeatureGroup. User metadata object of the
feature group.
feature_group_copy: FeatureGroup. Metadata object of the feature
group with the information to be updated.
query_parameter: str. Query parameter that controls which information is updated. E.g. "updateMetadata",
or "validationType".
query_parameter_value: Str. Value of the query_parameter.
# Returns
FeatureGroup. The updated feature group metadata object.
"""
_client = client.get_instance()
path_params = [
"project",
_client._project_id,
"featurestores",
self._feature_store_id,
"featuregroups",
feature_group_instance.id,
]
headers = {"content-type": "application/json"}
query_params = {query_parameter: query_parameter_value}
return feature_group_instance.update_from_response_json(
_client._send_request(
"PUT",
path_params,
query_params,
headers=headers,
data=feature_group_copy.json(),
),
)
def commit(self, feature_group_instance, feature_group_commit_instance):
"""
Save feature group commit metadata.
# Arguments
feature_group_instance: FeatureGroup, required
metadata object of feature group.
feature_group_commit_instance: FeatureGroupCommit, required
metadata object of feature group commit.
# Returns
`FeatureGroupCommit`.
"""
_client = client.get_instance()
path_params = [
"project",
_client._project_id,
"featurestores",
self._feature_store_id,
"featuregroups",
feature_group_instance.id,
"commits",
]
headers = {"content-type": "application/json"}
return feature_group_commit_instance.update_from_response_json(
_client._send_request(
"POST",
path_params,
headers=headers,
data=feature_group_commit_instance.json(),
),
)
def get_commit_details(self, feature_group_instance, wallclock_timestamp, limit):
"""
Get feature group commit metadata.
# Arguments
feature_group_instance: FeatureGroup, required
metadata object of feature group.
limit: number of commits to retrieve
wallclock_timestamp: specific point in time.
# Returns
`FeatureGroupCommit`.
"""
_client = client.get_instance()
path_params = [
"project",
_client._project_id,
"featurestores",
self._feature_store_id,
"featuregroups",
feature_group_instance.id,
"commits",
]
headers = {"content-type": "application/json"}
query_params = {"sort_by": "committed_on:desc", "offset": 0, "limit": limit}
if wallclock_timestamp is not None:
query_params["filter_by"] = "commited_on_ltoeq:" + str(wallclock_timestamp)
return feature_group_commit.FeatureGroupCommit.from_response_json(
_client._send_request("GET", path_params, query_params, headers=headers),
)
def ingestion(self, feature_group_instance, ingestion_conf):
"""
Setup a Hopsworks job for dataframe ingestion
Args:
feature_group_instance: FeatureGroup, required
metadata object of feature group.
ingestion_conf: the configuration for the ingestion job application
"""
_client = client.get_instance()
path_params = [
"project",
_client._project_id,
"featurestores",
self._feature_store_id,
"featuregroups",
feature_group_instance.id,
"ingestion",
]
headers = {"content-type": "application/json"}
return ingestion_job.IngestionJob.from_response_json(
_client._send_request(
"POST", path_params, headers=headers, data=ingestion_conf.json()
),
)
| 34.290566
| 116
| 0.608121
|
1c783afe42f30f97bfc5d152af4ac63f2c86b6a4
| 3,601
|
py
|
Python
|
dataset.py
|
UMBCvision/Contextual-Adversarial-Patches
|
602fd267c2562f45ba65d10edb856a1144b8ca5f
|
[
"MIT"
] | 33
|
2020-04-22T18:35:54.000Z
|
2022-02-09T06:41:02.000Z
|
dataset.py
|
UMBCvision/Contextual-Adversarial-Patches
|
602fd267c2562f45ba65d10edb856a1144b8ca5f
|
[
"MIT"
] | 3
|
2020-06-22T11:58:33.000Z
|
2021-09-07T00:44:57.000Z
|
dataset.py
|
UMBCvision/Contextual-Adversarial-Patches
|
602fd267c2562f45ba65d10edb856a1144b8ca5f
|
[
"MIT"
] | 6
|
2020-07-07T07:22:42.000Z
|
2022-02-09T06:41:04.000Z
|
#!/usr/bin/python
# encoding: utf-8
import os
import random
import torch
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
from utils import read_truths_args, read_truths
from image import *
import pdb
# WARNING: for physical world attack
class listDataset(Dataset):
def __init__(self, root, shape=None, shuffle=True, transform=None, target_transform=None, train=False, seen=0, batch_size=64, num_workers=4):
with open(root, 'r') as file:
self.lines = file.readlines()
if shuffle:
random.shuffle(self.lines)
self.nSamples = len(self.lines)
self.transform = transform
self.target_transform = target_transform
self.train = train
self.shape = shape
self.seen = seen
self.batch_size = batch_size
self.num_workers = num_workers
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
imgpath = self.lines[index].rstrip()
''' Fix the width to be 13*32=416 and do not randomize
'''
width = 13*32
        if self.train and index % 64 == 0:
if self.seen < 4000*64:
width = 13*32
self.shape = (width, width)
elif self.seen < 8000*64:
width = (random.randint(0,3) + 13)*32
self.shape = (width, width)
elif self.seen < 12000*64:
width = (random.randint(0,5) + 12)*32
self.shape = (width, width)
elif self.seen < 16000*64:
width = (random.randint(0,7) + 11)*32
self.shape = (width, width)
else: # self.seen < 20000*64:
width = (random.randint(0,9) + 10)*32
self.shape = (width, width)
if self.train:
jitter = 0.2
hue = 0.1
saturation = 1.5
exposure = 1.5
img, label = load_data_detection(imgpath, self.shape, jitter, hue, saturation, exposure)
label = torch.from_numpy(label)
else:
img = Image.open(imgpath).convert('RGB')
if self.shape:
img = img.resize(self.shape)
labpath = imgpath.replace('images', 'labels').replace('JPEGImages', 'labels').replace('.jpg', '.txt').replace('.png','.txt')
# # for KITTI
# labpath = imgpath.replace('images', 'labels').replace('PNGImages_cropped', 'labels_cropped_car_person').replace('.jpg', '.txt').replace('.png','.txt')
#labpath = imgpath.replace('images', 'labels').replace('train', 'labels').replace('.jpg', '.txt').replace('.png','.txt')
label = torch.zeros(50*5)
try:
tmp = torch.from_numpy(read_truths_args(labpath, 8.0/img.width).astype('float32'))
except Exception:
tmp = torch.zeros(1,5)
#tmp = torch.from_numpy(read_truths(labpath))
# # for KITTI
# if tmp.size() == 0:
# tmp = torch.zeros(1,5)
tmp = tmp.view(-1)
tsz = tmp.numel()
# print('labpath = %s , tsz = %d' % (labpath, tsz))
if tsz > 50*5:
label = tmp[0:50*5]
elif tsz > 0:
label[0:tsz] = tmp
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
label = self.target_transform(label)
self.seen = self.seen + self.num_workers
return (img, label, imgpath)
| 33.971698
| 164
| 0.549292
|
1d28c04aec93ee44b93a4796be781d894b46bf4c
| 1,172
|
bzl
|
Python
|
maistra/vendor/proxy_wasm_cpp_host/bazel/variables.bzl
|
knm3000/proxy
|
f2bb57b7294aea2cb344824785be42849d7d63c9
|
[
"Apache-2.0"
] | 3
|
2020-11-30T15:35:37.000Z
|
2022-01-06T14:17:18.000Z
|
maistra/vendor/proxy_wasm_cpp_host/bazel/variables.bzl
|
knm3000/proxy
|
f2bb57b7294aea2cb344824785be42849d7d63c9
|
[
"Apache-2.0"
] | 54
|
2020-06-23T17:34:04.000Z
|
2022-03-31T02:04:06.000Z
|
maistra/vendor/proxy_wasm_cpp_host/bazel/variables.bzl
|
knm3000/proxy
|
f2bb57b7294aea2cb344824785be42849d7d63c9
|
[
"Apache-2.0"
] | 12
|
2020-07-14T23:59:57.000Z
|
2022-03-22T09:59:18.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
COPTS = select({
"@bazel_tools//src/conditions:windows": [
"/std:c++17",
],
"//conditions:default": [
"-std=c++17",
],
})
# https://bytecodealliance.github.io/wasmtime/c-api/
LINKOPTS = select({
"@bazel_tools//src/conditions:windows": [
"-",
"ws2_32.lib",
"advapi32.lib",
"userenv.lib",
"ntdll.lib",
"shell32.lib",
"ole32.lib",
],
"@bazel_tools//src/conditions:darwin": [],
"//conditions:default": [
# required for linux
"-lpthread",
"-ldl",
"-lm",
],
})
| 27.255814
| 74
| 0.620307
|
ec15f73268120324bc423abf76d765a3c3918ba9
| 9,024
|
py
|
Python
|
taxdata/cps/finalprep.py
|
jdebacker/taxdata
|
c32d401a10a6c8f6e889d87c6cc72fd4338017b2
|
[
"CC0-1.0"
] | 12
|
2019-02-07T14:06:28.000Z
|
2021-12-04T19:19:50.000Z
|
taxdata/cps/finalprep.py
|
jdebacker/taxdata
|
c32d401a10a6c8f6e889d87c6cc72fd4338017b2
|
[
"CC0-1.0"
] | 230
|
2015-10-20T18:38:10.000Z
|
2018-12-05T16:04:04.000Z
|
taxdata/cps/finalprep.py
|
jdebacker/taxdata
|
c32d401a10a6c8f6e889d87c6cc72fd4338017b2
|
[
"CC0-1.0"
] | 19
|
2015-12-21T18:25:11.000Z
|
2018-11-10T16:53:38.000Z
|
"""
Clean up the CPS file and make it ready for Tax-Calculator
"""
import numpy as np
import pandas as pd
import copy
from pathlib import Path
from .helpers import CUR_PATH
from .constants import USEABLE_VARS
ADJ_TARGETS = pd.read_csv(Path(CUR_PATH, "adjustment_targets.csv"))
def drop_vars(data):
"""
    Returns a Pandas DataFrame of the data without unusable variables
"""
drop_vars = list(set(data.columns) - USEABLE_VARS)
data = data.drop(drop_vars, axis=1)
return data
def add_agi_bin(data, col_name):
"""
Add an AGI bin indicator used in Tax-Calc to apply adjustment factors
"""
THRESHOLDS_K = [
-np.inf,
0,
5,
10,
15,
20,
25,
30,
40,
50,
75,
100,
200,
500,
1000,
1500,
2000,
np.inf,
]
thresholds = [x * 1000 for x in THRESHOLDS_K]
data["agi_bin"] = pd.cut(
data[col_name],
thresholds,
labels=np.arange(0, len(THRESHOLDS_K) - 1),
right=False,
)
return data
def deduction_limits(data):
"""
Apply limits on itemized deductions
"""
# Split charitable contributions into cash and non-cash using ratio in PUF
cash = 0.82013
non_cash = 1.0 - cash
data["e19800"] = data["CHARITABLE"] * cash
data["e20100"] = data["CHARITABLE"] * non_cash
# Apply student loan interest deduction limit
data["e03210"] = np.where(data["SLINT"] > 2500, 2500, data["SLINT"])
# Apply IRA contribution limit
deductable_ira = np.where(
data["age_head"] >= 50,
np.where(data["ADJIRA"] > 6500, 6500, data["ADJIRA"]),
np.where(data["ADJIRA"] > 5500, 5500, data["ADJIRA"]),
)
data["e03150"] = deductable_ira
return data
def adjust_helper(agi, var, target, weight, agi_bin):
"""
Parameters
----------
agi: AGI provided in the CPS
var: variable being adjusted
target: target bin levels
weight: weights
Returns
-------
Series containing the adjusted values of the variable
"""
    # Goal total ensures the weighted sum of the variable won't change
goal_total = (var * weight).sum()
# Goal distribution based on IRS data
distribution = target / target.sum()
# Find the goal amount in each bin
goal_amts = goal_total * distribution
assert np.allclose(goal_amts.sum(), goal_total)
# Find current totals in each bin
bin_0 = np.where(agi < 0, var * weight, 0).sum()
bin_1 = np.where((agi >= 0) & (agi < 5000), var * weight, 0).sum()
bin_2 = np.where((agi >= 5000) & (agi < 10000), var * weight, 0).sum()
bin_3 = np.where((agi >= 10000) & (agi < 15000), var * weight, 0).sum()
bin_4 = np.where((agi >= 15000) & (agi < 20000), var * weight, 0).sum()
bin_5 = np.where((agi >= 20000) & (agi < 25000), var * weight, 0).sum()
bin_6 = np.where((agi >= 25000) & (agi < 30000), var * weight, 0).sum()
bin_7 = np.where((agi >= 30000) & (agi < 40000), var * weight, 0).sum()
bin_8 = np.where((agi >= 40000) & (agi < 50000), var * weight, 0).sum()
bin_9 = np.where((agi >= 50000) & (agi < 75000), var * weight, 0).sum()
bin_10 = np.where((agi >= 75000) & (agi < 100_000), var * weight, 0).sum()
bin_11 = np.where((agi >= 100_000) & (agi < 200_000), var * weight, 0).sum()
bin_12 = np.where((agi >= 200_000) & (agi < 500_000), var * weight, 0).sum()
bin_13 = np.where((agi >= 500_000) & (agi < 1e6), var * weight, 0).sum()
bin_14 = np.where((agi >= 1e6) & (agi < 1.5e6), var * weight, 0).sum()
bin_15 = np.where((agi >= 1.5e6) & (agi < 2e6), var * weight, 0).sum()
bin_16 = np.where((agi >= 2e6), var * weight, 0).sum()
# Create series holding each of the current totals
actual_amts = pd.Series(
[
bin_0,
bin_1,
bin_2,
bin_3,
bin_4,
bin_5,
bin_6,
bin_7,
bin_8,
bin_9,
bin_10,
bin_11,
bin_12,
bin_13,
bin_14,
bin_15,
bin_16,
],
index=goal_amts.index,
)
ratios_index = [num for num in range(0, len(actual_amts))]
# Determine the ratios
ratios = pd.Series(goal_amts / actual_amts)
ratios.index = ratios_index
# Apply adjustment ratios
var_array = np.array(var)
var_array = np.nan_to_num(var_array)
ratios = np.where(ratios == np.inf, 1.0, ratios)
adj_array = ratios[agi_bin]
_var = var * adj_array
# assert that we don't lose any of the variable
tot = (_var * weight).sum()
m = f"{tot:,.2f} != {goal_total:,.2f}"
try:
assert np.allclose(np.array(goal_total), np.array(tot)), m
except AssertionError:
print(m)
print("Reversing Adjustment")
_var = var
tot = (_var * weight).sum()
m = f"{tot:,.2f} != {goal_total:,.2f}"
assert np.allclose(np.array(goal_total), np.array(tot)), m
return _var
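# Worked example for adjust_helper (illustrative numbers, not IRS figures): if a bin
# should hold 10% of the weighted total according to `target` but currently holds 5%,
# its ratio is 2.0 and every record's `var` in that bin is doubled. A bin whose current
# weighted amount is zero yields an inf ratio, which is reset to 1.0 above, so those
# records are left unchanged; the closing assert then checks that the weighted grand
# total survived the reweighting and reverses the adjustment if it did not.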
def adjust(data, targets):
"""
data: CPS in DataFrame format
targets: targeted totals provided by the IRS
"""
# Make copies of values to avoid pandas warning
inc = copy.deepcopy(data["tot_inc"])
int_inc = copy.deepcopy(data["e00300"])
odiv_inc = copy.deepcopy(data["e00600"])
qdiv_inc = copy.deepcopy(data["e00650"])
biz_inc = copy.deepcopy(data["e00900"])
print("e00300")
data["e00300"] = adjust_helper(
inc, int_inc, targets["INT"], data["s006"], data["agi_bin"]
)
div_ratio = data["e00600"] / (data["e00600"] + data["e00650"])
print("e00600")
data["e00600"] = adjust_helper(
inc, odiv_inc, targets["ODIV"], data["s006"], data["agi_bin"]
)
print("e00650")
data["e00650"] = adjust_helper(
inc, qdiv_inc, targets["QDIV"], data["s006"], data["agi_bin"]
)
total = data["e00600"] + data["e00650"]
data["e00600"] = total * div_ratio
data["e00650"] = total * (1.0 - div_ratio)
biz_ratio_p = data["e00900p"] / data["e00900"]
biz_ratio_s = 1.0 - biz_ratio_p
biz_ratio_p = np.nan_to_num(biz_ratio_p, nan=0, posinf=1, neginf=1)
biz_ratio_s = np.nan_to_num(biz_ratio_s, nan=0, posinf=1, neginf=1)
sub = biz_ratio_s[data["MARS"] != 2]
zeros = np.zeros_like(sub)
assert np.allclose(sub, zeros)
print("e00900")
data["e00900"] = adjust_helper(
inc, biz_inc, targets["BIZ"], data["s006"], data["agi_bin"]
)
data["e00900p"] = data["e00900"] * biz_ratio_p
data["e00900s"] = data["e00900"] * biz_ratio_s
return data
def finalprep(data: pd.DataFrame):
"""
Function for cleaning up the CPS file
Parameters
----------
data: pandas DataFrame with the raw CPS tax unit file
"""
data = data.fillna(0.0)
# recode blind variables
data["blind_head"] = np.where(data["blind_head"] == 1, 1, 0)
data["blind_spouse"] = np.where(data["blind_spouse"] == 1, 1, 0)
# cap EIC
data["EIC"] = np.minimum(data["EIC"], 3)
    # apply deduction limits
data = deduction_limits(data)
# rename variables
RENAMES = {
"mars": "MARS",
"dep_stat": "DSI",
"divs": "e00600",
"CGAGIX": "e01100",
"DPAD": "e03240",
"TIRAD": "e01400",
"SEHEALTH": "e03270",
"KEOGH": "e03300",
"MEDEX": "e17500",
"CDC": "e32800",
"MISCITEM": "e20400",
"realest": "e18500",
"statetax": "e18400",
"cash_char": "e19800",
"non_cash_char": "e20100",
}
data = data.rename(columns=RENAMES)
# assert that no non-married filers have non-zero values for spouse income
sub_data = data[data["MARS"] != 2]
zeros = np.zeros_like(sub_data["MARS"])
assert np.allclose(sub_data["e00200s"], zeros)
assert np.allclose(sub_data["e00900s"], zeros)
assert np.allclose(sub_data["e02100s"], zeros)
# add record ID
data["RECID"] = range(1, len(data.index) + 1)
# add AGI bins
data = add_agi_bin(data, "tot_inc")
# adjust income distributions
print("Adjusting Income Distribution")
data = adjust(data, ADJ_TARGETS)
# assert that no non-married filers have non-zero values for spouse income
sub_data = data[data["MARS"] != 2]
zeros = np.zeros_like(sub_data["MARS"])
assert np.allclose(sub_data["e00200s"], zeros)
assert np.allclose(sub_data["e00900s"], zeros)
data = drop_vars(data)
print("Adding zero pencon_p and pencon_s variables")
data["pencon_p"] = np.zeros(len(data.index), dtype=np.int32)
data["pencon_s"] = np.zeros(len(data.index), dtype=np.int32)
# clean data
data = data.fillna(0.0)
data = data.astype(np.int32)
data["e00200"] = data["e00200p"] + data["e00200s"]
data["e00900"] = data["e00900p"] + data["e00900s"]
data["e02100"] = data["e02100p"] + data["e02100s"]
data["e00650"] = np.minimum(data["e00600"], data["e00650"])
data["s006"] *= 100
return data
| 31.333333
| 80
| 0.587101
|
9065f6b06aea01e08b03cc155662b760da206344
| 2,559
|
py
|
Python
|
riscv-none-gcc/8.2.0-2.2-20190521-0004/riscv-none-embed/lib/rv32imaf/ilp32f/libstdc++.a-gdb.py
|
balanceTWK/GD32VF103C-START
|
606bf274c3d30e4f498982eaa21947e067875792
|
[
"Apache-2.0"
] | 5
|
2019-10-21T09:05:38.000Z
|
2022-02-01T04:23:29.000Z
|
riscv-none-gcc/8.2.0-2.2-20190521-0004/riscv-none-embed/lib/rv32imaf/ilp32f/libstdc++.a-gdb.py
|
balanceTWK/GD32VF103C-START
|
606bf274c3d30e4f498982eaa21947e067875792
|
[
"Apache-2.0"
] | null | null | null |
riscv-none-gcc/8.2.0-2.2-20190521-0004/riscv-none-embed/lib/rv32imaf/ilp32f/libstdc++.a-gdb.py
|
balanceTWK/GD32VF103C-START
|
606bf274c3d30e4f498982eaa21947e067875792
|
[
"Apache-2.0"
] | 3
|
2019-10-22T09:45:34.000Z
|
2020-05-09T12:56:30.000Z
|
# -*- python -*-
# Copyright (C) 2009-2018 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/Host/home/ilg/Work/riscv-none-gcc-8.2.0-2.2/linux-x64/install/riscv-none-gcc/share/gcc-riscv-none-embed'
libdir = '/Host/home/ilg/Work/riscv-none-gcc-8.2.0-2.2/linux-x64/install/riscv-none-gcc/riscv-none-embed/lib/rv32imaf/ilp32f'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
| 41.274194
| 125
| 0.724893
|
a13cccb2aa154e290b4c42909cb47e441dcd2625
| 18,099
|
py
|
Python
|
classes.py
|
PlanTL-SANIDAD/MEDDOCAN-Evaluation-Script
|
843152c475e7118171028badd16f6342c5560e0f
|
[
"MIT"
] | null | null | null |
classes.py
|
PlanTL-SANIDAD/MEDDOCAN-Evaluation-Script
|
843152c475e7118171028badd16f6342c5560e0f
|
[
"MIT"
] | null | null | null |
classes.py
|
PlanTL-SANIDAD/MEDDOCAN-Evaluation-Script
|
843152c475e7118171028badd16f6342c5560e0f
|
[
"MIT"
] | null | null | null |
import os
from tags import PHITag
from xml.etree import ElementTree
class Annotation(object):
def __init__(self, file_name=None, root="root"):
self.doc_id = ''
self.sys_id = ''
self.text = None
self.num_sentences = None
self.root = root
self.sensitive_spans = []
self.sensitive_spans_merged = []
self.verbose = False
if file_name:
self.sys_id = os.path.basename(os.path.dirname(file_name))
self.doc_id = os.path.splitext(os.path.basename(file_name))[0]
else:
self.doc_id = None
@property
def id(self):
return self.doc_id
def get_phi(self):
return self.phi
def get_phi_spans(self):
return self.sensitive_spans
def get_phi_spans_merged(self):
return self.sensitive_spans_merged
def get_phi_spans_joins(self):
return self.sensitive_spans_joins
def get_number_sentences(self):
try:
self.num_sentences = \
sum(1 for line in open('annotated_corpora/sentence_splitted/' +
self.doc_id +
".ann"))
except IOError:
print("File '" +
                  'annotated_corpora/sentence_splitted/' +
self.doc_id +
".ann' not found.")
return self.num_sentences
def add_spans(self, phi_tags):
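        # Stores every (start, end) span and also builds a merged span list in
        # which consecutive spans are fused whenever the text between them is
        # made up only of non-alphanumeric characters (e.g. spans (0, 4) and
        # (5, 9) separated by a single space become (0, 9)).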
for tag in sorted(phi_tags):
self.sensitive_spans.append(tag)
for y in sorted(phi_tags):
if not self.sensitive_spans_merged:
self.sensitive_spans_merged.append(y)
else:
x = self.sensitive_spans_merged.pop()
if self.is_all_non_alphanumeric(self.text[x[1]:y[0]]):
self.sensitive_spans_merged.append((x[0], y[1]))
else:
self.sensitive_spans_merged.append(x)
self.sensitive_spans_merged.append(y)
@staticmethod
def is_all_non_alphanumeric(string):
for i in string:
if i.isalnum():
return False
return True
class i2b2Annotation(Annotation):
""" This class models the i2b2 annotation format."""
def __init__(self, file_name=None, root="root"):
self.doc_id = ''
self.sys_id = ''
self.text = None
self.num_sentences = None
self.root = root
self.phi = []
self.sensitive_spans = []
self.sensitive_spans_merged = []
self.verbose = False
if file_name:
self.sys_id = os.path.basename(os.path.dirname(file_name))
self.doc_id = os.path.splitext(os.path.basename(file_name))[0]
self.parse_text_and_tags(file_name)
self.parse_text_and_spans(file_name)
self.file_name = file_name
else:
self.doc_id = None
def parse_text_and_tags(self, file_name=None):
if file_name is not None:
text = open(file_name, 'r').read()
self.text = text
tree = ElementTree.parse(file_name)
root = tree.getroot()
self.root = root.tag
try:
self.text = root.find("TEXT").text
except AttributeError:
self.text = None
            # Handles files where PHI and AnnotatorTags are all just
            # stuffed into the TAGS element.
for t, cls in PHITag.tag_types.items():
if len(root.find("TAGS").findall(t)):
for element in root.find("TAGS").findall(t):
self.phi.append(cls(element))
def parse_text_and_spans(self, file_name=None):
if file_name is not None:
text = open(file_name, 'r').read()
self.text = text
tree = ElementTree.parse(file_name)
root = tree.getroot()
self.root = root.tag
try:
self.text = root.find("TEXT").text
except AttributeError:
self.text = None
# Fill list with tuples (start, end) for each annotation
phi_tags = []
for t, cls in PHITag.tag_types.items():
if len(root.find("TAGS").findall(t)):
for element in root.find("TAGS").findall(t):
phi_tags.append((cls(element).get_start(), cls(element).get_end()))
# Store spans
self.add_spans(phi_tags)
class BratAnnotation(Annotation):
""" This class models the BRAT annotation format."""
def __init__(self, file_name=None, root="root"):
self.doc_id = ''
self.sys_id = ''
self.text = None
self.num_sentences = None
self.root = root
self.phi = []
self.sensitive_spans = []
self.sensitive_spans_merged = []
self.verbose = False
if file_name:
self.sys_id = os.path.basename(os.path.dirname(file_name))
self.doc_id = os.path.splitext(os.path.basename(file_name))[0]
self.parse_text_and_tags(file_name)
self.parse_text_and_spans(file_name)
self.file_name = file_name
else:
self.doc_id = None
def parse_text_and_tags(self, file_name=None):
if file_name is not None:
text = open(os.path.splitext(file_name)[0] + '.txt', 'r').read()
self.text = text
for row in open(file_name, 'r'):
line = row.strip()
                if line.startswith("T"): # Line is a Brat TAG
try:
label = line.split("\t")[1].split()
tag = label[0]
start = int(label[1])
end = int(label[2])
self.phi.append((tag, start, end))
except IndexError:
print("ERROR! Index error while splitting sentence '" +
line + "' in document '" + file_name + "'!")
else: # Line is a Brat comment
if self.verbose:
print("\tSkipping line (comment):\t" + line)
def parse_text_and_spans(self, file_name=None):
if file_name is not None:
text = open(os.path.splitext(file_name)[0] + '.txt', 'r').read()
self.text = text
phi_tags = []
for row in open(file_name, 'r'):
line = row.strip()
                if line.startswith("T"): # Line is a Brat TAG
try:
label = line.split("\t")[1].split()
start = int(label[1])
end = int(label[2])
phi_tags.append((start, end))
except IndexError:
print("ERROR! Index error while splitting sentence '" +
line + "' in document '" + file_name + "'!")
else: # Line is a Brat comment
if self.verbose:
print("\tSkipping line (comment):\t" + line)
# Store spans
self.add_spans(phi_tags)
class Evaluate(object):
"""Base class with all methods to evaluate the different subtracks."""
def __init__(self, sys_ann, gs_ann):
self.tp = []
self.fp = []
self.fn = []
self.doc_ids = []
self.verbose = False
self.sys_id = sys_ann[list(sys_ann.keys())[0]].sys_id
@staticmethod
def get_tagset_ner(annotation):
return annotation.get_phi()
@staticmethod
def get_tagset_span(annotation):
return annotation.get_phi_spans()
@staticmethod
def get_tagset_span_merged(annotation):
return annotation.get_phi_spans_merged()
@staticmethod
def is_contained(content, container):
for element in sorted(container):
if content[0] >= element[0] and content[1] <= element[1]:
return True
return False
@staticmethod
def recall(tp, fn):
try:
return len(tp) / float(len(fn) + len(tp))
except ZeroDivisionError:
return 0.0
@staticmethod
def precision(tp, fp):
try:
return len(tp) / float(len(fp) + len(tp))
except ZeroDivisionError:
return 0.0
@staticmethod
def F_beta(p, r, beta=1):
try:
return (1 + beta**2) * ((p * r) / (p + r))
except ZeroDivisionError:
return 0.0
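    # Note: with beta=1 (the default) F_beta reduces to the standard F1 score,
    # i.e. the harmonic mean 2 * p * r / (p + r).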
def micro_recall(self):
try:
return sum([len(t) for t in self.tp]) / \
float(sum([len(t) for t in self.tp]) +
sum([len(t) for t in self.fn]))
except ZeroDivisionError:
return 0.0
def micro_precision(self):
try:
return sum([len(t) for t in self.tp]) / \
float(sum([len(t) for t in self.tp]) +
sum([len(t) for t in self.fp]))
except ZeroDivisionError:
return 0.0
def _print_docs(self):
for i, doc_id in enumerate(self.doc_ids):
mp = Evaluate.precision(self.tp[i], self.fp[i])
mr = Evaluate.recall(self.tp[i], self.fn[i])
str_fmt = "{:<35}{:<15}{:<20}"
print(str_fmt.format(doc_id,
"Precision",
"{:.4}".format(mp)))
print(str_fmt.format("",
"Recall",
"{:.4}".format(mr)))
print(str_fmt.format("",
"F1",
"{:.4}".format(Evaluate.F_beta(mp, mr))))
print("{:-<60}".format(""))
def _print_summary(self):
mp = self.micro_precision()
mr = self.micro_recall()
str_fmt = "{:<35}{:<15}{:<20}"
print(str_fmt.format("", "", ""))
print("Report (" + self.sys_id + "):")
print("{:-<60}".format(""))
print(str_fmt.format(self.label,
"Measure", "Micro"))
print("{:-<60}".format(""))
print(str_fmt.format("Total ({} docs)".format(len(self.doc_ids)),
"Precision",
"{:.4}".format(mp)))
print(str_fmt.format("",
"Recall",
"{:.4}".format(mr)))
print(str_fmt.format("",
"F1",
"{:.4}".format(Evaluate.F_beta(mr, mp))))
print("{:-<60}".format(""))
print("\n")
def print_docs(self):
print("\n")
print("Report ({}):".format(self.sys_id))
print("{:-<60}".format(""))
print("{:<35}{:<15}{:<20}".format("Document ID", "Measure", "Micro"))
print("{:-<60}".format(""))
self._print_docs()
def print_report(self, verbose=False):
self.verbose = verbose
if verbose:
self.print_docs()
self._print_summary()
class EvaluateSubtrack1(Evaluate):
"""Class for running the NER evaluation."""
def __init__(self, sys_sas, gs_sas):
self.tp = []
self.fp = []
self.fn = []
self.num_sentences = []
self.doc_ids = []
self.verbose = False
self.sys_id = sys_sas[list(sys_sas.keys())[0]].sys_id
self.label = "Subtrack 1 [NER]"
for doc_id in sorted(list(set(sys_sas.keys()) & set(gs_sas.keys()))):
gold = set(self.get_tagset_ner(gs_sas[doc_id]))
sys = set(self.get_tagset_ner(sys_sas[doc_id]))
num_sentences = self.get_num_sentences(sys_sas[doc_id])
self.tp.append(gold.intersection(sys))
self.fp.append(sys - gold)
self.fn.append(gold - sys)
self.num_sentences.append(num_sentences)
self.doc_ids.append(doc_id)
@staticmethod
def get_num_sentences(annotation):
return annotation.get_number_sentences()
@staticmethod
def leak_score(fn, num_sentences):
try:
return float(len(fn) / num_sentences)
except ZeroDivisionError:
return 0.0
except TypeError:
return "NA"
def micro_leak(self):
try:
return float(sum([len(t) for t in self.fn]) / sum(t for t in self.num_sentences))
except ZeroDivisionError:
return 0.0
except TypeError:
return "NA"
def _print_docs(self):
for i, doc_id in enumerate(self.doc_ids):
mp = EvaluateSubtrack1.precision(self.tp[i], self.fp[i])
mr = EvaluateSubtrack1.recall(self.tp[i], self.fn[i])
leak = EvaluateSubtrack1.leak_score(self.fn[i], self.num_sentences[i])
str_fmt = "{:<35}{:<15}{:<20}"
print(str_fmt.format(doc_id,
"Leak",
"{:.4}".format(leak)))
print(str_fmt.format("",
"Precision",
"{:.4}".format(mp)))
print(str_fmt.format("",
"Recall",
"{:.4}".format(mr)))
print(str_fmt.format("",
"F1",
"{:.4}".format(Evaluate.F_beta(mp, mr))))
print("{:-<60}".format(""))
def _print_summary(self):
mp = self.micro_precision()
mr = self.micro_recall()
ml = self.micro_leak()
str_fmt = "{:<35}{:<15}{:<20}"
print(str_fmt.format("", "", ""))
print("Report (" + self.sys_id + "):")
print("{:-<60}".format(""))
print(str_fmt.format(self.label,
"Measure", "Micro"))
print("{:-<60}".format(""))
print(str_fmt.format("Total ({} docs)".format(len(self.doc_ids)),
"Leak",
"{:.4}".format(ml)))
print(str_fmt.format("",
"Precision",
"{:.4}".format(mp)))
print(str_fmt.format("",
"Recall",
"{:.4}".format(mr)))
print(str_fmt.format("",
"F1",
"{:.4}".format(Evaluate.F_beta(mr, mp))))
print("{:-<60}".format(""))
print("\n")
class EvaluateSubtrack2(Evaluate):
"""Class for running the SPAN evaluation with strict span mode."""
def __init__(self, sys_sas, gs_sas):
self.tp = []
self.fp = []
self.fn = []
self.doc_ids = []
self.verbose = False
self.sys_id = sys_sas[list(sys_sas.keys())[0]].sys_id
self.label = "Subtrack 2 [strict]"
for doc_id in sorted(list(set(sys_sas.keys()) & set(gs_sas.keys()))):
gold = set(self.get_tagset_span(gs_sas[doc_id]))
sys = set(self.get_tagset_span(sys_sas[doc_id]))
self.tp.append(gold.intersection(sys))
self.fp.append(sys - gold)
self.fn.append(gold - sys)
self.doc_ids.append(doc_id)
class EvaluateSubtrack2merged(Evaluate):
"""Class for running the SPAN evaluation with merged spans mode."""
def __init__(self, sys_sas, gs_sas):
self.tp = []
self.fp = []
self.fn = []
self.doc_ids = []
self.verbose = False
self.sys_id = sys_sas[list(sys_sas.keys())[0]].sys_id
self.label = "Subtrack 2 [merged]"
for doc_id in sorted(list(set(sys_sas.keys()) & set(gs_sas.keys()))):
gold_strict = set(self.get_tagset_span(gs_sas[doc_id]))
sys_strict = set(self.get_tagset_span(sys_sas[doc_id]))
gold_merged = set(self.get_tagset_span_merged(gs_sas[doc_id]))
sys_merged = set(self.get_tagset_span_merged(sys_sas[doc_id]))
intersection = gold_strict.intersection(sys_strict).union(gold_merged.intersection(sys_merged))
fp = sys_strict - gold_strict
for tag in sys_strict:
if self.is_contained(tag, intersection):
if tag in fp:
fp.remove(tag)
fn = gold_strict - sys_strict
for tag in gold_strict:
if self.is_contained(tag, intersection):
if tag in fn:
fn.remove(tag)
self.tp.append(intersection)
self.fp.append(fp)
self.fn.append(fn)
self.doc_ids.append(doc_id)
class MeddocanEvaluation(object):
"""Base class for running the evaluations."""
def __init__(self):
self.evaluations = []
def add_eval(self, e, label=""):
e.sys_id = "SYSTEM: " + e.sys_id
e.label = label
self.evaluations.append(e)
def print_docs(self):
for e in self.evaluations:
e.print_docs()
def print_report(self, verbose=False):
for e in self.evaluations:
e.print_report(verbose=verbose)
class NER_Evaluation(MeddocanEvaluation):
"""Class for running the NER evaluation (Subtrack 1)."""
def __init__(self, annotator_cas, gold_cas, **kwargs):
self.evaluations = []
# Basic Evaluation
self.add_eval(EvaluateSubtrack1(annotator_cas, gold_cas, **kwargs),
label="SubTrack 1 [NER]")
class Span_Evaluation(MeddocanEvaluation):
"""Class for running the SPAN evaluation (Subtrack 2). Calls to 'strict'
and 'merged' evaluations. """
def __init__(self, annotator_cas, gold_cas, **kwargs):
self.evaluations = []
self.add_eval(EvaluateSubtrack2(annotator_cas, gold_cas, **kwargs),
label="SubTrack 2 [strict]")
self.add_eval(EvaluateSubtrack2merged(annotator_cas, gold_cas, **kwargs),
label="SubTrack 2 [merged]")
| 31.259067
| 107
| 0.513012
|
744bf599b43629f30763f92f76315f4c19e53451
| 464
|
py
|
Python
|
inheritance/exercise/project_need_for_speed/test.py
|
ivan-yosifov88/python_oop
|
82b210e427cb80dbab3b9a5c3fceab431ee60164
|
[
"MIT"
] | 1
|
2021-05-21T20:28:55.000Z
|
2021-05-21T20:28:55.000Z
|
inheritance/exercise/project_need_for_speed/test.py
|
ivan-yosifov88/python_oop
|
82b210e427cb80dbab3b9a5c3fceab431ee60164
|
[
"MIT"
] | null | null | null |
inheritance/exercise/project_need_for_speed/test.py
|
ivan-yosifov88/python_oop
|
82b210e427cb80dbab3b9a5c3fceab431ee60164
|
[
"MIT"
] | null | null | null |
from project_need_for_speed.family_car import FamilyCar
from project_need_for_speed.vehicle import Vehicle
vehicle = Vehicle(50, 150)
print(Vehicle.DEFAULT_FUEL_CONSUMPTION)
print(vehicle.fuel)
print(vehicle.horse_power)
print(vehicle.fuel_consumption)
vehicle.drive(100)
print(vehicle.fuel)
family_car = FamilyCar(150, 150)
family_car.drive(50)
print(family_car.fuel)
family_car.drive(50)
print(family_car.fuel)
print(family_car.__class__.__bases__[0].__name__)
| 27.294118
| 55
| 0.838362
|
abb3d1683099db0eee0147fc2d972069068fdf02
| 1,062
|
py
|
Python
|
scrapcore/validator_config.py
|
lantip/SerpScrap
|
b7de07b8ca826bcd941ef89455147c79ed1764bd
|
[
"MIT"
] | 199
|
2017-04-02T00:43:26.000Z
|
2022-03-21T09:10:08.000Z
|
scrapcore/validator_config.py
|
lantip/SerpScrap
|
b7de07b8ca826bcd941ef89455147c79ed1764bd
|
[
"MIT"
] | 49
|
2017-04-11T15:03:14.000Z
|
2022-03-08T18:41:21.000Z
|
scrapcore/validator_config.py
|
lantip/SerpScrap
|
b7de07b8ca826bcd941ef89455147c79ed1764bd
|
[
"MIT"
] | 65
|
2017-05-21T04:03:31.000Z
|
2022-03-23T09:11:41.000Z
|
# -*- coding: utf-8 -*-
from scrapcore.tools import ConfigurationError as Error
class ValidatorConfig():
def validate(self, config):
if not isinstance(config, dict):
raise Error('config is not a dict')
if config.get('num_results_per_page') > 100:
            raise Error('num_results_per_page must be lower than 100')
valid_search_types = ['normal', 'video', 'news', 'image']
if config.get('search_type') not in valid_search_types:
raise Error('Invalid search type!')
if config.get('use_own_ip') != True and len(config.get('proxy_file')) == 0:
raise Error('No proxy_file provided and using own IP is disabled.')
        if config.get('scrape_method') not in ('selenium',):
raise Error('No such scrape_method {}'.format(config.get('scrape_method')))
if config.get('screenshot') is True and \
(config.get('dir_screenshot') is None or
len(config.get('dir_screenshot')) < 1):
raise Error('No config dir_screenshot found')
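# A minimal config dict that passes the checks above might look like this
# (illustrative sketch only; values are made up, key names come from the checks):
#
#   config = {
#       'num_results_per_page': 10,
#       'search_type': 'normal',
#       'use_own_ip': True,
#       'proxy_file': '',
#       'scrape_method': 'selenium',
#       'screenshot': False,
#       'dir_screenshot': None,
#   }
#   ValidatorConfig().validate(config)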
| 37.928571
| 87
| 0.627119
|
3b1e23159f009a4648f2a992ec9bf4208e78909e
| 69
|
py
|
Python
|
binrelay/__init__.py
|
nathanjackson/binrelay
|
20420e58e7188723916e7b69cdfad8642dd36134
|
[
"MIT"
] | 4
|
2019-09-06T11:55:10.000Z
|
2021-09-03T11:37:22.000Z
|
binrelay/__init__.py
|
nathanjackson/binrelay
|
20420e58e7188723916e7b69cdfad8642dd36134
|
[
"MIT"
] | null | null | null |
binrelay/__init__.py
|
nathanjackson/binrelay
|
20420e58e7188723916e7b69cdfad8642dd36134
|
[
"MIT"
] | null | null | null |
from .tep import *
from .utils import *
from .race_analysis import *
| 17.25
| 28
| 0.73913
|
91576b2be5d991a435d2f84c1d8b92a4a6657a81
| 1,342
|
py
|
Python
|
clustering.py
|
guipaiva/K-means
|
90f1fe2896e945b173fc7788b7cf4c47feb58a99
|
[
"MIT"
] | null | null | null |
clustering.py
|
guipaiva/K-means
|
90f1fe2896e945b173fc7788b7cf4c47feb58a99
|
[
"MIT"
] | null | null | null |
clustering.py
|
guipaiva/K-means
|
90f1fe2896e945b173fc7788b7cf4c47feb58a99
|
[
"MIT"
] | null | null | null |
import numpy as np
from numpy import linalg as la
from read import *
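# `read` is assumed to provide the objects used below: `data` (the points),
# `centroids` (the initial centroid positions), `size` (the number of points)
# and `k` (the number of clusters).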
# Computes the norm (Euclidean distance) between two arrays.
# axis = 1 means the horizontal axis is used, i.e. elements of the same row are compared.
def distance(a, b, ax=1):
    return la.norm((a - b), axis=ax)
# Initialize the list of distances with zeros, shaped [80] x [3] in this case
distances = np.zeros((size, k))
# History of centroid positions
c_history = []
# Current centroid positions
curr_c = centroids
c_history.append(np.copy(curr_c))
# Control variable for the program's stopping criterion
error = 1
# Stopping criterion: error == 0 (the centroids no longer move)
while error > 0:
    # Compute the distance between the centroids and every point
    for i in range(size):
        distances[i] = distance(data[i], centroids)
    # Find, for each point, the index of the closest centroid
    nearest = np.argmin(distances, axis=1)
    # Update each centroid to the mean of the points for which it is the closest
    for i in range(k):
        curr_c[i] = np.mean(data[nearest == i], axis=0)
    # Append a copy (not a reference) of the current centroid positions to the history
    c_history.append(np.copy(curr_c))
    '''Compute the centroid movement as the distance between the updated
    centroids and their positions in the previous iteration'''
    error = distance(curr_c, c_history[-2], None)
c_history = np.array(c_history, dtype='f')
| 28.553191
| 108
| 0.742921
|
8b3e6cd9aa8cd6344238677acefe6d82a047b07c
| 3,418
|
py
|
Python
|
app/app/settings.py
|
nagasaichandra/recipe-app-api
|
67ac05687b9ac2b6726d02b6f615566712f2c75a
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
nagasaichandra/recipe-app-api
|
67ac05687b9ac2b6726d02b6f615566712f2c75a
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
nagasaichandra/recipe-app-api
|
67ac05687b9ac2b6726d02b6f615566712f2c75a
|
[
"MIT"
] | null | null | null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-=vio63j(86f$hnc4fdswa3b4*!!85^zknn^c+%lp@m&0c%gt33'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS')
}
}
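# The connection settings above come from the environment, so DB_HOST, DB_NAME,
# DB_USER and DB_PASS must be set before Django starts (for example through
# docker-compose environment entries or an exported shell environment).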
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.893939
| 91
| 0.694851
|
a77b204691c378e4ffe2fdd08bd1529782d6a1cf
| 19,696
|
py
|
Python
|
exps/NAS-Bench-201/statistics.py
|
EM-AutoML/AutoDL-Projects
|
8ff416fe5d6cb1b310b885fe376e6f2790fbda14
|
[
"MIT"
] | null | null | null |
exps/NAS-Bench-201/statistics.py
|
EM-AutoML/AutoDL-Projects
|
8ff416fe5d6cb1b310b885fe376e6f2790fbda14
|
[
"MIT"
] | null | null | null |
exps/NAS-Bench-201/statistics.py
|
EM-AutoML/AutoDL-Projects
|
8ff416fe5d6cb1b310b885fe376e6f2790fbda14
|
[
"MIT"
] | null | null | null |
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.08 #
#####################################################
import os, sys, time, argparse, collections
from copy import deepcopy
import torch
import torch.nn as nn
from pathlib import Path
from collections import defaultdict
lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from log_utils import AverageMeter, time_string, convert_secs2time
from config_utils import load_config, dict2config
from datasets import get_datasets
# NAS-Bench-201 related module or function
from models import CellStructure, get_cell_based_tiny_net
from nas_201_api import ArchResults, ResultsCount
from functions import pure_evaluate
def create_result_count(used_seed, dataset, arch_config, results, dataloader_dict):
xresult = ResultsCount(dataset, results['net_state_dict'], results['train_acc1es'], results['train_losses'], \
results['param'], results['flop'], arch_config, used_seed, results['total_epoch'], None)
net_config = dict2config({'name': 'infer.tiny', 'C': arch_config['channel'], 'N': arch_config['num_cells'], 'genotype': CellStructure.str2structure(arch_config['arch_str']), 'num_classes':arch_config['class_num']}, None)
network = get_cell_based_tiny_net(net_config)
network.load_state_dict(xresult.get_net_param())
if 'train_times' in results: # new version
xresult.update_train_info(results['train_acc1es'], results['train_acc5es'], results['train_losses'], results['train_times'])
xresult.update_eval(results['valid_acc1es'], results['valid_losses'], results['valid_times'])
else:
if dataset == 'cifar10-valid':
xresult.update_OLD_eval('x-valid' , results['valid_acc1es'], results['valid_losses'])
loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format('cifar10', 'test')], network.cuda())
xresult.update_OLD_eval('ori-test', {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})
xresult.update_latency(latencies)
elif dataset == 'cifar10':
xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses'])
loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'test')], network.cuda())
xresult.update_latency(latencies)
elif dataset == 'cifar100' or dataset == 'ImageNet16-120':
xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses'])
loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'valid')], network.cuda())
xresult.update_OLD_eval('x-valid', {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})
loss, top1, top5, latencies = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'test')], network.cuda())
xresult.update_OLD_eval('x-test' , {results['total_epoch']-1: top1}, {results['total_epoch']-1: loss})
xresult.update_latency(latencies)
else:
raise ValueError('invalid dataset name : {:}'.format(dataset))
return xresult
def account_one_arch(arch_index, arch_str, checkpoints, datasets, dataloader_dict):
information = ArchResults(arch_index, arch_str)
for checkpoint_path in checkpoints:
checkpoint = torch.load(checkpoint_path, map_location='cpu')
used_seed = checkpoint_path.name.split('-')[-1].split('.')[0]
for dataset in datasets:
assert dataset in checkpoint, 'Can not find {:} in arch-{:} from {:}'.format(dataset, arch_index, checkpoint_path)
results = checkpoint[dataset]
assert results['finish-train'], 'This {:} arch seed={:} does not finish train on {:} ::: {:}'.format(arch_index, used_seed, dataset, checkpoint_path)
arch_config = {'channel': results['channel'], 'num_cells': results['num_cells'], 'arch_str': arch_str, 'class_num': results['config']['class_num']}
xresult = create_result_count(used_seed, dataset, arch_config, results, dataloader_dict)
information.update(dataset, int(used_seed), xresult)
return information
def GET_DataLoaders(workers):
torch.set_num_threads(workers)
root_dir = (Path(__file__).parent / '..' / '..').resolve()
torch_dir = Path(os.environ['TORCH_HOME'])
# cifar
cifar_config_path = root_dir / 'configs' / 'nas-benchmark' / 'CIFAR.config'
cifar_config = load_config(cifar_config_path, None, None)
print ('{:} Create data-loader for all datasets'.format(time_string()))
print ('-'*200)
TRAIN_CIFAR10, VALID_CIFAR10, xshape, class_num = get_datasets('cifar10', str(torch_dir/'cifar.python'), -1)
print ('original CIFAR-10 : {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_CIFAR10), len(VALID_CIFAR10), xshape, class_num))
cifar10_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'cifar-split.txt', None, None)
assert cifar10_splits.train[:10] == [0, 5, 7, 11, 13, 15, 16, 17, 20, 24] and cifar10_splits.valid[:10] == [1, 2, 3, 4, 6, 8, 9, 10, 12, 14]
temp_dataset = deepcopy(TRAIN_CIFAR10)
temp_dataset.transform = VALID_CIFAR10.transform
# data loader
trainval_cifar10_loader = torch.utils.data.DataLoader(TRAIN_CIFAR10, batch_size=cifar_config.batch_size, shuffle=True , num_workers=workers, pin_memory=True)
train_cifar10_loader = torch.utils.data.DataLoader(TRAIN_CIFAR10, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar10_splits.train), num_workers=workers, pin_memory=True)
valid_cifar10_loader = torch.utils.data.DataLoader(temp_dataset , batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar10_splits.valid), num_workers=workers, pin_memory=True)
test__cifar10_loader = torch.utils.data.DataLoader(VALID_CIFAR10, batch_size=cifar_config.batch_size, shuffle=False, num_workers=workers, pin_memory=True)
print ('CIFAR-10 : trval-loader has {:3d} batch with {:} per batch'.format(len(trainval_cifar10_loader), cifar_config.batch_size))
print ('CIFAR-10 : train-loader has {:3d} batch with {:} per batch'.format(len(train_cifar10_loader), cifar_config.batch_size))
print ('CIFAR-10 : valid-loader has {:3d} batch with {:} per batch'.format(len(valid_cifar10_loader), cifar_config.batch_size))
print ('CIFAR-10 : test--loader has {:3d} batch with {:} per batch'.format(len(test__cifar10_loader), cifar_config.batch_size))
print ('-'*200)
# CIFAR-100
TRAIN_CIFAR100, VALID_CIFAR100, xshape, class_num = get_datasets('cifar100', str(torch_dir/'cifar.python'), -1)
print ('original CIFAR-100: {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_CIFAR100), len(VALID_CIFAR100), xshape, class_num))
cifar100_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'cifar100-test-split.txt', None, None)
assert cifar100_splits.xvalid[:10] == [1, 3, 4, 5, 8, 10, 13, 14, 15, 16] and cifar100_splits.xtest[:10] == [0, 2, 6, 7, 9, 11, 12, 17, 20, 24]
train_cifar100_loader = torch.utils.data.DataLoader(TRAIN_CIFAR100, batch_size=cifar_config.batch_size, shuffle=True, num_workers=workers, pin_memory=True)
valid_cifar100_loader = torch.utils.data.DataLoader(VALID_CIFAR100, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xvalid), num_workers=workers, pin_memory=True)
test__cifar100_loader = torch.utils.data.DataLoader(VALID_CIFAR100, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xtest) , num_workers=workers, pin_memory=True)
print ('CIFAR-100 : train-loader has {:3d} batch'.format(len(train_cifar100_loader)))
print ('CIFAR-100 : valid-loader has {:3d} batch'.format(len(valid_cifar100_loader)))
print ('CIFAR-100 : test--loader has {:3d} batch'.format(len(test__cifar100_loader)))
print ('-'*200)
imagenet16_config_path = 'configs/nas-benchmark/ImageNet-16.config'
imagenet16_config = load_config(imagenet16_config_path, None, None)
TRAIN_ImageNet16_120, VALID_ImageNet16_120, xshape, class_num = get_datasets('ImageNet16-120', str(torch_dir/'cifar.python'/'ImageNet16'), -1)
print ('original TRAIN_ImageNet16_120: {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_ImageNet16_120), len(VALID_ImageNet16_120), xshape, class_num))
imagenet_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'imagenet-16-120-test-split.txt', None, None)
assert imagenet_splits.xvalid[:10] == [1, 2, 3, 6, 7, 8, 9, 12, 16, 18] and imagenet_splits.xtest[:10] == [0, 4, 5, 10, 11, 13, 14, 15, 17, 20]
train_imagenet_loader = torch.utils.data.DataLoader(TRAIN_ImageNet16_120, batch_size=imagenet16_config.batch_size, shuffle=True, num_workers=workers, pin_memory=True)
valid_imagenet_loader = torch.utils.data.DataLoader(VALID_ImageNet16_120, batch_size=imagenet16_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet_splits.xvalid), num_workers=workers, pin_memory=True)
test__imagenet_loader = torch.utils.data.DataLoader(VALID_ImageNet16_120, batch_size=imagenet16_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet_splits.xtest) , num_workers=workers, pin_memory=True)
print ('ImageNet-16-120 : train-loader has {:3d} batch with {:} per batch'.format(len(train_imagenet_loader), imagenet16_config.batch_size))
print ('ImageNet-16-120 : valid-loader has {:3d} batch with {:} per batch'.format(len(valid_imagenet_loader), imagenet16_config.batch_size))
print ('ImageNet-16-120 : test--loader has {:3d} batch with {:} per batch'.format(len(test__imagenet_loader), imagenet16_config.batch_size))
# 'cifar10', 'cifar100', 'ImageNet16-120'
loaders = {'cifar10@trainval': trainval_cifar10_loader,
'cifar10@train' : train_cifar10_loader,
'cifar10@valid' : valid_cifar10_loader,
'cifar10@test' : test__cifar10_loader,
'cifar100@train' : train_cifar100_loader,
'cifar100@valid' : valid_cifar100_loader,
'cifar100@test' : test__cifar100_loader,
'ImageNet16-120@train': train_imagenet_loader,
'ImageNet16-120@valid': valid_imagenet_loader,
'ImageNet16-120@test' : test__imagenet_loader}
return loaders
def simplify(save_dir, meta_file, basestr, target_dir):
meta_infos = torch.load(meta_file, map_location='cpu')
meta_archs = meta_infos['archs'] # a list of architecture strings
meta_num_archs = meta_infos['total']
meta_max_node = meta_infos['max_node']
assert meta_num_archs == len(meta_archs), 'invalid number of archs : {:} vs {:}'.format(meta_num_archs, len(meta_archs))
sub_model_dirs = sorted(list(save_dir.glob('*-*-{:}'.format(basestr))))
print ('{:} find {:} directories used to save checkpoints'.format(time_string(), len(sub_model_dirs)))
subdir2archs, num_evaluated_arch = collections.OrderedDict(), 0
num_seeds = defaultdict(lambda: 0)
for index, sub_dir in enumerate(sub_model_dirs):
xcheckpoints = list(sub_dir.glob('arch-*-seed-*.pth'))
arch_indexes = set()
for checkpoint in xcheckpoints:
temp_names = checkpoint.name.split('-')
assert len(temp_names) == 4 and temp_names[0] == 'arch' and temp_names[2] == 'seed', 'invalid checkpoint name : {:}'.format(checkpoint.name)
arch_indexes.add( temp_names[1] )
subdir2archs[sub_dir] = sorted(list(arch_indexes))
num_evaluated_arch += len(arch_indexes)
# count number of seeds for each architecture
for arch_index in arch_indexes:
num_seeds[ len(list(sub_dir.glob('arch-{:}-seed-*.pth'.format(arch_index)))) ] += 1
print('{:} There are {:5d} architectures that have been evaluated ({:} in total).'.format(time_string(), num_evaluated_arch, meta_num_archs))
for key in sorted( list( num_seeds.keys() ) ): print ('{:} There are {:5d} architectures that are evaluated {:} times.'.format(time_string(), num_seeds[key], key))
dataloader_dict = GET_DataLoaders( 6 )
to_save_simply = save_dir / 'simplifies'
to_save_allarc = save_dir / 'simplifies' / 'architectures'
if not to_save_simply.exists(): to_save_simply.mkdir(parents=True, exist_ok=True)
if not to_save_allarc.exists(): to_save_allarc.mkdir(parents=True, exist_ok=True)
assert (save_dir / target_dir) in subdir2archs, 'can not find {:}'.format(target_dir)
arch2infos, datasets = {}, ('cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120')
evaluated_indexes = set()
target_directory = save_dir / target_dir
target_less_dir = save_dir / '{:}-LESS'.format(target_dir)
arch_indexes = subdir2archs[ target_directory ]
num_seeds = defaultdict(lambda: 0)
end_time = time.time()
arch_time = AverageMeter()
for idx, arch_index in enumerate(arch_indexes):
checkpoints = list(target_directory.glob('arch-{:}-seed-*.pth'.format(arch_index)))
ckps_less = list(target_less_dir.glob('arch-{:}-seed-*.pth'.format(arch_index)))
# create the arch info for each architecture
try:
arch_info_full = account_one_arch(arch_index, meta_archs[int(arch_index)], checkpoints, datasets, dataloader_dict)
arch_info_less = account_one_arch(arch_index, meta_archs[int(arch_index)], ckps_less, ['cifar10-valid'], dataloader_dict)
num_seeds[ len(checkpoints) ] += 1
    except Exception:
      print('Loading {:} failed : {:}'.format(arch_index, checkpoints))
continue
assert int(arch_index) not in evaluated_indexes, 'conflict arch-index : {:}'.format(arch_index)
assert 0 <= int(arch_index) < len(meta_archs), 'invalid arch-index {:} (not found in meta_archs)'.format(arch_index)
arch_info = {'full': arch_info_full, 'less': arch_info_less}
evaluated_indexes.add( int(arch_index) )
arch2infos[int(arch_index)] = arch_info
torch.save({'full': arch_info_full.state_dict(),
'less': arch_info_less.state_dict()}, to_save_allarc / '{:}-FULL.pth'.format(arch_index))
arch_info['full'].clear_params()
arch_info['less'].clear_params()
torch.save({'full': arch_info_full.state_dict(),
'less': arch_info_less.state_dict()}, to_save_allarc / '{:}-SIMPLE.pth'.format(arch_index))
# measure elapsed time
arch_time.update(time.time() - end_time)
end_time = time.time()
need_time = '{:}'.format( convert_secs2time(arch_time.avg * (len(arch_indexes)-idx-1), True) )
print('{:} {:} [{:03d}/{:03d}] : {:} still need {:}'.format(time_string(), target_dir, idx, len(arch_indexes), arch_index, need_time))
# measure time
xstrs = ['{:}:{:03d}'.format(key, num_seeds[key]) for key in sorted( list( num_seeds.keys() ) ) ]
print('{:} {:} done : {:}'.format(time_string(), target_dir, xstrs))
final_infos = {'meta_archs' : meta_archs,
'total_archs': meta_num_archs,
'basestr' : basestr,
'arch2infos' : arch2infos,
'evaluated_indexes': evaluated_indexes}
save_file_name = to_save_simply / '{:}.pth'.format(target_dir)
torch.save(final_infos, save_file_name)
print ('Save {:} / {:} architecture results into {:}.'.format(len(evaluated_indexes), meta_num_archs, save_file_name))
def merge_all(save_dir, meta_file, basestr):
meta_infos = torch.load(meta_file, map_location='cpu')
meta_archs = meta_infos['archs']
meta_num_archs = meta_infos['total']
meta_max_node = meta_infos['max_node']
assert meta_num_archs == len(meta_archs), 'invalid number of archs : {:} vs {:}'.format(meta_num_archs, len(meta_archs))
sub_model_dirs = sorted(list(save_dir.glob('*-*-{:}'.format(basestr))))
print ('{:} find {:} directories used to save checkpoints'.format(time_string(), len(sub_model_dirs)))
for index, sub_dir in enumerate(sub_model_dirs):
arch_info_files = sorted( list(sub_dir.glob('arch-*-seed-*.pth') ) )
print ('The {:02d}/{:02d}-th directory : {:} : {:} runs.'.format(index, len(sub_model_dirs), sub_dir, len(arch_info_files)))
arch2infos, evaluated_indexes = dict(), set()
for IDX, sub_dir in enumerate(sub_model_dirs):
ckp_path = sub_dir.parent / 'simplifies' / '{:}.pth'.format(sub_dir.name)
if ckp_path.exists():
sub_ckps = torch.load(ckp_path, map_location='cpu')
assert sub_ckps['total_archs'] == meta_num_archs and sub_ckps['basestr'] == basestr
xarch2infos = sub_ckps['arch2infos']
xevalindexs = sub_ckps['evaluated_indexes']
for eval_index in xevalindexs:
assert eval_index not in evaluated_indexes and eval_index not in arch2infos
#arch2infos[eval_index] = xarch2infos[eval_index].state_dict()
arch2infos[eval_index] = {'full': xarch2infos[eval_index]['full'].state_dict(),
'less': xarch2infos[eval_index]['less'].state_dict()}
evaluated_indexes.add( eval_index )
print ('{:} [{:03d}/{:03d}] merge data from {:} with {:} models.'.format(time_string(), IDX, len(sub_model_dirs), ckp_path, len(xevalindexs)))
else:
raise ValueError('Can not find {:}'.format(ckp_path))
#print ('{:} [{:03d}/{:03d}] can not find {:}, skip.'.format(time_string(), IDX, len(subdir2archs), ckp_path))
evaluated_indexes = sorted( list( evaluated_indexes ) )
print ('Finally, there are {:} architectures that have been trained and evaluated.'.format(len(evaluated_indexes)))
to_save_simply = save_dir / 'simplifies'
if not to_save_simply.exists(): to_save_simply.mkdir(parents=True, exist_ok=True)
final_infos = {'meta_archs' : meta_archs,
'total_archs': meta_num_archs,
'arch2infos' : arch2infos,
'evaluated_indexes': evaluated_indexes}
save_file_name = to_save_simply / '{:}-final-infos.pth'.format(basestr)
torch.save(final_infos, save_file_name)
print ('Save {:} / {:} architecture results into {:}.'.format(len(evaluated_indexes), meta_num_archs, save_file_name))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='NAS-BENCH-201', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--mode' , type=str, choices=['cal', 'merge'], help='The running mode for this script.')
parser.add_argument('--base_save_dir', type=str, default='./output/NAS-BENCH-201-4', help='The base-name of folder to save checkpoints and log.')
parser.add_argument('--target_dir' , type=str, help='The target directory.')
parser.add_argument('--max_node' , type=int, default=4, help='The maximum node in a cell.')
parser.add_argument('--channel' , type=int, default=16, help='The number of channels.')
parser.add_argument('--num_cells' , type=int, default=5, help='The number of cells in one stage.')
args = parser.parse_args()
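  # Illustrative invocations (sketch only; the target directory is a placeholder):
  #   python exps/NAS-Bench-201/statistics.py --mode cal --target_dir <a sub-folder named *-*-C16-N5>
  #   python exps/NAS-Bench-201/statistics.py --mode merge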
save_dir = Path( args.base_save_dir )
meta_path = save_dir / 'meta-node-{:}.pth'.format(args.max_node)
assert save_dir.exists(), 'invalid save dir path : {:}'.format(save_dir)
assert meta_path.exists(), 'invalid saved meta path : {:}'.format(meta_path)
print ('start the statistics of our nas-benchmark from {:} using {:}.'.format(save_dir, args.target_dir))
basestr = 'C{:}-N{:}'.format(args.channel, args.num_cells)
if args.mode == 'cal':
simplify(save_dir, meta_path, basestr, args.target_dir)
elif args.mode == 'merge':
merge_all(save_dir, meta_path, basestr)
else:
raise ValueError('invalid mode : {:}'.format(args.mode))
| 66.540541
| 232
| 0.701615
|
32b39b0725e1e0e115d097c18b63af2b780c0ccd
| 333
|
py
|
Python
|
project/asylum/management/commands/generate_all.py
|
jssmk/asylum
|
004b05939784b86ba559968a7cdcedf248edb01f
|
[
"MIT"
] | 1
|
2017-04-08T21:31:37.000Z
|
2017-04-08T21:31:37.000Z
|
project/asylum/management/commands/generate_all.py
|
jssmk/asylum
|
004b05939784b86ba559968a7cdcedf248edb01f
|
[
"MIT"
] | 9
|
2016-01-23T22:40:26.000Z
|
2021-09-13T17:44:11.000Z
|
project/asylum/management/commands/generate_all.py
|
jssmk/asylum
|
004b05939784b86ba559968a7cdcedf248edb01f
|
[
"MIT"
] | 1
|
2017-04-08T22:13:42.000Z
|
2017-04-08T22:13:42.000Z
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from asylum.tests.fixtures.full import generate_all
class Command(BaseCommand):
help = 'Generates full set of test data'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
generate_all()
| 22.2
| 65
| 0.693694
|
744cc94c179c6ab0a795865193e59b39ae081e7c
| 8,493
|
py
|
Python
|
backbone/GhostNet.py
|
weihaoxie/FaceX-Zoo
|
db0b087e4f4d28152e172d6c8d3767a8870733b4
|
[
"Apache-2.0"
] | 1,329
|
2021-01-13T07:06:30.000Z
|
2022-03-31T07:23:39.000Z
|
backbone/GhostNet.py
|
weihaoxie/FaceX-Zoo
|
db0b087e4f4d28152e172d6c8d3767a8870733b4
|
[
"Apache-2.0"
] | 115
|
2021-01-13T10:42:57.000Z
|
2022-03-28T03:57:52.000Z
|
backbone/GhostNet.py
|
weihaoxie/FaceX-Zoo
|
db0b087e4f4d28152e172d6c8d3767a8870733b4
|
[
"Apache-2.0"
] | 351
|
2021-01-13T07:21:00.000Z
|
2022-03-29T14:11:39.000Z
|
"""
@author: Jun Wang
@date: 20210121
@contact: jun21wangustc@gmail.com
"""
# based on:
# https://github.com/huawei-noah/ghostnet/blob/master/ghostnet_pytorch/ghostnet.py
# 2020.06.09-Changed for building GhostNet
# Huawei Technologies Co., Ltd. <foss@huawei.com>
"""
Creates a GhostNet Model as defined in:
GhostNet: More Features from Cheap Operations By Kai Han, Yunhe Wang, Qi Tian, Jianyuan Guo, Chunjing Xu, Chang Xu.
https://arxiv.org/abs/1911.11907
Modified from https://github.com/d-li14/mobilenetv3.pytorch and https://github.com/rwightman/pytorch-image-models
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Sequential, BatchNorm2d, Dropout, Module, Linear, BatchNorm1d
__all__ = ['ghost_net']
class Flatten(Module):
def forward(self, input):
return input.view(input.size(0), -1)
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
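# Worked examples of the rounding rule above:
#   _make_divisible(19, 4) -> 20  (rounded up to the nearest multiple of 4)
#   _make_divisible(10, 8) -> 16  (8 would drop the value by more than 10%, so it rounds up)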
def hard_sigmoid(x, inplace: bool = False):
if inplace:
return x.add_(3.).clamp_(0., 6.).div_(6.)
else:
return F.relu6(x + 3.) / 6.
class SqueezeExcite(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):
super(SqueezeExcite, self).__init__()
self.gate_fn = gate_fn
reduced_chs = _make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, x):
x_se = self.avg_pool(x)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
x = x * self.gate_fn(x_se)
return x
class ConvBnAct(nn.Module):
def __init__(self, in_chs, out_chs, kernel_size,
stride=1, act_layer=nn.ReLU):
super(ConvBnAct, self).__init__()
self.conv = nn.Conv2d(in_chs, out_chs, kernel_size, stride, kernel_size//2, bias=False)
self.bn1 = nn.BatchNorm2d(out_chs)
self.act1 = act_layer(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn1(x)
x = self.act1(x)
return x
class GhostModule(nn.Module):
def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True):
super(GhostModule, self).__init__()
self.oup = oup
init_channels = math.ceil(oup / ratio)
new_channels = init_channels*(ratio-1)
self.primary_conv = nn.Sequential(
nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size//2, bias=False),
nn.BatchNorm2d(init_channels),
nn.ReLU(inplace=True) if relu else nn.Sequential(),
)
self.cheap_operation = nn.Sequential(
nn.Conv2d(init_channels, new_channels, dw_size, 1, dw_size//2, groups=init_channels, bias=False),
nn.BatchNorm2d(new_channels),
nn.ReLU(inplace=True) if relu else nn.Sequential(),
)
def forward(self, x):
x1 = self.primary_conv(x)
x2 = self.cheap_operation(x1)
out = torch.cat([x1,x2], dim=1)
return out[:,:self.oup,:,:]
class GhostBottleneck(nn.Module):
""" Ghost bottleneck w/ optional SE"""
def __init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3,
stride=1, act_layer=nn.ReLU, se_ratio=0.):
super(GhostBottleneck, self).__init__()
has_se = se_ratio is not None and se_ratio > 0.
self.stride = stride
# Point-wise expansion
self.ghost1 = GhostModule(in_chs, mid_chs, relu=True)
# Depth-wise convolution
if self.stride > 1:
self.conv_dw = nn.Conv2d(mid_chs, mid_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size-1)//2,
groups=mid_chs, bias=False)
self.bn_dw = nn.BatchNorm2d(mid_chs)
# Squeeze-and-excitation
if has_se:
self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio)
else:
self.se = None
# Point-wise linear projection
self.ghost2 = GhostModule(mid_chs, out_chs, relu=False)
# shortcut
if (in_chs == out_chs and self.stride == 1):
self.shortcut = nn.Sequential()
else:
self.shortcut = nn.Sequential(
nn.Conv2d(in_chs, in_chs, dw_kernel_size, stride=stride,
padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False),
nn.BatchNorm2d(in_chs),
nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_chs),
)
def forward(self, x):
residual = x
# 1st ghost bottleneck
x = self.ghost1(x)
# Depth-wise convolution
if self.stride > 1:
x = self.conv_dw(x)
x = self.bn_dw(x)
# Squeeze-and-excitation
if self.se is not None:
x = self.se(x)
# 2nd ghost bottleneck
x = self.ghost2(x)
x += self.shortcut(residual)
return x
class GhostNet(nn.Module):
def __init__(self, width=1.0, drop_ratio=0.2, feat_dim=512, out_h=7, out_w=7):
super(GhostNet, self).__init__()
# setting of inverted residual blocks
self.cfgs = [
# k, t, c, SE, s
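            # columns: kernel size, expansion size, output channels, SE ratio, stride
            # (unpacked below as k, exp_size, c, se_ratio, s)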
# stage1
[[3, 16, 16, 0, 1]],
# stage2
[[3, 48, 24, 0, 2]],
[[3, 72, 24, 0, 1]],
# stage3
[[5, 72, 40, 0.25, 2]],
[[5, 120, 40, 0.25, 1]],
# stage4
[[3, 240, 80, 0, 2]],
[[3, 200, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 480, 112, 0.25, 1],
[3, 672, 112, 0.25, 1]
],
# stage5
[[5, 672, 160, 0.25, 2]],
[[5, 960, 160, 0, 1],
[5, 960, 160, 0.25, 1],
[5, 960, 160, 0, 1],
[5, 960, 160, 0.25, 1]
]
]
# building first layer
output_channel = _make_divisible(16 * width, 4)
#self.conv_stem = nn.Conv2d(3, output_channel, 3, 2, 1, bias=False)
self.conv_stem = nn.Conv2d(3, output_channel, 3, 1, 1, bias=False)
self.bn1 = nn.BatchNorm2d(output_channel)
self.act1 = nn.ReLU(inplace=True)
input_channel = output_channel
# building inverted residual blocks
stages = []
block = GhostBottleneck
for cfg in self.cfgs:
layers = []
for k, exp_size, c, se_ratio, s in cfg:
output_channel = _make_divisible(c * width, 4)
hidden_channel = _make_divisible(exp_size * width, 4)
layers.append(block(input_channel, hidden_channel, output_channel, k, s,
se_ratio=se_ratio))
input_channel = output_channel
stages.append(nn.Sequential(*layers))
output_channel = _make_divisible(exp_size * width, 4)
stages.append(nn.Sequential(ConvBnAct(input_channel, output_channel, 1)))
input_channel = output_channel
self.blocks = nn.Sequential(*stages)
self.output_layer = Sequential(BatchNorm2d(960),
Dropout(drop_ratio),
Flatten(),
Linear(960 * out_h * out_w, feat_dim), # for eye
BatchNorm1d(feat_dim))
def forward(self, x):
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
x = self.blocks(x)
x = self.output_layer(x)
return x
| 34.108434
| 115
| 0.568115
|
9d68783df9be16528f599c9aac1a828e21508baf
| 1,049
|
py
|
Python
|
setup.py
|
sfreund-DLR/fatpack
|
aa0d0570d69bf7f8cc5cb70b53a3fdb2357ed82a
|
[
"ISC"
] | 67
|
2019-03-19T11:17:36.000Z
|
2022-03-11T22:20:42.000Z
|
setup.py
|
sfreund-DLR/fatpack
|
aa0d0570d69bf7f8cc5cb70b53a3fdb2357ed82a
|
[
"ISC"
] | 10
|
2019-03-18T09:40:59.000Z
|
2022-03-08T20:26:34.000Z
|
setup.py
|
sfreund-DLR/fatpack
|
aa0d0570d69bf7f8cc5cb70b53a3fdb2357ed82a
|
[
"ISC"
] | 14
|
2019-11-25T16:49:30.000Z
|
2022-02-18T21:21:04.000Z
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Parse the version from the module.
with open('fatpack/__init__.py') as f:
for line in f:
if line.find("__version__") >= 0:
version = line.split("=")[1].strip()
version = version.strip('"')
version = version.strip("'")
break
with open('README.rst', 'r') as fin:
long_description = fin.read()
setup(
name='fatpack',
version=version,
author='Gunnstein T. Frøseth',
author_email='gunnstein@mailbox.org',
description='Fatigue analysis in python',
license='ISC',
long_description=long_description,
long_description_content_type="text/x-rst",
url='https://github.com/gunnstein/fatpack',
packages=find_packages(exclude=["test"]),
classifiers=[
"License :: OSI Approved :: ISC License (ISCL)",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering",
],
install_requires=["numpy"])
| 29.971429
| 56
| 0.621544
|
16bd552e48ff35ef0a8d23ab555de9c2f92bf78a
| 243
|
py
|
Python
|
examples/mytestcode.py
|
PeihongKe/PythonExcel
|
12a4ed9d8578b090870e488525b31cb9aa0a15e3
|
[
"MIT",
"Unlicense"
] | null | null | null |
examples/mytestcode.py
|
PeihongKe/PythonExcel
|
12a4ed9d8578b090870e488525b31cb9aa0a15e3
|
[
"MIT",
"Unlicense"
] | null | null | null |
examples/mytestcode.py
|
PeihongKe/PythonExcel
|
12a4ed9d8578b090870e488525b31cb9aa0a15e3
|
[
"MIT",
"Unlicense"
] | null | null | null |
import pyxll
from pyxll import xl_func
@xl_func("int x, int x: int")
def addTwoNumbersByDavidKe(x, y):
"""returns the sum of a range of floats"""
return x + y
@xl_func("int x: int")
def echoByDavidKe(x):
""" """
return x
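# The strings passed to xl_func are pyxll signature strings: the part before the
# colon declares the argument types and names, the part after the colon is the
# return type exposed to Excel.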
| 14.294118
| 46
| 0.62963
|
cb2887152787f4349078540eebc6f29bfc97d852
| 2,254
|
py
|
Python
|
dydx_alerts/event_trigger.py
|
maxholloway/dydx-alerts
|
d480edf1c96a6330122eb62b15fb0e5a610d38ba
|
[
"MIT"
] | 3
|
2022-03-07T04:55:16.000Z
|
2022-03-17T18:35:59.000Z
|
dydx_alerts/event_trigger.py
|
maxholloway/dydx-alerts
|
d480edf1c96a6330122eb62b15fb0e5a610d38ba
|
[
"MIT"
] | null | null | null |
dydx_alerts/event_trigger.py
|
maxholloway/dydx-alerts
|
d480edf1c96a6330122eb62b15fb0e5a610d38ba
|
[
"MIT"
] | null | null | null |
from typing import Dict
from constants import EventTriggerTypes, COLLATERAL_REQUIREMENTS
def get_message_generator(event_trigger_config):
"""
Given the event trigger config, return the function that can be used to produce a message.
event_trigger_config: a dict object that corresponds to the
`event_trigger_config` attribute of a messenger blob
"""
if event_trigger_config["trigger"] == EventTriggerTypes.BELOW_THRESH:
return make_below_thresh_event_trigger(event_trigger_config["trigger_options"])
else:
raise Exception(f"Invalid message action {event_trigger_config['trigger']}")
def make_below_thresh_event_trigger(config_options):
"""
Create a function that generates a "below threshold event" message.
"""
def below_thresh_event_trigger(
oracle_prices: Dict[str, float],
user_equity: float,
user_positions: Dict[str, float],
):
account_open_interest = 0
account_margin_requirement = 0
        maintenance_collateral_ratios = COLLATERAL_REQUIREMENTS["maintenance"]
for market_name, pos_size in user_positions.items():
market_open_interest = abs(pos_size) * oracle_prices[market_name]
account_open_interest += market_open_interest
            collat_ratio_requirement = float(maintenance_collateral_ratios[market_name])
account_margin_requirement += (
market_open_interest * collat_ratio_requirement
)
if account_open_interest == 0:
# never alert when there's no open interest
return ""
account_collateral_pct = 100 * (user_equity / account_open_interest)
trigger_collateral_pct = float(config_options["collateral_trigger_pct"])
if account_collateral_pct < trigger_collateral_pct:
approx_liquidation_pct = 100 * (
account_margin_requirement / account_open_interest
)
return f"Account is {account_collateral_pct:.2f}% collateralized and has ${user_equity:,.2f} of equity. It will be liquidated when it goes below approximately {approx_liquidation_pct:.2f}% collateralization."
else:
return ""
return below_thresh_event_trigger
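# Sketch of how a messenger blob's config flows through this module
# (illustrative only; the threshold value below is made up):
#
#   trigger = get_message_generator({
#       "trigger": EventTriggerTypes.BELOW_THRESH,
#       "trigger_options": {"collateral_trigger_pct": 110},
#   })
#   message = trigger(oracle_prices, user_equity, user_positions)
#   # `message` is "" unless the account's collateralization is below 110%.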
| 40.25
| 220
| 0.704969
|
74c45ecc2a6330f38302699fdc4cd93dc881eacb
| 870
|
py
|
Python
|
storm/n_inversoes.py
|
italo-batista/problems-solving
|
f83ad34f0abebd52925c4020635556f20743ba06
|
[
"MIT"
] | null | null | null |
storm/n_inversoes.py
|
italo-batista/problems-solving
|
f83ad34f0abebd52925c4020635556f20743ba06
|
[
"MIT"
] | null | null | null |
storm/n_inversoes.py
|
italo-batista/problems-solving
|
f83ad34f0abebd52925c4020635556f20743ba06
|
[
"MIT"
] | null | null | null |
def count(array):
    if len(array) <= 1:
        return 0
    else:
        mid = len(array)//2
        left = array[:mid]
        right = array[mid:]
        x = count(left)
        y = count(right)
        z = count_split(array, left, right)
        return z + x + y


def count_split(array, left, right):
    # merge the sorted halves back into array while counting split inversions
    i = 0
    j = 0
    k = 0
    z = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            array[k] = left[i]
            i = i + 1
        else:
            array[k] = right[j]
            z = z + len(left) - i
            j = j + 1
        k = k + 1
    while i < len(left):
        array[k] = left[i]
        i = i + 1
        k = k + 1
    while j < len(right):
        array[k] = right[j]
        j = j + 1
        k = k + 1
    return z


array = map(int, raw_input().split())
print count(array)
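# Worked example (illustrative, not part of the original submission): for the
# input line "2 4 1 3 5" the inverted pairs are (2,1), (4,1) and (4,3), so the
# program prints 3.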
| 17.755102
| 43
| 0.410345
|
08d41463b52c65e4a8559bfaf4b011e9191bb70a
| 13,923
|
py
|
Python
|
client/src/game_instance.py
|
kshshkim/2021-2-OSSProj-OTS-7
|
d46d56c42de1b9e445b084a627d440f158588088
|
[
"MIT"
] | 7
|
2021-11-05T07:23:27.000Z
|
2022-01-04T04:13:39.000Z
|
client/src/game_instance.py
|
kshshkim/2021-2-OSSProj-OTS-7
|
d46d56c42de1b9e445b084a627d440f158588088
|
[
"MIT"
] | 21
|
2021-10-31T17:30:37.000Z
|
2021-11-30T07:39:00.000Z
|
client/src/game_instance.py
|
kshshkim/2021-2-OSSProj-OTS-7
|
d46d56c42de1b9e445b084a627d440f158588088
|
[
"MIT"
] | 2
|
2021-11-02T13:42:30.000Z
|
2021-11-08T08:20:54.000Z
|
import pygame
from collections import deque
from .components.board import Board
import copy
from random import randint, choice
from .components.mino import Mino
from .consts.custom_events import CUSTOM_EVENTS
from .consts import timer_consts as tv
def new_mino():
return Mino(shape_index=randint(0, 6))
def post_event(custom_event): # see variables/custom_events
pygame.event.post(pygame.event.Event(CUSTOM_EVENTS[custom_event]))
class GameInstance:
def __init__(self, is_multiplayer=False):
        self.board = Board() # Tetris board
        self.is_multiplayer = is_multiplayer # whether this is a multiplayer game
        self.item_list = ("bomb", "clock") # available item types
        self.my_item_list = deque([]) # items held; consumed first-in-first-out via popleft
        self.clock_used = False # whether the clock item is currently active
        self.clock_count = tv.BASE_CLOCK_COUNT # 30 seconds; timer events fire every 0.05 s
        self.score = 0
        self.level = 1
        self.goal = 5
        self.freeze_time_count = tv.BASE_FREEZE_COUNT # countdown once the mino touches the floor
        self.is_hard_dropped = False # whether the current mino was hard-dropped; needed for freeze handling
        # self.display_update_required = True # not implemented yet, but the display could be refreshed only when this is True
        self.x = 3 # current mino position
        self.y = 0
        self.rotation = 0 # current mino rotation index
        self.move_down_count = tv.BASE_MOVE_DOWN_COUNT # value at level 1: the mino drops every 5 timer events; timer events fire every 0.05 s
        self.current_mino = new_mino() # initialize current mino
        self.next_mino = new_mino() # initialize next mino
        self.is_hold_used = False # whether hold was used; prevents holding repeatedly as an exploit
        self.hold_mino = None # the held mino
        self.status = 'start_screen' # start_screen, in_game, pause, game_over, etc.
        # self.former_time = None # for debugging
        # self.current_time = None # for debugging
def reset(self):
self.__init__(self.is_multiplayer)
    # ############## state checks ##############
    def is_stackable(self) -> bool: # determines whether the next mino can be stacked; game-over detector
grid = self.next_mino.shape[0]
for i in range(4):
for j in range(4):
if grid[i][j] != 0 and self.board.frozen_matrix[3 + j][i] != 0:
return False
return True
    def get_rotation(self, modifier: int) -> int: # add modifier to the current rotation index, wrapping around the list bounds
temp = self.rotation + modifier
if temp < 0:
temp = 3
elif temp > 3:
temp = 0
return temp
def is_rotatable(self, x, y, r_or_l: str) -> bool:
        mod = 1 # delta
if r_or_l == 'r':
mod = 1
elif r_or_l == 'l':
mod = -1
grid = self.current_mino.shape[self.get_rotation(mod)]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (x + j) < 0 or (x + j) > 9 or (y + i) < 0 or (y + i) > 20:
return False
elif self.board.frozen_matrix[x + j][y + i] != 0:
return False
return True
# Returns true if mino is at the left edge
def is_left_collide(self) -> bool:
grid = self.current_mino.shape[self.rotation]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (self.x + j - 1) < 0:
return True
elif self.board.frozen_matrix[self.x + j - 1][self.y + i] != 0:
return True
return False
# Returns true if mino is at the right edge
def is_right_collide(self) -> bool:
grid = self.current_mino.shape[self.rotation]
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (self.x + j + 1) > 9:
return True
elif self.board.frozen_matrix[self.x + j + 1][self.y + i] != 0:
return True
return False
    # floor collision check
def is_bottom_collide(self, x, y) -> bool:
grid = self.current_mino.shape[self.rotation]
temp_matrix = copy.deepcopy(self.board.frozen_matrix)
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
if (y + i + 1) > 20:
return True
elif temp_matrix[x + j][y + i + 1] != 0 and temp_matrix[x + j][y + i + 1] != 8:
return True
return False
    # wrapper for move events. Any event that rotates or moves the current mino must run inside this wrapper.
def move(self, func):
self.board.temp_matrix = copy.deepcopy(self.board.frozen_matrix)
func()
self.board.temp_matrix = self.draw_current_mino(self.board.temp_matrix)
self.display_update()
@staticmethod
def display_update():
post_event('DISPLAY_UPDATE_REQUIRED')
    # ############ methods invoked by the event handlers #############
def ev_game_start(self):
self.status = 'in_game'
post_event('GAME_START')
    # on game over
    def ev_game_over_screen_out(self):
        if not self.is_multiplayer: # not allowed when playing online
self.reset()
def ev_pause_game(self):
if not self.is_multiplayer:
self.status = 'pause'
post_event('PAUSE')
def ev_unpause_game(self):
self.status = 'in_game'
post_event('UNPAUSE')
def ev_hard_drop(self):
self.hard_drop()
self.is_hard_dropped = True
def ev_timer_event(self):
        # debug comments
# if self.former_time is None:
# self.former_time = time.time()
# self.current_time = time.time()
#
# print(f'{self.current_time - self.former_time} \n {self.move_down_count=}')
#
# self.former_time = self.current_time
self.count_move_down()
self.count_item_clock()
def ev_move_down(self):
if not self.is_bottom_collide(self.x, self.y):
self.move(self.move_down)
else:
self.bottom_count()
def ev_move_down_manual(self):
if not self.is_bottom_collide(self.x, self.y):
self.move(self.move_down)
post_event("MOVE")
def ev_move_left(self):
if not self.is_left_collide():
self.move(self.move_left)
post_event("MOVE")
def ev_move_right(self):
if not self.is_right_collide():
self.move(self.move_right)
post_event("MOVE")
    # rotate right; mod_list checks whether rotation becomes possible after nudging the current mino's x or y slightly.
def ev_rotate_right(self):
mod_list = [0, -1, 1, -2, 2]
for mod in mod_list:
if self.is_rotatable(self.x, self.y + mod, 'r'):
self.rotate(y_mod=mod, right_or_left='r')
break
elif self.is_rotatable(self.x + mod, self.y, 'r'):
self.rotate(x_mod=mod, right_or_left='r')
break
self.display_update()
def ev_rotate_left(self):
pass
def ev_hold_current_mino(self):
if not self.is_hold_used:
self.move(self.hold_current_mino)
def ev_use_item(self):
self.move(self.use_item)
    # ############ action methods #############
def move_down(self):
self.y += 1
self.move_down_count_reset()
def move_down_count_reset(self):
if self.clock_used:
self.move_down_count = (tv.BASE_MOVE_DOWN_COUNT + 2 - self.level*2) * 2
else:
self.move_down_count = tv.BASE_MOVE_DOWN_COUNT + 2 - self.level*2
    # drop countdown
def count_move_down(self):
self.move_down_count -= 1
if self.move_down_count < 0:
self.ev_move_down()
def move_left(self):
self.x -= 1
def move_right(self):
self.x += 1
def plus_y(self):
self.y += 1
    # keep moving down one row at a time until the mino hits the floor
def hard_drop(self):
while not self.is_bottom_collide(self.x, self.y):
self.move(self.move_down)
self.freeze_current_mino()
post_event("HARD_DROP")
    # hold the current mino
def hold_current_mino(self):
self.is_hold_used = True
if self.hold_mino is None:
self.hold_mino = self.current_mino
self.change_to_next_mino()
else:
self.freeze_time_count = tv.BASE_FREEZE_COUNT
self.x, self.y = 3, 0
self.current_mino, self.hold_mino = self.hold_mino, self.current_mino
    # rotation: 'r' for right, 'l' for left (default 'r'); x_mod and y_mod test feasibility after shifting from the current mino position.
def rotate(self, right_or_left='r', x_mod=0, y_mod=0):
self.x += x_mod
self.y += y_mod
if right_or_left == 'r':
self.rotate_right()
elif right_or_left == 'l':
self.rotate_left()
self.board.temp_matrix = copy.deepcopy(self.board.frozen_matrix)
self.draw_current_mino(self.board.temp_matrix)
def rotate_right(self):
self.rotation = self.get_rotation(1)
def rotate_left(self):
self.rotation = self.get_rotation(-1)
    # after a freeze, check for completed lines and clear them.
def check_lines(self):
line_count = 0
        for y in range(21): # matrix y size
            if self.is_y_line_full(y_index=y): # if the line is full, add 1 to line_count and erase the line
line_count += 1
self.erase_line(y_index=y)
if line_count == 1:
post_event("LINE_ERASED")
elif line_count == 2:
post_event("LINE_ERASED_2")
elif line_count == 3:
post_event("LINE_ERASED_3")
elif line_count == 4:
post_event("LINE_ERASED_4")
        score_list = (50, 150, 350, 1000) # should be moved elsewhere
        if line_count != 0: # if any lines were erased, update score and goal
self.update_score(score_list[line_count - 1] * self.level)
self.update_goal(line_count)
    # return whether a given y line is completely filled
def is_y_line_full(self, y_index: int) -> bool:
        for x in range(10): # matrix x size
            if self.board.temp_matrix[x][y_index] == 0: # 0 means an empty cell
                return False # return False as soon as an empty cell is found
        return True # return True if the loop finishes without finding an empty cell
    # pull every row above the given y line down by one row.
def erase_line(self, y_index: int):
while y_index > 0:
            for x in range(10): # matrix x size
self.board.frozen_matrix[x][y_index] = self.board.frozen_matrix[x][y_index - 1]
self.board.temp_matrix[x][y_index] = self.board.temp_matrix[x][y_index - 1]
y_index -= 1
    # add to the score
def update_score(self, to_add: int):
self.score += to_add
    # countdown after the mino touches the floor.
    def bottom_count(self):
        if self.is_hard_dropped or self.freeze_time_count < 0: # the mino can still move for 6 ticks after touching the floor
self.freeze_current_mino()
else:
self.freeze_time_count -= 1
    # freeze the current mino
def freeze_current_mino(self):
self.check_lines()
        self.board.frozen_matrix = copy.deepcopy(self.board.temp_matrix) # the temp matrix becomes the frozen matrix
        self.is_hard_dropped = False # moving on to the next mino, so reset the hard-drop flag
        self.freeze_time_count = tv.BASE_FREEZE_COUNT # reset the freeze count
        self.update_score(10 * self.level) # base score of 10 * level for every frozen block
        if self.is_stackable(): # check whether the next mino can spawn
            self.change_to_next_mino()
            self.rotation = 0
            self.is_hold_used = False # a new mino appeared, so reset the hold flag
else:
self.status = 'game_over'
post_event("GAME_OVER")
    # decrease the current goal by the number of cleared lines; the bgm playback speed changes whenever the level increases
def update_goal(self, line_count: int):
self.goal -= line_count
if self.goal < 0:
post_event('LEVEL_UP')
def level_up(self):
self.level += 1
self.goal = self.level * 5
self.add_random_item()
def add_random_item(self):
self.my_item_list.append(choice(self.item_list))
def change_to_next_mino(self):
self.x = 3
self.y = 0
self.rotation = 0
self.freeze_time_count = tv.BASE_FREEZE_COUNT
self.current_mino = self.next_mino
self.next_mino = new_mino()
self.draw_current_mino(self.board.temp_matrix)
def draw_current_mino(self, matrix):
grid = self.current_mino.shape[self.rotation]
tx, ty = self.x, self.y
        while not self.is_bottom_collide(tx, ty): # ghost piece
ty += 1
for i in range(4):
for j in range(4):
if grid[i][j] != 0:
# update ghost
matrix[tx + j][ty + i] = 8
# update current mino
matrix[self.x + j][self.y + i] = grid[i][j]
return matrix
    # todo the bgm probably should not be played from the game instance.
    # todo loop the bgm indefinitely once playback ends
# def play_bgm(self):
# self.bgm = UI_VARIABLES.bgm_list[self.level - 1]
# pygame.mixer.music.load(self.bgm)
# pygame.mixer.music.play()
def use_item(self):
if self.my_item_list:
            used_item = self.my_item_list.popleft() # use items in the order they were received
if used_item == "bomb":
self.item_bomb()
elif used_item == "clock":
self.item_clock()
else:
post_event('NO_ITEM_REMAIN')
def item_bomb(self):
print('bomb used')
        self.erase_line(20) # erase the bottom row; the display update is handled inside the self.move() wrapper, so this call was placed in ev_use_item().
post_event('BOMB_USED')
def item_clock(self):
if self.clock_used:
print('already_used')
post_event('NO_ITEM_REMAIN')
else:
self.clock_used = True
post_event('CLOCK_USED')
def count_item_clock(self):
if self.clock_used and self.clock_count > 0:
self.clock_count -= 1
elif self.clock_count <= 0:
self.clock_used = False
            self.clock_count = tv.BASE_CLOCK_COUNT
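# Usage sketch (illustrative only; the event wiring below is an assumption about
# the surrounding pygame loop, not part of this module):
#
#   game = GameInstance()
#   game.ev_game_start()
#   # inside the pygame event loop, timer and key events would be forwarded, e.g.
#   #   game.ev_timer_event()    on each 0.05 s timer tick
#   #   game.ev_move_left()      on K_LEFT
#   #   game.ev_hard_drop()      on K_SPACE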
| 33.071259
| 107
| 0.560871
|
d3714f4e219544b312bf28765e01102c51f7cf66
| 869
|
py
|
Python
|
examples/main.py
|
rifqidewangga/zephyr-haf
|
a645a7cc5bf58cb76a0b671fbd6c6388413a9d30
|
[
"MIT"
] | null | null | null |
examples/main.py
|
rifqidewangga/zephyr-haf
|
a645a7cc5bf58cb76a0b671fbd6c6388413a9d30
|
[
"MIT"
] | null | null | null |
examples/main.py
|
rifqidewangga/zephyr-haf
|
a645a7cc5bf58cb76a0b671fbd6c6388413a9d30
|
[
"MIT"
] | null | null | null |
import sys
from zephyr import Zephyr
import zephyr
def main():
env_temperature: float = zephyr.c_to_kelvin(20.0)
env_pressure: float = 1024.0 # hPa
if len(sys.argv) == 3:
env_pressure = float(sys.argv[1])
env_temperature = zephyr.c_to_kelvin(float(sys.argv[2]))
sensor = None
try:
sensor = Zephyr()
except zephyr.SensorNotSupported as e:
print(e.message)
if sensor is not None:
while True:
try:
qs = sensor.read_average()
qx = zephyr.compensated_reading(qs, env_temperature, env_pressure)
print(f"{qx:2f}")
except zephyr.InvalidSensorData as e:
print(e.message)
except KeyboardInterrupt:
print("Program terminated")
break
if __name__ == '__main__':
main()
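# Example invocation (hypothetical values): ambient pressure in hPa followed by
# temperature in degrees Celsius, matching the argv handling above:
#
#   python main.py 1013.25 25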
| 24.138889
| 82
| 0.576525
|
bc5a753e8a76b4593507bcdc98bcd09db6c297e3
| 748
|
py
|
Python
|
handsonml_setup.py
|
Jnewgeek/handson-ml
|
8fa44a394604d097d03687737bcaef2af001f542
|
[
"Apache-2.0"
] | null | null | null |
handsonml_setup.py
|
Jnewgeek/handson-ml
|
8fa44a394604d097d03687737bcaef2af001f542
|
[
"Apache-2.0"
] | null | null | null |
handsonml_setup.py
|
Jnewgeek/handson-ml
|
8fa44a394604d097d03687737bcaef2af001f542
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 13:42:25 2019
@author: Administrator
setup.py
"""
from __future__ import division,print_function,unicode_literals
import os
import numpy as np
np.random.seed(42)
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc("axes",labelsize=14)
mpl.rc("xtick",labelsize=12)
mpl.rc("ytick",labelsize=12)
plt.rcParams["font.sans-serif"]=["SimHei"]
plt.rcParams["axes.unicode_minus"]=False
PROJECT_ROOT_DIR="."
CHAPTER_ID="classification"
def save_fig(fig_id,CHAPTER_ID,tight_layout=True):
path=os.path.join(PROJECT_ROOT_DIR,"images",CHAPTER_ID,fig_id+".png")
print("Saving figure",fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path,format="png",dpi=300)
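# Usage sketch (not in the original script; assumes an images/<CHAPTER_ID>/ folder exists):
#
#   plt.plot([0, 1], [0, 1])
#   save_fig("diagonal_line", CHAPTER_ID)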
| 20.777778
| 73
| 0.733957
|
13a0474e0fa30147051729c3884f32fc0e315a0c
| 9,200
|
py
|
Python
|
qvi/core/vi.py
|
amirdib/quantized-variational-inference
|
50c6bf3511bdb0bca48573a845580c1f10b8ba8f
|
[
"MIT"
] | null | null | null |
qvi/core/vi.py
|
amirdib/quantized-variational-inference
|
50c6bf3511bdb0bca48573a845580c1f10b8ba8f
|
[
"MIT"
] | null | null | null |
qvi/core/vi.py
|
amirdib/quantized-variational-inference
|
50c6bf3511bdb0bca48573a845580c1f10b8ba8f
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow_probability as tfp
from tensorflow_probability import bijectors as tfb
import tensorflow as tf
from tensorflow_probability.python.vi import csiszar_divergence
tfd = tfp.distributions
from functools import partial
import matplotlib.pyplot as plt
from tensorflow_probability.python.internal import nest_util
from qvi.core.distribution import get_gaussian_quantization_weights
from qvi.misc.utils import split_to_nested_tensors
DTYPE = tf.float32
_reparameterized_elbo = partial(
csiszar_divergence.monte_carlo_variational_loss,
discrepancy_fn=csiszar_divergence.kl_reverse,
use_reparameterization=True)
class VariationalInference:
def __init__(self,
target_log_prob_fn,
surrogate_posterior,
sample_size,
variational_loss_fn,
optimizer,
num_steps,
trace_fn=None,
name=''):
self.target_log_prob_fn=target_log_prob_fn
self.surrogate_posterior=surrogate_posterior
self.trace_fn=trace_fn
self.optimizer=optimizer
self.num_steps=num_steps
self.variational_loss_fn=variational_loss_fn
self.trainable_variables=surrogate_posterior.trainable_variables
self.sample_size=sample_size
self.name = name
def run(self):
#pbar = tqdm(total=num_steps)
self.trace = tfp.vi.fit_surrogate_posterior(
target_log_prob_fn=self.target_log_prob_fn,
surrogate_posterior=self.surrogate_posterior,
trace_fn=self.trace_fn,
optimizer=self.optimizer,
num_steps=self.num_steps,
variational_loss_fn=self.variational_loss_fn,
trainable_variables=self.trainable_variables,
sample_size=self.sample_size)
def plot(self, abscissa='time',name=''):
loss, timestamps, grads = self.trace
if abscissa == 'time':
x = timestamps - timestamps[0]
elif abscissa == 'epochs':
x = np.arange(0,len(loss))
plt.plot(x, -loss, label=name)
class MCVariationalInference(VariationalInference):
def __init__(self,
target_log_prob_fn,
surrogate_posterior,
sample_size,
optimizer,
trace_fn,
num_steps,
name=''):
super().__init__(target_log_prob_fn=target_log_prob_fn,
surrogate_posterior=surrogate_posterior,
sample_size=sample_size,
variational_loss_fn=vi_mc,
optimizer=optimizer,
num_steps=num_steps,
trace_fn=trace_fn,
name='')
class RQMCVariationalInference(VariationalInference):
def __init__(self,
target_log_prob_fn,
surrogate_posterior,
sample_size,
optimizer,
trace_fn,
num_steps,
name=''):
super().__init__(target_log_prob_fn=target_log_prob_fn,
surrogate_posterior=surrogate_posterior,
sample_size=sample_size,
variational_loss_fn=vi_mc,
optimizer=optimizer,
num_steps=num_steps,
trace_fn=trace_fn,
name='')
class QuantizedVariationalInference(VariationalInference):
def __init__(self,
target_log_prob_fn,
surrogate_posterior,
sample_size,
optimizer,
trace_fn,
num_steps,
D,
name=''):
self.D = D
super().__init__(target_log_prob_fn=target_log_prob_fn,
surrogate_posterior=surrogate_posterior,
sample_size=sample_size,
variational_loss_fn=partial(vi_quantized, seed=None, K='', D=self.D),
optimizer=optimizer,
num_steps=num_steps,
trace_fn=trace_fn,
name='')
class QuantizedRichardsonVariationalInference(VariationalInference):
def __init__(self,
target_log_prob_fn,
surrogate_posterior,
sample_size,
optimizer,
num_steps,
trace_fn,
D,
M,
name=''):
self.D = D
self.M = M
super().__init__(target_log_prob_fn=target_log_prob_fn,
surrogate_posterior=surrogate_posterior,
sample_size=sample_size,
variational_loss_fn=partial(vi_quantized_richardson, seed=None, D=self.D, M=self.M),
optimizer=optimizer,
num_steps=num_steps,
trace_fn=trace_fn,
name='')
def vi_quantized_richardson(target_log_prob_fn,
surrogate_posterior,
sample_size,
seed,
D,
M):
#N value is sample_size
N = sample_size
def q_divergence(sample_size):
q_samples = surrogate_posterior.sample(sample_size)
surrogate_posterior_log_prob = surrogate_posterior.log_prob(q_samples)
target_log_prob = nest_util.call_fn(partial(target_log_prob_fn), q_samples)
weights = get_gaussian_quantization_weights(shape= (sample_size,D), dtype=tf.float32)
divergence = tfp.vi.kl_reverse(target_log_prob - surrogate_posterior_log_prob)
return tf.tensordot(weights,divergence, axes=1)
divN = tf.reduce_sum(q_divergence(N))
divM = tf.reduce_sum(q_divergence(M))
power = tf.constant(2.)
coeff_pow = D
reg_M = tf.math.pow(tf.cast(M,dtype=DTYPE),power/coeff_pow)
reg_N = tf.math.pow(tf.cast(N,dtype=DTYPE),power/coeff_pow)
elbo = ( reg_N * divN - reg_M * divM)/(reg_N - reg_M)
return elbo
def vi_quantized(target_log_prob_fn,
surrogate_posterior,
sample_size,
seed,
D,
K):
q_samples = surrogate_posterior.sample(sample_size)
surrogate_posterior_log_prob = surrogate_posterior.log_prob(q_samples)
target_log_prob = nest_util.call_fn(partial(target_log_prob_fn), q_samples)
weights = get_gaussian_quantization_weights(shape= (sample_size,D), dtype=tf.float32)
divergence = tfp.vi.kl_reverse(target_log_prob - surrogate_posterior_log_prob)
elbo = tf.reduce_sum(tf.tensordot(weights,divergence, axes=1))
#tf.print(elbo)
return elbo
def vi_mc(target_log_prob_fn,
surrogate_posterior,
sample_size,
seed=None):
q_samples = surrogate_posterior.sample(sample_size)
surrogate_posterior_log_prob = surrogate_posterior.log_prob(q_samples)
target_log_prob = nest_util.call_fn(partial(target_log_prob_fn), q_samples)
divergence = tfp.vi.kl_reverse(target_log_prob - surrogate_posterior_log_prob)
elbo = tf.reduce_mean(divergence, axis=0)
return elbo
# def trace_bnn(loss, grads, variables):
# pbar.set_description('ELBO: %s' % str(loss.numpy()))
# pbar.update()
# return loss, tf.timestamp(), grads
def build_meanfield_advi(jd_list, observed_node=-1, distribution=tfd.Normal, reinterpreted_batch_ndims_node = 1, **kwargs):
"""
The inputted jointdistribution needs to be a batch version
"""
list_of_values = jd_list.sample(1)
list_of_values.pop(observed_node)
distlist = []
for i, value in enumerate(list_of_values):
dtype = value.dtype
rv_shape = value[0].shape
#print(rv_shape)
loc = tf.Variable(tf.zeros(rv_shape),
name='meanfield_%s_mu' % i,
dtype=dtype)
scale = tfp.util.TransformedVariable(tf.ones(rv_shape), tfb.Softplus(),
name='meanfield_%s_scale' % i)
approx_node = distribution(loc=loc,
scale=scale,
name='meanfield_%s' % i, **kwargs)
if loc.shape == ():
distlist.append(approx_node)
else:
distlist.append(
tfd.Independent(approx_node, reinterpreted_batch_ndims=reinterpreted_batch_ndims_node)
)
meanfield_advi = tfd.JointDistributionSequential(distlist)
return meanfield_advi
def trainable_normal_distribution(shape, name='', distribution=tfd.Normal, **kwargs):
loc = tf.Variable(tf.zeros(shape), name='{}_loc'.format(name))
scale = tfp.util.TransformedVariable(tf.Variable(tf.fill(shape,1.), name='{}_scale'.format(name)),
bijector = tfb.Softplus())
return distribution(loc, scale, name=name, **kwargs)
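# Usage sketch (hypothetical, added for illustration; `jd_batch` and `observed_data`
# are assumed names for a batched JointDistribution and its observed values):
#
#   surrogate = build_meanfield_advi(jd_batch, observed_node=-1)
#   mcvi = MCVariationalInference(
#       target_log_prob_fn=lambda *latents: jd_batch.log_prob(list(latents) + [observed_data]),
#       surrogate_posterior=surrogate,
#       sample_size=32,
#       optimizer=tf.optimizers.Adam(learning_rate=0.1),
#       trace_fn=lambda loss, grads, variables: (loss, tf.timestamp(), grads),
#       num_steps=200)
#   mcvi.run()
#   mcvi.plot(abscissa='epochs', name='MC')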
| 36.078431
| 123
| 0.624674
|
f9b485989d2d8e8b72c6d86ea28a900fde3640de
| 4,369
|
py
|
Python
|
twitchtest/spiders/rn_twitchchannelinfo_spider.py
|
chao-shi-git/scrapy_ttools
|
13a34e3bdee135c3f5513d94161a36491eb4905b
|
[
"MIT"
] | null | null | null |
twitchtest/spiders/rn_twitchchannelinfo_spider.py
|
chao-shi-git/scrapy_ttools
|
13a34e3bdee135c3f5513d94161a36491eb4905b
|
[
"MIT"
] | 1
|
2019-03-18T04:27:40.000Z
|
2021-05-27T08:05:23.000Z
|
twitchtest/spiders/rn_twitchchannelinfo_spider.py
|
chao-shi-git/scrapy_ttools
|
13a34e3bdee135c3f5513d94161a36491eb4905b
|
[
"MIT"
] | null | null | null |
import scrapy
import pandas as pd
from twitchtest.items import TwitchChannelInfoItem
# channel subfolder paths have been scraped from the twitchtest_spider, and saved in a csv file
# here we read in the csv file, assemble the full urls, and loop over them
class TwitchChannelInfoUrlsPrep():
def __init__(self):
pass
def prep_urls(self):
with open("twitchtools_channels.csv", "r") as f:
channels = pd.read_csv(f)
urls = 'https://www.twitchtools.com' + channels['subfolder'] # pandas.core.series.Series
# urls = list('https://www.twitchtools.com' + channels['subfolder']) # list
return urls
class twitchchannelinfoSpider(scrapy.Spider):
name = "twitchchannelinfo_spider"
custom_settings = {
# custom pipeline for each spider for flexibility
'ITEM_PIPELINES': {#'twitchtest.pipelines.ValidateItemPipeline': 100,
'twitchtest.pipelines.WriteChannelInfoItemPipeline': 200}
}
allowed_urls = ['https://www.twitchtools.com/']
#start_urls = ['https://www.twitchtools.com/channels']
start_urls = TwitchChannelInfoUrlsPrep().prep_urls()
def verify(self, content):
if isinstance(content, list):
if len(content) > 0:
content = content[0]
# convert unicode to str
return content.encode('ascii','ignore')
else:
return ""
else:
# convert unicode to str
return content.encode('ascii','ignore')
def parse(self, response):
# extract upper info box (guaranteed to have info for every channel)
display_name = response.xpath('//*[@id="main"]//ul/li[1]/span/text()').extract_first()
account_unique_id = response.xpath('//*[@id="main"]//ul/li[2]/span/text()').extract_first()
channel_followers = response.xpath('//*[@id="main"]//ul/li[3]/span/text()').extract_first()
channel_views = response.xpath('//*[@id="main"]//ul/li[4]/span/text()').extract_first()
mature_flag = response.xpath('//*[@id="main"]//ul/li[5]/span/text()').extract_first()
mature_flag = ''.join(mature_flag).strip()
twitch_partner_flag = response.xpath('//*[@id="main"]//ul/li[6]/span/text()').extract_first()
twitch_partner_flag = ''.join(twitch_partner_flag).strip()
last_game = response.xpath('//*[@id="main"]//ul/li[7]/span/text()').extract_first()
account_created_date = response.xpath('//*[@id="main"]//ul/li[8]/span/text()').extract_first()
account_created_date = ''.join(account_created_date).strip()
account_updated_date = response.xpath('//*[@id="main"]//ul/li[9]/span/text()').extract_first()
account_updated_date = ''.join(account_updated_date).strip()
twitch_url = response.xpath('//*[@id="main"]//ul/li[10]/span/text()').extract_first()
# extract team info (direction: channel-to-multiple-teams), could be empty
teams_joined_ls = response.xpath('//*[@id="main"]//div[@class="boxes"]//div[@class="user"]/a/text()').extract()
if len(teams_joined_ls) > 0:
teams_joined_ls = [''.join(t).strip() for t in teams_joined_ls]
teams_joined_str = ';'.join(teams_joined_ls)
else:
teams_joined_str = 'did not join any team'
# # verify
# display_name = self.verify(display_name)
# account_unique_id = self.verify(account_unique_id)
# channel_followers = self.verify(channel_followers)
# channel_views = self.verify(channel_views)
# mature_flag = self.verify(mature_flag)
# twitch_partner_flag = self.verify(twitch_partner_flag)
# last_game = self.verify(last_game)
# account_created_date = self.verify(account_created_date)
# account_updated_date = self.verify(account_updated_date)
# twitch_url = self.verify(twitch_url)
# prep for export
item = TwitchChannelInfoItem()
item['display_name'] = display_name
item['account_unique_id'] = account_unique_id
item['channel_followers'] = channel_followers
item['channel_views'] = channel_views
item['mature_flag'] = mature_flag
item['twitch_partner_flag'] = twitch_partner_flag
item['last_game'] = last_game
item['account_created_date'] = account_created_date
item['account_updated_date'] = account_updated_date
item['twitch_url'] = twitch_url
item['page_url'] = response.request.url
item['teams_joined'] = teams_joined_str # placeholder
yield item
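# Run sketch (added for illustration): with twitchtools_channels.csv (containing a
# 'subfolder' column) in the working directory, the spider would be launched via
# Scrapy's CLI:
#
#   scrapy crawl twitchchannelinfo_spider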
| 37.34188
| 114
| 0.677272
|
d0ded2388215cbabe89571edc8eb67c193cf7083
| 10,960
|
py
|
Python
|
python/qidoc/sphinx_project.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
python/qidoc/sphinx_project.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
python/qidoc/sphinx_project.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
import os
import sys
from qisys import ui
import qisys.archive
import qisys.sh
import qidoc.project
class SphinxProject(qidoc.project.DocProject):
""" A doc project using Sphinx """
def __init__(self, doc_worktree, project, name,
depends=None, dest=None):
self.doc_type = "sphinx"
self.examples = list()
self.translated = False
super(SphinxProject, self).__init__(doc_worktree, project, name,
depends=depends,
dest=dest)
@property
def source_dir(self):
return os.path.join(self.path, "source")
def configure(self, **kwargs):
""" Create a correct conf.py in self.build_dir """
rel_paths = kwargs.get("rel_paths", False)
in_conf_py = os.path.join(self.source_dir, "conf.in.py")
should_use_template = False
if os.path.exists(in_conf_py):
should_use_template = True
else:
in_conf_py = os.path.join(self.source_dir, "conf.py")
if not os.path.exists(in_conf_py):
ui.error("Could not find a conf.py or a conf.in.py in", self.source_dir)
return
with open(in_conf_py) as fp:
conf = fp.read()
if should_use_template:
if self.template_project:
from_template = self.template_project.sphinx_conf
from_template = from_template.format(**kwargs)
conf = from_template + conf
else:
ui.warning("Found a conf.in.py but no template project found "
"in the worktree")
from_conf = dict()
try:
# quick hack if conf.in.py used __file__
from_conf["__file__"] = in_conf_py
exec(conf, from_conf) # pylint: disable=exec-used
conf = conf.replace("__file__", 'r"%s"' % in_conf_py)
        except Exception as e:
ui.error("Could not read", in_conf_py, "\n", e)
return
if "project" not in from_conf:
conf += '\nproject = "%s"\n' % self.name
if "version" not in from_conf:
if kwargs.get("version"):
conf += '\nversion = "%s"\n' % kwargs["version"]
if should_use_template and self.template_project:
if "html_theme_path" not in from_conf:
conf += '\nhtml_theme_path = [r"%s"]\n' % self.template_project.themes_path
conf += self.append_doxylink_settings(conf, rel_paths=rel_paths)
conf += self.append_intersphinx_settings(conf, rel_paths=rel_paths)
conf += self.append_qiapidoc_settings()
conf += self.append_breathe_settings()
out_conf_py = os.path.join(self.build_dir, "conf.py")
qisys.sh.write_file_if_different(conf, out_conf_py)
def append_breathe_settings(self):
breathe_projects = dict()
for x in self.doxydeps:
breathe_projects[x.name] = os.path.join(x.build_dir, 'xml')
return "\nbreathe_projects = %s\n" % breathe_projects
def append_qiapidoc_settings(self):
""" Return a string representing the qiapidoc settings """
path_list = []
self.append_doxy_xml_path(path_list)
return (
"\nqiapidoc_srcs=[" +
','.join(["r'" + x + "'" for x in path_list]) +
"]\n")
def append_doxylink_settings(self, conf, rel_paths=False):
""" Return a string representing the doxylink settings """
res = self.append_extension(conf, "sphinxcontrib.doxylink")
doxylink = dict()
for doxydep in self.doxydeps:
if rel_paths:
dep_path = os.path.relpath(doxydep.dest, self.dest)
dep_path = qisys.sh.to_posix_path(dep_path)
else:
dep_path = r"%s" % doxydep.html_dir
doxylink[doxydep.name] = (doxydep.tagfile, dep_path)
res += "\ndoxylink = %s\n" % str(doxylink)
return res
def append_intersphinx_settings(self, conf, rel_paths=False):
""" Return a string representing the intersphinx settings """
res = self.append_extension(conf, "sphinx.ext.intersphinx")
sphinx_deps = list()
for dep_name in self.depends:
doc_project = self.doc_worktree.get_doc_project(dep_name, raises=False)
if doc_project and doc_project.doc_type == "sphinx":
sphinx_deps.append(doc_project)
intersphinx_mapping = dict()
for sphinx_dep in sphinx_deps:
if rel_paths:
dep_path = os.path.relpath(sphinx_dep.dest, self.dest)
dep_path = qisys.sh.to_posix_path(dep_path)
else:
dep_path = sphinx_dep.html_dir
intersphinx_mapping[sphinx_dep.name] = (
dep_path,
os.path.join(r"%s" % sphinx_dep.html_dir, "objects.inv")
)
res += "\nintersphinx_mapping= " + str(intersphinx_mapping)
return res
@staticmethod
def append_extension(conf, extension_name):
from_conf = dict()
exec(conf, from_conf) # pylint: disable=exec-used
res = ""
if "extensions" not in from_conf:
res += "extensions = list()\n"
res += '\nextensions.append("%s")' % extension_name
return res
def build(self, build_type=None, language=None, # pylint: disable=arguments-differ,too-many-branches
spellcheck=False, werror=False, pdb=False):
""" Run sphinx.main() with the correct arguments """
try:
import sphinx
        except ImportError as e:
ui.error(e, "skipping build")
return
if self.prebuild_script:
ui.info(ui.green, "Running pre-build script:",
ui.white, self.prebuild_script)
cmd = [sys.executable, self.prebuild_script]
qisys.command.call(cmd, cwd=self.path)
ui.info()
self.generate_examples_zips()
if self.translated and language and language != "en" \
and language not in self.linguas:
raise UnknownLingua(self, language)
if self.translated:
self.intl_build(language)
qisys.sh.mkdir(self.html_dir, recursive=True)
spell_dir = os.path.join(self.build_dir, "spellcheck")
qisys.sh.mkdir(spell_dir, recursive=True)
cmd = [sys.executable, "-c", self.build_dir]
if spellcheck:
cmd.extend(("-b", "spelling"))
else:
cmd.extend(("-b", "html"))
if werror:
cmd.append("-W")
if language:
cmd.append("-Dlanguage=%s" % language)
if pdb:
cmd.append("-P")
cmd.append(self.source_dir)
if spellcheck:
cmd.append(spell_dir)
else:
cmd.append(self.html_dir)
if build_type:
os.environ["build_type"] = build_type
ui.debug("launching:", cmd)
rc = 0
try:
sphinx.main(argv=cmd)
except SystemExit as e:
rc = e.code
if spellcheck:
num_errors = get_num_spellcheck_errors(self.build_dir)
if num_errors != 0:
raise SphinxBuildError(self)
if rc != 0:
raise SphinxBuildError(self)
def generate_examples_zips(self):
for example_src in self.examples:
example_path = os.path.join(self.source_dir, example_src)
zip_path = os.path.join(self.source_dir, example_src + ".zip")
if not qisys.sh.up_to_date(zip_path, example_path):
ui.info("Generating", zip_path)
qisys.archive.compress(example_path, algo="zip", quiet=True)
def intl_update(self):
ui.info(ui.blue, "::", ui.reset, "Generating message catalogs ...")
import sphinx
from sphinx_intl.commands import run as sphinx_intl_run
# First step: run sphinx-build -b gettext
cmd = [sys.executable, "-c", self.build_dir, "-b", "gettext"]
cmd.append(self.source_dir)
locale_dir = os.path.join(self.source_dir, "locale")
cmd.append(locale_dir)
rc = 0
try:
sphinx.main(argv=cmd)
except SystemExit as e:
rc = e.code
if rc != 0:
raise SphinxBuildError(self)
ui.info()
# Second step: run sphinx-intl update -l <lingua> for every lingua
ui.info(ui.blue, "::", ui.reset, "Updating .po files ...")
for i, lingua in enumerate(self.linguas):
ui.info_count(i, len(self.linguas), ui.blue, lingua)
cmd = ["update",
"-c", os.path.join(self.build_dir, "conf.py"),
"--pot-dir", locale_dir,
"--locale-dir", locale_dir,
"--language", lingua]
sphinx_intl_run(cmd)
def intl_build(self, language):
from sphinx_intl.commands import run as sphinx_intl_run
locale_dir = os.path.join(self.source_dir, "locale")
ui.info(ui.blue, "::", ui.reset, "Building .mo files ...")
cmd = ["build",
"-c", os.path.join(self.build_dir, "conf.py"),
"--pot-dir", locale_dir,
"--locale-dir", locale_dir,
"--language", language]
sphinx_intl_run(cmd)
def install(self, destdir):
for example_src in self.examples:
example_path = os.path.join(self.source_dir, example_src)
real_dest = os.path.join(destdir, example_src)
qisys.sh.install(example_path, real_dest, quiet=True)
qisys.sh.install(self.html_dir, destdir)
def get_num_spellcheck_errors(build_dir):
output_txt = os.path.join(build_dir, "spellcheck", "output.txt")
res = 0
if not os.path.exists(output_txt):
return 1 # so that we raise SphinxBuildError
with open(output_txt, "r") as fp:
lines = fp.readlines()
res = len(lines)
if res != 0:
ui.error("Found %i spelling error(s). See %s for the details" %
(res, output_txt))
return res
class SphinxBuildError(Exception):
def __str__(self):
return "Error occurred when building doc project: %s" % self.args[0].name
class UnknownLingua(Exception):
def __init__(self, project, language):
super(UnknownLingua, self).__init__()
self.language = language
self.project = project
def __str__(self):
mess = """ Unknown language '{language}' for {project.name}.
Please check the `linguas` attribute in the `<translate>` tag
in {project.qiproject_xml}
"""
return mess.format(language=self.language, project=self.project)
| 36.533333
| 105
| 0.584307
|
3ff58fe302c8646f3ae170569ef62ff00cd37e20
| 3,094
|
py
|
Python
|
.history/Missions_to_Mars/scrape_mars_20200809084044.py
|
ermiasgelaye/web-scraping-challenge
|
f99c3436dfb0169595c46dae7733d90e21385cc6
|
[
"ADSL"
] | null | null | null |
.history/Missions_to_Mars/scrape_mars_20200809084044.py
|
ermiasgelaye/web-scraping-challenge
|
f99c3436dfb0169595c46dae7733d90e21385cc6
|
[
"ADSL"
] | null | null | null |
.history/Missions_to_Mars/scrape_mars_20200809084044.py
|
ermiasgelaye/web-scraping-challenge
|
f99c3436dfb0169595c46dae7733d90e21385cc6
|
[
"ADSL"
] | 2
|
2020-11-02T08:12:16.000Z
|
2021-05-17T21:45:42.000Z
|
from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
import time
import re
# This is for debugging
def savetofile(contents):
file = open('_temporary.txt',"w",encoding="utf-8")
file.write(contents)
file.close()
def scrape():
executable_path = {"executable_path": "chromedriver"}
browser = Browser("chrome", **executable_path, headless=False)
# NASA Mars News
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
time.sleep(3)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
slides = soup.find_all('li', class_='slide')
html = browser.html
soup = BeautifulSoup(html, "html.parser")
content_title = slides[0].find('div', class_='content_title')
news_title = content_title.text.strip()
article_teaser_body = slides[0].find('div', class_='article_teaser_body')
news_p = article_teaser_body.text.strip()
# JPL Mars Space Images
base_url = 'https://www.jpl.nasa.gov'
url = base_url + '/spaceimages/?search=&category=Mars'
browser.visit(url)
time.sleep(1)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
featured_image_url = base_url + soup.find('a',class_='button fancybox')['data-fancybox-href']
# Mars Weather
url = 'https://twitter.com/marswxreport?lang=en'
browser.visit(url)
time.sleep(1)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
mars_weather=soup.find(text=re.compile("InSight sol"))
# Mars facts
url = 'https://space-facts.com/mars/'
browser.visit(url)
tables = pd.read_html(url)
facts_df = tables[0]
facts_df.columns = ['Fact', 'Value']
facts_df['Fact'] = facts_df['Fact'].str.replace(':', '')
facts_df.reset_index(drop=True, inplace=True)
facts_html = facts_df.to_html()
# Mars Hemispheres
base_url = 'https://astrogeology.usgs.gov'
url = base_url + '/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
time.sleep(1)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
items = soup.find_all('div', class_='item')
urls = []
titles = []
for item in items:
urls.append(base_url + item.find('a')['href'])
titles.append(item.find('h3').text.strip())
img_urls = []
for oneurl in urls:
browser.visit(oneurl)
time.sleep(1)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
oneurl = base_url+soup.find('img',class_='wide-image')['src']
img_urls.append(oneurl)
hemisphere_image_urls = []
for i in range(len(titles)):
hemisphere_image_urls.append({'title':titles[i],'img_url':img_urls[i]})
# Assigning scraped data to a page
marspage = {}
marspage["news_title"] = news_title
marspage["news_p"] = news_p
marspage["featured_image_url"] = featured_image_url
marspage["mars_weather"] = mars_weather
marspage["marsfacts_html"] =
marspage["hemisphere_image_urls"] = hemisphere_image_urls
return marspage
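# Usage sketch (illustrative; requires chromedriver on the PATH):
#
#   if __name__ == "__main__":
#       data = scrape()
#       print(data["news_title"])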
| 26.672414
| 101
| 0.652877
|
348b8e072509ce02e44953f7acf24e7362f75f56
| 12,386
|
py
|
Python
|
sdk/python/pulumi_azure_native/security/security_contact.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/security/security_contact.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/security/security_contact.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SecurityContactArgs', 'SecurityContact']
@pulumi.input_type
class SecurityContactArgs:
def __init__(__self__, *,
alert_notifications: Optional[pulumi.Input['SecurityContactPropertiesAlertNotificationsArgs']] = None,
emails: Optional[pulumi.Input[str]] = None,
notifications_by_role: Optional[pulumi.Input['SecurityContactPropertiesNotificationsByRoleArgs']] = None,
phone: Optional[pulumi.Input[str]] = None,
security_contact_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SecurityContact resource.
:param pulumi.Input['SecurityContactPropertiesAlertNotificationsArgs'] alert_notifications: Defines whether to send email notifications about new security alerts
:param pulumi.Input[str] emails: List of email addresses which will get notifications from Azure Security Center by the configurations defined in this security contact.
:param pulumi.Input['SecurityContactPropertiesNotificationsByRoleArgs'] notifications_by_role: Defines whether to send email notifications from Azure Security Center to persons with specific RBAC roles on the subscription.
:param pulumi.Input[str] phone: The security contact's phone number
:param pulumi.Input[str] security_contact_name: Name of the security contact object
"""
if alert_notifications is not None:
pulumi.set(__self__, "alert_notifications", alert_notifications)
if emails is not None:
pulumi.set(__self__, "emails", emails)
if notifications_by_role is not None:
pulumi.set(__self__, "notifications_by_role", notifications_by_role)
if phone is not None:
pulumi.set(__self__, "phone", phone)
if security_contact_name is not None:
pulumi.set(__self__, "security_contact_name", security_contact_name)
@property
@pulumi.getter(name="alertNotifications")
def alert_notifications(self) -> Optional[pulumi.Input['SecurityContactPropertiesAlertNotificationsArgs']]:
"""
Defines whether to send email notifications about new security alerts
"""
return pulumi.get(self, "alert_notifications")
@alert_notifications.setter
def alert_notifications(self, value: Optional[pulumi.Input['SecurityContactPropertiesAlertNotificationsArgs']]):
pulumi.set(self, "alert_notifications", value)
@property
@pulumi.getter
def emails(self) -> Optional[pulumi.Input[str]]:
"""
List of email addresses which will get notifications from Azure Security Center by the configurations defined in this security contact.
"""
return pulumi.get(self, "emails")
@emails.setter
def emails(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "emails", value)
@property
@pulumi.getter(name="notificationsByRole")
def notifications_by_role(self) -> Optional[pulumi.Input['SecurityContactPropertiesNotificationsByRoleArgs']]:
"""
Defines whether to send email notifications from Azure Security Center to persons with specific RBAC roles on the subscription.
"""
return pulumi.get(self, "notifications_by_role")
@notifications_by_role.setter
def notifications_by_role(self, value: Optional[pulumi.Input['SecurityContactPropertiesNotificationsByRoleArgs']]):
pulumi.set(self, "notifications_by_role", value)
@property
@pulumi.getter
def phone(self) -> Optional[pulumi.Input[str]]:
"""
The security contact's phone number
"""
return pulumi.get(self, "phone")
@phone.setter
def phone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phone", value)
@property
@pulumi.getter(name="securityContactName")
def security_contact_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the security contact object
"""
return pulumi.get(self, "security_contact_name")
@security_contact_name.setter
def security_contact_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_contact_name", value)
class SecurityContact(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alert_notifications: Optional[pulumi.Input[pulumi.InputType['SecurityContactPropertiesAlertNotificationsArgs']]] = None,
emails: Optional[pulumi.Input[str]] = None,
notifications_by_role: Optional[pulumi.Input[pulumi.InputType['SecurityContactPropertiesNotificationsByRoleArgs']]] = None,
phone: Optional[pulumi.Input[str]] = None,
security_contact_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Contact details and configurations for notifications coming from Azure Security Center.
API Version: 2020-01-01-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['SecurityContactPropertiesAlertNotificationsArgs']] alert_notifications: Defines whether to send email notifications about new security alerts
:param pulumi.Input[str] emails: List of email addresses which will get notifications from Azure Security Center by the configurations defined in this security contact.
:param pulumi.Input[pulumi.InputType['SecurityContactPropertiesNotificationsByRoleArgs']] notifications_by_role: Defines whether to send email notifications from Azure Security Center to persons with specific RBAC roles on the subscription.
:param pulumi.Input[str] phone: The security contact's phone number
:param pulumi.Input[str] security_contact_name: Name of the security contact object
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[SecurityContactArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Contact details and configurations for notifications coming from Azure Security Center.
API Version: 2020-01-01-preview.
:param str resource_name: The name of the resource.
:param SecurityContactArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecurityContactArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alert_notifications: Optional[pulumi.Input[pulumi.InputType['SecurityContactPropertiesAlertNotificationsArgs']]] = None,
emails: Optional[pulumi.Input[str]] = None,
notifications_by_role: Optional[pulumi.Input[pulumi.InputType['SecurityContactPropertiesNotificationsByRoleArgs']]] = None,
phone: Optional[pulumi.Input[str]] = None,
security_contact_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecurityContactArgs.__new__(SecurityContactArgs)
__props__.__dict__["alert_notifications"] = alert_notifications
__props__.__dict__["emails"] = emails
__props__.__dict__["notifications_by_role"] = notifications_by_role
__props__.__dict__["phone"] = phone
__props__.__dict__["security_contact_name"] = security_contact_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:security:SecurityContact"), pulumi.Alias(type_="azure-native:security/v20170801preview:SecurityContact"), pulumi.Alias(type_="azure-nextgen:security/v20170801preview:SecurityContact"), pulumi.Alias(type_="azure-native:security/v20200101preview:SecurityContact"), pulumi.Alias(type_="azure-nextgen:security/v20200101preview:SecurityContact")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SecurityContact, __self__).__init__(
'azure-native:security:SecurityContact',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SecurityContact':
"""
Get an existing SecurityContact resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SecurityContactArgs.__new__(SecurityContactArgs)
__props__.__dict__["alert_notifications"] = None
__props__.__dict__["emails"] = None
__props__.__dict__["name"] = None
__props__.__dict__["notifications_by_role"] = None
__props__.__dict__["phone"] = None
__props__.__dict__["type"] = None
return SecurityContact(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="alertNotifications")
def alert_notifications(self) -> pulumi.Output[Optional['outputs.SecurityContactPropertiesResponseAlertNotifications']]:
"""
Defines whether to send email notifications about new security alerts
"""
return pulumi.get(self, "alert_notifications")
@property
@pulumi.getter
def emails(self) -> pulumi.Output[Optional[str]]:
"""
List of email addresses which will get notifications from Azure Security Center by the configurations defined in this security contact.
"""
return pulumi.get(self, "emails")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationsByRole")
def notifications_by_role(self) -> pulumi.Output[Optional['outputs.SecurityContactPropertiesResponseNotificationsByRole']]:
"""
Defines whether to send email notifications from Azure Security Center to persons with specific RBAC roles on the subscription.
"""
return pulumi.get(self, "notifications_by_role")
@property
@pulumi.getter
def phone(self) -> pulumi.Output[Optional[str]]:
"""
The security contact's phone number
"""
return pulumi.get(self, "phone")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
| 48.382813
| 429
| 0.687308
|