Dataset schema (⌀ marks columns that contain null values):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
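
A minimal sketch of inspecting rows with this schema, assuming the split has been exported locally as a Parquet file (the file name "rows.parquet" is hypothetical; only the column names and documented ranges come from the table above):

```python
# Minimal sketch: load rows with the schema above and filter by stars.
import pandas as pd

df = pd.read_parquet("rows.parquet")  # hypothetical local export

# max_stars_count is nullable (⌀ above), so drop missing values first.
starred = df.dropna(subset=["max_stars_count"])
starred = starred[starred["max_stars_count"] >= 100]

# Sanity checks implied by the schema: fixed 40-char hexsha and a
# bounded alphanumeric fraction.
assert df["hexsha"].str.len().eq(40).all()
assert df["alphanum_fraction"].between(0, 1).all()

print(starred[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```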

Row 1: tests/support.py (icetemple/genrss)
hexsha: 3d2c530a340a5037b4af9c6e8892a90cd0dd50c3 | size: 275 | ext: py | lang: Python
max_stars: tests/support.py @ icetemple/genrss, head 25d210a8b61feab0e57efc441c9a63f56989b075, licenses ["MIT"], count 16, events 2019-07-23T20:43:39.000Z to 2022-03-13T12:22:16.000Z
max_issues: tests/support.py @ icetemple/genrss, head 25d210a8b61feab0e57efc441c9a63f56989b075, licenses ["MIT"], count 10, events 2019-07-25T16:39:25.000Z to 2021-04-07T15:09:55.000Z
max_forks: tests/support.py @ icetemple/genrss, head 25d210a8b61feab0e57efc441c9a63f56989b075, licenses ["MIT"], count 1, events 2019-07-25T19:33:07.000Z to 2019-07-25T19:33:07.000Z
content:
from genrss import GenRSS
def create_rss(**kwargs):
return GenRSS(title='SmartFridge', site_url='https://smartfridge.me/',
feed_url='https://smartfridge.me/rss.xml', **kwargs)
def create_item(feed, **kwargs):
feed.item(title='Recipe', **kwargs)
avg_line_length: 25 | max_line_length: 74 | alphanum_fraction: 0.661818
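
The tests/support.py helpers above are plain factory functions; a minimal pytest-style sketch of how they might be exercised is shown below. The test body is hypothetical and relies only on the two helpers in the snippet, treating the GenRSS feed object as opaque.

```python
# Hypothetical smoke test built only on the helpers shown above.
from tests.support import create_rss, create_item

def test_create_rss_and_item():
    feed = create_rss()      # fixed SmartFridge title and URLs from the helper
    create_item(feed)        # adds the 'Recipe' item via feed.item(...)
    assert feed is not None  # construction and item() completed without raising
```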

Row 2: Python/Tests/TestData/Repl/Program.py (techkey/PTVS)
hexsha: 71c1c3506c7cd72e6e1f8481f0455bdcefd92c88 | size: 28 | ext: py | lang: Python
max_stars: Python/Tests/TestData/Repl/Program.py @ techkey/PTVS, head 8355e67eedd8e915ca49bd38a2f36172696fd903, licenses ["Apache-2.0"], count 404, events 2019-05-07T02:21:57.000Z to 2022-03-31T17:03:04.000Z
max_issues: Python/Tests/TestData/Repl/Program.py @ techkey/PTVS, head 8355e67eedd8e915ca49bd38a2f36172696fd903, licenses ["Apache-2.0"], count 1,672, events 2019-05-06T21:09:38.000Z to 2022-03-31T23:16:04.000Z
max_forks: Python/Tests/TestData/Repl/Program.py @ techkey/PTVS, head 8355e67eedd8e915ca49bd38a2f36172696fd903, licenses ["Apache-2.0"], count 186, events 2019-05-13T03:17:37.000Z to 2022-03-31T16:24:05.000Z
content:
def f():
return 42
100
avg_line_length: 5.6 | max_line_length: 13 | alphanum_fraction: 0.535714

Row 3: python/cugraph/dask/community/louvain.py (drobison00/cugraph)
hexsha: 11ecb78375fad3f21e998fed859629051c5be0e9 | size: 5,009 | ext: py | lang: Python
max_stars: python/cugraph/dask/community/louvain.py @ drobison00/cugraph, head 0ad18e7be0e5b294b070f7062fd5a58b135b180f, licenses ["Apache-2.0"], count null, events null
max_issues: python/cugraph/dask/community/louvain.py @ drobison00/cugraph, head 0ad18e7be0e5b294b070f7062fd5a58b135b180f, licenses ["Apache-2.0"], count 1, events 2020-12-01T17:34:57.000Z to 2020-12-01T17:34:57.000Z
max_forks: python/cugraph/dask/community/louvain.py @ drobison00/cugraph, head 0ad18e7be0e5b294b070f7062fd5a58b135b180f, licenses ["Apache-2.0"], count null, events null
content:
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator as op
from dask.distributed import wait, default_client
import cugraph.comms.comms as Comms
from cugraph.dask.common.input_utils import get_distributed_data
from cugraph.structure.shuffle import shuffle
from cugraph.dask.community import louvain_wrapper as c_mg_louvain
from cugraph.utilities.utils import is_cuda_version_less_than
import dask_cudf
def call_louvain(sID,
data,
num_verts,
num_edges,
vertex_partition_offsets,
sorted_by_degree,
max_level,
resolution):
wid = Comms.get_worker_id(sID)
handle = Comms.get_handle(sID)
return c_mg_louvain.louvain(data[0],
num_verts,
num_edges,
vertex_partition_offsets,
wid,
handle,
sorted_by_degree,
max_level,
resolution)
def louvain(input_graph, max_iter=100, resolution=1.0):
"""
Compute the modularity optimizing partition of the input graph using the
Louvain method on multiple GPUs
Examples
--------
>>> import cugraph.dask as dcg
>>> Comms.initialize(p2p=True)
>>> chunksize = dcg.get_chunksize(input_data_path)
>>> ddf = dask_cudf.read_csv('datasets/karate.csv', chunksize=chunksize,
delimiter=' ',
names=['src', 'dst', 'value'],
dtype=['int32', 'int32', 'float32'])
>>> dg = cugraph.Graph()
>>> dg.from_dask_cudf_edgelist(ddf, source='src', destination='dst',
edge_attr='value')
>>> parts, modularity_score = dcg.louvain(dg)
"""
# FIXME: finish docstring: describe parameters, etc.
# MG Louvain currently requires CUDA 10.2 or higher.
# FIXME: remove this check once RAPIDS drops support for CUDA < 10.2
if is_cuda_version_less_than((10, 2)):
raise NotImplementedError("Multi-GPU Louvain is not implemented for "
"this version of CUDA. Ensure CUDA version "
"10.2 or higher is installed.")
# FIXME: dask methods to populate graphs from edgelists are only present on
# DiGraph classes. Disable the Graph check for now and assume inputs are
# symmetric DiGraphs.
# if type(graph) is not Graph:
# raise Exception("input graph must be undirected")
client = default_client()
# Calling renumbering results in data that is sorted by degree
input_graph.compute_renumber_edge_list(transposed=False)
sorted_by_degree = True
(ddf,
num_verts,
partition_row_size,
partition_col_size,
vertex_partition_offsets) = shuffle(input_graph, transposed=False)
num_edges = len(ddf)
data = get_distributed_data(ddf)
futures = [client.submit(call_louvain,
Comms.get_session_id(),
wf[1],
num_verts,
num_edges,
vertex_partition_offsets,
sorted_by_degree,
max_iter,
resolution,
workers=[wf[0]])
for idx, wf in enumerate(data.worker_to_parts.items())]
wait(futures)
# futures is a list of Futures containing tuples of (DataFrame, mod_score),
# unpack using separate calls to client.submit with a callable to get
# individual items.
# FIXME: look into an alternate way (not returning a tuples, accessing
# tuples differently, etc.) since multiple client.submit() calls may not be
# optimal.
df_futures = [client.submit(op.getitem, f, 0) for f in futures]
mod_score_futures = [client.submit(op.getitem, f, 1) for f in futures]
ddf = dask_cudf.from_delayed(df_futures)
# Each worker should have computed the same mod_score
mod_score = mod_score_futures[0].result()
if input_graph.renumbered:
# MG renumbering is lazy, but it's safe to assume it's been called at
# this point if renumbered=True
ddf = input_graph.unrenumber(ddf, "vertex")
return (ddf, mod_score)
avg_line_length: 38.530769 | max_line_length: 79 | alphanum_fraction: 0.607107
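
The louvain() wrapper above returns one future per worker, each resolving to a (DataFrame, modularity) tuple, and splits them apart with follow-up client.submit(op.getitem, ...) calls. Below is a minimal, GPU-free sketch of that same unpacking pattern using plain dask.distributed; partial_result is a hypothetical stand-in for call_louvain.

```python
# Stand-alone illustration of the future-unpacking pattern used above:
# each task returns a (data, score) tuple, and operator.getitem is
# submitted as a follow-up task to split the tuple futures apart.
import operator as op
from dask.distributed import Client

def partial_result(part_id):
    # Stand-in for call_louvain: per-partition data plus a shared score.
    return {"partition": part_id}, 0.42

if __name__ == "__main__":
    client = Client(processes=False)  # local, threads-only cluster
    futures = [client.submit(partial_result, i) for i in range(4)]

    data_futures = [client.submit(op.getitem, f, 0) for f in futures]
    score_futures = [client.submit(op.getitem, f, 1) for f in futures]

    parts = client.gather(data_futures)
    score = score_futures[0].result()  # every task computed the same score
    print(parts, score)
    client.close()
```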

Row 4: src/package/archive.py (purmirl/ProbeArrow)
hexsha: d1918ed3555f99574addc93c9e2a3e7481e1e40c | size: 287 | ext: py | lang: Python
max_stars: src/package/archive.py @ purmirl/ProbeArrow, head 7c2c7d0765130d61ccd0c998d0b305b660708d56, licenses ["BSD-3-Clause"], count 5, events 2021-04-15T03:14:27.000Z to 2021-11-11T06:38:49.000Z
max_issues: src/package/archive.py @ purmirl/ProbeArrow, head 7c2c7d0765130d61ccd0c998d0b305b660708d56, licenses ["BSD-3-Clause"], count 5, events 2021-01-16T12:52:15.000Z to 2021-06-29T14:43:54.000Z
max_forks: src/package/archive.py @ purmirl/ProbeArrow, head 7c2c7d0765130d61ccd0c998d0b305b660708d56, licenses ["BSD-3-Clause"], count null, events null
content:
"""
ProbeArrow
Copyright 2020~ PeTrA. All rights reserved.
. Python Project Structure Repository;
Probe Arrow Project by PeTrA. 2020~
ProbeArrow 1.0
Language : Python3.8.2
Library : Scapy2.4.3
Advanced Trace Route
------
@ archive.py
* utility data python code file
"""
avg_line_length: 16.882353 | max_line_length: 44 | alphanum_fraction: 0.700348

Row 5: fhirclient/r4models/elementdefinition.py (Healthedata1/Flask-PL)
hexsha: f2fe4c07c8149657b70f0893a67b87fab7e9e25d | size: 77,536 | ext: py | lang: Python
max_stars: fhirclient/r4models/elementdefinition.py @ Healthedata1/Flask-PL, head 88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1, licenses ["MIT"], count null, events null
max_issues: fhirclient/r4models/elementdefinition.py @ Healthedata1/Flask-PL, head 88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1, licenses ["MIT"], count null, events null
max_forks: fhirclient/r4models/elementdefinition.py @ Healthedata1/Flask-PL, head 88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1, licenses ["MIT"], count null, events null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/ElementDefinition) on 2019-05-07.
# 2019, SMART Health IT.
from . import backboneelement
class ElementDefinition(backboneelement.BackboneElement):
""" Definition of an element in a resource or extension.
Captures constraints on each element within the resource, profile, or
extension.
"""
resource_type = "ElementDefinition"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.alias = None
""" Other names.
List of `str` items. """
self.base = None
""" Base definition information for tools.
Type `ElementDefinitionBase` (represented as `dict` in JSON). """
self.binding = None
""" ValueSet details if this is coded.
Type `ElementDefinitionBinding` (represented as `dict` in JSON). """
self.code = None
""" Corresponding codes in terminologies.
List of `Coding` items (represented as `dict` in JSON). """
self.comment = None
""" Comments about the use of this element.
Type `str`. """
self.condition = None
""" Reference to invariant about presence.
List of `str` items. """
self.constraint = None
""" Condition that must evaluate to true.
List of `ElementDefinitionConstraint` items (represented as `dict` in JSON). """
self.contentReference = None
""" Reference to definition of content for the element.
Type `str`. """
self.defaultValueAddress = None
""" Specified value if missing from instance.
Type `Address` (represented as `dict` in JSON). """
self.defaultValueAge = None
""" Specified value if missing from instance.
Type `Age` (represented as `dict` in JSON). """
self.defaultValueAnnotation = None
""" Specified value if missing from instance.
Type `Annotation` (represented as `dict` in JSON). """
self.defaultValueAttachment = None
""" Specified value if missing from instance.
Type `Attachment` (represented as `dict` in JSON). """
self.defaultValueBase64Binary = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueBoolean = None
""" Specified value if missing from instance.
Type `bool`. """
self.defaultValueCanonical = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueCode = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueCodeableConcept = None
""" Specified value if missing from instance.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.defaultValueCoding = None
""" Specified value if missing from instance.
Type `Coding` (represented as `dict` in JSON). """
self.defaultValueContactDetail = None
""" Specified value if missing from instance.
Type `ContactDetail` (represented as `dict` in JSON). """
self.defaultValueContactPoint = None
""" Specified value if missing from instance.
Type `ContactPoint` (represented as `dict` in JSON). """
self.defaultValueContributor = None
""" Specified value if missing from instance.
Type `Contributor` (represented as `dict` in JSON). """
self.defaultValueCount = None
""" Specified value if missing from instance.
Type `Count` (represented as `dict` in JSON). """
self.defaultValueDataRequirement = None
""" Specified value if missing from instance.
Type `DataRequirement` (represented as `dict` in JSON). """
self.defaultValueDate = None
""" Specified value if missing from instance.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueDateTime = None
""" Specified value if missing from instance.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueDecimal = None
""" Specified value if missing from instance.
Type `float`. """
self.defaultValueDistance = None
""" Specified value if missing from instance.
Type `Distance` (represented as `dict` in JSON). """
self.defaultValueDosage = None
""" Specified value if missing from instance.
Type `Dosage` (represented as `dict` in JSON). """
self.defaultValueDuration = None
""" Specified value if missing from instance.
Type `Duration` (represented as `dict` in JSON). """
self.defaultValueExpression = None
""" Specified value if missing from instance.
Type `Expression` (represented as `dict` in JSON). """
self.defaultValueHumanName = None
""" Specified value if missing from instance.
Type `HumanName` (represented as `dict` in JSON). """
self.defaultValueId = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueIdentifier = None
""" Specified value if missing from instance.
Type `Identifier` (represented as `dict` in JSON). """
self.defaultValueInstant = None
""" Specified value if missing from instance.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueInteger = None
""" Specified value if missing from instance.
Type `int`. """
self.defaultValueMarkdown = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueMoney = None
""" Specified value if missing from instance.
Type `Money` (represented as `dict` in JSON). """
self.defaultValueOid = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueParameterDefinition = None
""" Specified value if missing from instance.
Type `ParameterDefinition` (represented as `dict` in JSON). """
self.defaultValuePeriod = None
""" Specified value if missing from instance.
Type `Period` (represented as `dict` in JSON). """
self.defaultValuePositiveInt = None
""" Specified value if missing from instance.
Type `int`. """
self.defaultValueQuantity = None
""" Specified value if missing from instance.
Type `Quantity` (represented as `dict` in JSON). """
self.defaultValueRange = None
""" Specified value if missing from instance.
Type `Range` (represented as `dict` in JSON). """
self.defaultValueRatio = None
""" Specified value if missing from instance.
Type `Ratio` (represented as `dict` in JSON). """
self.defaultValueReference = None
""" Specified value if missing from instance.
Type `FHIRReference` (represented as `dict` in JSON). """
self.defaultValueRelatedArtifact = None
""" Specified value if missing from instance.
Type `RelatedArtifact` (represented as `dict` in JSON). """
self.defaultValueSampledData = None
""" Specified value if missing from instance.
Type `SampledData` (represented as `dict` in JSON). """
self.defaultValueSignature = None
""" Specified value if missing from instance.
Type `Signature` (represented as `dict` in JSON). """
self.defaultValueString = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueTime = None
""" Specified value if missing from instance.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueTiming = None
""" Specified value if missing from instance.
Type `Timing` (represented as `dict` in JSON). """
self.defaultValueTriggerDefinition = None
""" Specified value if missing from instance.
Type `TriggerDefinition` (represented as `dict` in JSON). """
self.defaultValueUnsignedInt = None
""" Specified value if missing from instance.
Type `int`. """
self.defaultValueUri = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueUrl = None
""" Specified value if missing from instance.
Type `str`. """
self.defaultValueUsageContext = None
""" Specified value if missing from instance.
Type `UsageContext` (represented as `dict` in JSON). """
self.defaultValueUuid = None
""" Specified value if missing from instance.
Type `str`. """
self.definition = None
""" Full formal definition as narrative text.
Type `str`. """
self.example = None
""" Example value (as defined for type).
List of `ElementDefinitionExample` items (represented as `dict` in JSON). """
self.fixedAddress = None
""" Value must be exactly this.
Type `Address` (represented as `dict` in JSON). """
self.fixedAge = None
""" Value must be exactly this.
Type `Age` (represented as `dict` in JSON). """
self.fixedAnnotation = None
""" Value must be exactly this.
Type `Annotation` (represented as `dict` in JSON). """
self.fixedAttachment = None
""" Value must be exactly this.
Type `Attachment` (represented as `dict` in JSON). """
self.fixedBase64Binary = None
""" Value must be exactly this.
Type `str`. """
self.fixedBoolean = None
""" Value must be exactly this.
Type `bool`. """
self.fixedCanonical = None
""" Value must be exactly this.
Type `str`. """
self.fixedCode = None
""" Value must be exactly this.
Type `str`. """
self.fixedCodeableConcept = None
""" Value must be exactly this.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.fixedCoding = None
""" Value must be exactly this.
Type `Coding` (represented as `dict` in JSON). """
self.fixedContactDetail = None
""" Value must be exactly this.
Type `ContactDetail` (represented as `dict` in JSON). """
self.fixedContactPoint = None
""" Value must be exactly this.
Type `ContactPoint` (represented as `dict` in JSON). """
self.fixedContributor = None
""" Value must be exactly this.
Type `Contributor` (represented as `dict` in JSON). """
self.fixedCount = None
""" Value must be exactly this.
Type `Count` (represented as `dict` in JSON). """
self.fixedDataRequirement = None
""" Value must be exactly this.
Type `DataRequirement` (represented as `dict` in JSON). """
self.fixedDate = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedDateTime = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedDecimal = None
""" Value must be exactly this.
Type `float`. """
self.fixedDistance = None
""" Value must be exactly this.
Type `Distance` (represented as `dict` in JSON). """
self.fixedDosage = None
""" Value must be exactly this.
Type `Dosage` (represented as `dict` in JSON). """
self.fixedDuration = None
""" Value must be exactly this.
Type `Duration` (represented as `dict` in JSON). """
self.fixedExpression = None
""" Value must be exactly this.
Type `Expression` (represented as `dict` in JSON). """
self.fixedHumanName = None
""" Value must be exactly this.
Type `HumanName` (represented as `dict` in JSON). """
self.fixedId = None
""" Value must be exactly this.
Type `str`. """
self.fixedIdentifier = None
""" Value must be exactly this.
Type `Identifier` (represented as `dict` in JSON). """
self.fixedInstant = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedInteger = None
""" Value must be exactly this.
Type `int`. """
self.fixedMarkdown = None
""" Value must be exactly this.
Type `str`. """
self.fixedMoney = None
""" Value must be exactly this.
Type `Money` (represented as `dict` in JSON). """
self.fixedOid = None
""" Value must be exactly this.
Type `str`. """
self.fixedParameterDefinition = None
""" Value must be exactly this.
Type `ParameterDefinition` (represented as `dict` in JSON). """
self.fixedPeriod = None
""" Value must be exactly this.
Type `Period` (represented as `dict` in JSON). """
self.fixedPositiveInt = None
""" Value must be exactly this.
Type `int`. """
self.fixedQuantity = None
""" Value must be exactly this.
Type `Quantity` (represented as `dict` in JSON). """
self.fixedRange = None
""" Value must be exactly this.
Type `Range` (represented as `dict` in JSON). """
self.fixedRatio = None
""" Value must be exactly this.
Type `Ratio` (represented as `dict` in JSON). """
self.fixedReference = None
""" Value must be exactly this.
Type `FHIRReference` (represented as `dict` in JSON). """
self.fixedRelatedArtifact = None
""" Value must be exactly this.
Type `RelatedArtifact` (represented as `dict` in JSON). """
self.fixedSampledData = None
""" Value must be exactly this.
Type `SampledData` (represented as `dict` in JSON). """
self.fixedSignature = None
""" Value must be exactly this.
Type `Signature` (represented as `dict` in JSON). """
self.fixedString = None
""" Value must be exactly this.
Type `str`. """
self.fixedTime = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedTiming = None
""" Value must be exactly this.
Type `Timing` (represented as `dict` in JSON). """
self.fixedTriggerDefinition = None
""" Value must be exactly this.
Type `TriggerDefinition` (represented as `dict` in JSON). """
self.fixedUnsignedInt = None
""" Value must be exactly this.
Type `int`. """
self.fixedUri = None
""" Value must be exactly this.
Type `str`. """
self.fixedUrl = None
""" Value must be exactly this.
Type `str`. """
self.fixedUsageContext = None
""" Value must be exactly this.
Type `UsageContext` (represented as `dict` in JSON). """
self.fixedUuid = None
""" Value must be exactly this.
Type `str`. """
self.isModifier = None
""" If this modifies the meaning of other elements.
Type `bool`. """
self.isModifierReason = None
""" Reason that this element is marked as a modifier.
Type `str`. """
self.isSummary = None
""" Include when _summary = true?.
Type `bool`. """
self.label = None
""" Name for element to display with or prompt for element.
Type `str`. """
self.mapping = None
""" Map element to another set of definitions.
List of `ElementDefinitionMapping` items (represented as `dict` in JSON). """
self.max = None
""" Maximum Cardinality (a number or *).
Type `str`. """
self.maxLength = None
""" Max length for strings.
Type `int`. """
self.maxValueDate = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueDateTime = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueDecimal = None
""" Maximum Allowed Value (for some types).
Type `float`. """
self.maxValueInstant = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueInteger = None
""" Maximum Allowed Value (for some types).
Type `int`. """
self.maxValuePositiveInt = None
""" Maximum Allowed Value (for some types).
Type `int`. """
self.maxValueQuantity = None
""" Maximum Allowed Value (for some types).
Type `Quantity` (represented as `dict` in JSON). """
self.maxValueTime = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueUnsignedInt = None
""" Maximum Allowed Value (for some types).
Type `int`. """
self.meaningWhenMissing = None
""" Implicit meaning when this element is missing.
Type `str`. """
self.min = None
""" Minimum Cardinality.
Type `int`. """
self.minValueDate = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueDateTime = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueDecimal = None
""" Minimum Allowed Value (for some types).
Type `float`. """
self.minValueInstant = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueInteger = None
""" Minimum Allowed Value (for some types).
Type `int`. """
self.minValuePositiveInt = None
""" Minimum Allowed Value (for some types).
Type `int`. """
self.minValueQuantity = None
""" Minimum Allowed Value (for some types).
Type `Quantity` (represented as `dict` in JSON). """
self.minValueTime = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueUnsignedInt = None
""" Minimum Allowed Value (for some types).
Type `int`. """
self.mustSupport = None
""" If the element must be supported.
Type `bool`. """
self.orderMeaning = None
""" What the order of the elements means.
Type `str`. """
self.path = None
""" Path of the element in the hierarchy of elements.
Type `str`. """
self.patternAddress = None
""" Value must have at least these property values.
Type `Address` (represented as `dict` in JSON). """
self.patternAge = None
""" Value must have at least these property values.
Type `Age` (represented as `dict` in JSON). """
self.patternAnnotation = None
""" Value must have at least these property values.
Type `Annotation` (represented as `dict` in JSON). """
self.patternAttachment = None
""" Value must have at least these property values.
Type `Attachment` (represented as `dict` in JSON). """
self.patternBase64Binary = None
""" Value must have at least these property values.
Type `str`. """
self.patternBoolean = None
""" Value must have at least these property values.
Type `bool`. """
self.patternCanonical = None
""" Value must have at least these property values.
Type `str`. """
self.patternCode = None
""" Value must have at least these property values.
Type `str`. """
self.patternCodeableConcept = None
""" Value must have at least these property values.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.patternCoding = None
""" Value must have at least these property values.
Type `Coding` (represented as `dict` in JSON). """
self.patternContactDetail = None
""" Value must have at least these property values.
Type `ContactDetail` (represented as `dict` in JSON). """
self.patternContactPoint = None
""" Value must have at least these property values.
Type `ContactPoint` (represented as `dict` in JSON). """
self.patternContributor = None
""" Value must have at least these property values.
Type `Contributor` (represented as `dict` in JSON). """
self.patternCount = None
""" Value must have at least these property values.
Type `Count` (represented as `dict` in JSON). """
self.patternDataRequirement = None
""" Value must have at least these property values.
Type `DataRequirement` (represented as `dict` in JSON). """
self.patternDate = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternDateTime = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternDecimal = None
""" Value must have at least these property values.
Type `float`. """
self.patternDistance = None
""" Value must have at least these property values.
Type `Distance` (represented as `dict` in JSON). """
self.patternDosage = None
""" Value must have at least these property values.
Type `Dosage` (represented as `dict` in JSON). """
self.patternDuration = None
""" Value must have at least these property values.
Type `Duration` (represented as `dict` in JSON). """
self.patternExpression = None
""" Value must have at least these property values.
Type `Expression` (represented as `dict` in JSON). """
self.patternHumanName = None
""" Value must have at least these property values.
Type `HumanName` (represented as `dict` in JSON). """
self.patternId = None
""" Value must have at least these property values.
Type `str`. """
self.patternIdentifier = None
""" Value must have at least these property values.
Type `Identifier` (represented as `dict` in JSON). """
self.patternInstant = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternInteger = None
""" Value must have at least these property values.
Type `int`. """
self.patternMarkdown = None
""" Value must have at least these property values.
Type `str`. """
self.patternMoney = None
""" Value must have at least these property values.
Type `Money` (represented as `dict` in JSON). """
self.patternOid = None
""" Value must have at least these property values.
Type `str`. """
self.patternParameterDefinition = None
""" Value must have at least these property values.
Type `ParameterDefinition` (represented as `dict` in JSON). """
self.patternPeriod = None
""" Value must have at least these property values.
Type `Period` (represented as `dict` in JSON). """
self.patternPositiveInt = None
""" Value must have at least these property values.
Type `int`. """
self.patternQuantity = None
""" Value must have at least these property values.
Type `Quantity` (represented as `dict` in JSON). """
self.patternRange = None
""" Value must have at least these property values.
Type `Range` (represented as `dict` in JSON). """
self.patternRatio = None
""" Value must have at least these property values.
Type `Ratio` (represented as `dict` in JSON). """
self.patternReference = None
""" Value must have at least these property values.
Type `FHIRReference` (represented as `dict` in JSON). """
self.patternRelatedArtifact = None
""" Value must have at least these property values.
Type `RelatedArtifact` (represented as `dict` in JSON). """
self.patternSampledData = None
""" Value must have at least these property values.
Type `SampledData` (represented as `dict` in JSON). """
self.patternSignature = None
""" Value must have at least these property values.
Type `Signature` (represented as `dict` in JSON). """
self.patternString = None
""" Value must have at least these property values.
Type `str`. """
self.patternTime = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternTiming = None
""" Value must have at least these property values.
Type `Timing` (represented as `dict` in JSON). """
self.patternTriggerDefinition = None
""" Value must have at least these property values.
Type `TriggerDefinition` (represented as `dict` in JSON). """
self.patternUnsignedInt = None
""" Value must have at least these property values.
Type `int`. """
self.patternUri = None
""" Value must have at least these property values.
Type `str`. """
self.patternUrl = None
""" Value must have at least these property values.
Type `str`. """
self.patternUsageContext = None
""" Value must have at least these property values.
Type `UsageContext` (represented as `dict` in JSON). """
self.patternUuid = None
""" Value must have at least these property values.
Type `str`. """
self.representation = None
""" xmlAttr | xmlText | typeAttr | cdaText | xhtml.
List of `str` items. """
self.requirements = None
""" Why this resource has been created.
Type `str`. """
self.short = None
""" Concise definition for space-constrained presentation.
Type `str`. """
self.sliceIsConstraining = None
""" If this slice definition constrains an inherited slice definition
(or not).
Type `bool`. """
self.sliceName = None
""" Name for this particular element (in a set of slices).
Type `str`. """
self.slicing = None
""" This element is sliced - slices follow.
Type `ElementDefinitionSlicing` (represented as `dict` in JSON). """
self.type = None
""" Data type and Profile for this element.
List of `ElementDefinitionType` items (represented as `dict` in JSON). """
super(ElementDefinition, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinition, self).elementProperties()
js.extend([
("alias", "alias", str, True, None, False),
("base", "base", ElementDefinitionBase, False, None, False),
("binding", "binding", ElementDefinitionBinding, False, None, False),
("code", "code", coding.Coding, True, None, False),
("comment", "comment", str, False, None, False),
("condition", "condition", str, True, None, False),
("constraint", "constraint", ElementDefinitionConstraint, True, None, False),
("contentReference", "contentReference", str, False, None, False),
("defaultValueAddress", "defaultValueAddress", address.Address, False, "defaultValue", False),
("defaultValueAge", "defaultValueAge", age.Age, False, "defaultValue", False),
("defaultValueAnnotation", "defaultValueAnnotation", annotation.Annotation, False, "defaultValue", False),
("defaultValueAttachment", "defaultValueAttachment", attachment.Attachment, False, "defaultValue", False),
("defaultValueBase64Binary", "defaultValueBase64Binary", str, False, "defaultValue", False),
("defaultValueBoolean", "defaultValueBoolean", bool, False, "defaultValue", False),
("defaultValueCanonical", "defaultValueCanonical", str, False, "defaultValue", False),
("defaultValueCode", "defaultValueCode", str, False, "defaultValue", False),
("defaultValueCodeableConcept", "defaultValueCodeableConcept", codeableconcept.CodeableConcept, False, "defaultValue", False),
("defaultValueCoding", "defaultValueCoding", coding.Coding, False, "defaultValue", False),
("defaultValueContactDetail", "defaultValueContactDetail", contactdetail.ContactDetail, False, "defaultValue", False),
("defaultValueContactPoint", "defaultValueContactPoint", contactpoint.ContactPoint, False, "defaultValue", False),
("defaultValueContributor", "defaultValueContributor", contributor.Contributor, False, "defaultValue", False),
("defaultValueCount", "defaultValueCount", count.Count, False, "defaultValue", False),
("defaultValueDataRequirement", "defaultValueDataRequirement", datarequirement.DataRequirement, False, "defaultValue", False),
("defaultValueDate", "defaultValueDate", fhirdate.FHIRDate, False, "defaultValue", False),
("defaultValueDateTime", "defaultValueDateTime", fhirdate.FHIRDate, False, "defaultValue", False),
("defaultValueDecimal", "defaultValueDecimal", float, False, "defaultValue", False),
("defaultValueDistance", "defaultValueDistance", distance.Distance, False, "defaultValue", False),
("defaultValueDosage", "defaultValueDosage", dosage.Dosage, False, "defaultValue", False),
("defaultValueDuration", "defaultValueDuration", duration.Duration, False, "defaultValue", False),
("defaultValueExpression", "defaultValueExpression", expression.Expression, False, "defaultValue", False),
("defaultValueHumanName", "defaultValueHumanName", humanname.HumanName, False, "defaultValue", False),
("defaultValueId", "defaultValueId", str, False, "defaultValue", False),
("defaultValueIdentifier", "defaultValueIdentifier", identifier.Identifier, False, "defaultValue", False),
("defaultValueInstant", "defaultValueInstant", fhirdate.FHIRDate, False, "defaultValue", False),
("defaultValueInteger", "defaultValueInteger", int, False, "defaultValue", False),
("defaultValueMarkdown", "defaultValueMarkdown", str, False, "defaultValue", False),
("defaultValueMoney", "defaultValueMoney", money.Money, False, "defaultValue", False),
("defaultValueOid", "defaultValueOid", str, False, "defaultValue", False),
("defaultValueParameterDefinition", "defaultValueParameterDefinition", parameterdefinition.ParameterDefinition, False, "defaultValue", False),
("defaultValuePeriod", "defaultValuePeriod", period.Period, False, "defaultValue", False),
("defaultValuePositiveInt", "defaultValuePositiveInt", int, False, "defaultValue", False),
("defaultValueQuantity", "defaultValueQuantity", quantity.Quantity, False, "defaultValue", False),
("defaultValueRange", "defaultValueRange", range.Range, False, "defaultValue", False),
("defaultValueRatio", "defaultValueRatio", ratio.Ratio, False, "defaultValue", False),
("defaultValueReference", "defaultValueReference", fhirreference.FHIRReference, False, "defaultValue", False),
("defaultValueRelatedArtifact", "defaultValueRelatedArtifact", relatedartifact.RelatedArtifact, False, "defaultValue", False),
("defaultValueSampledData", "defaultValueSampledData", sampleddata.SampledData, False, "defaultValue", False),
("defaultValueSignature", "defaultValueSignature", signature.Signature, False, "defaultValue", False),
("defaultValueString", "defaultValueString", str, False, "defaultValue", False),
("defaultValueTime", "defaultValueTime", fhirdate.FHIRDate, False, "defaultValue", False),
("defaultValueTiming", "defaultValueTiming", timing.Timing, False, "defaultValue", False),
("defaultValueTriggerDefinition", "defaultValueTriggerDefinition", triggerdefinition.TriggerDefinition, False, "defaultValue", False),
("defaultValueUnsignedInt", "defaultValueUnsignedInt", int, False, "defaultValue", False),
("defaultValueUri", "defaultValueUri", str, False, "defaultValue", False),
("defaultValueUrl", "defaultValueUrl", str, False, "defaultValue", False),
("defaultValueUsageContext", "defaultValueUsageContext", usagecontext.UsageContext, False, "defaultValue", False),
("defaultValueUuid", "defaultValueUuid", str, False, "defaultValue", False),
("definition", "definition", str, False, None, False),
("example", "example", ElementDefinitionExample, True, None, False),
("fixedAddress", "fixedAddress", address.Address, False, "fixed", False),
("fixedAge", "fixedAge", age.Age, False, "fixed", False),
("fixedAnnotation", "fixedAnnotation", annotation.Annotation, False, "fixed", False),
("fixedAttachment", "fixedAttachment", attachment.Attachment, False, "fixed", False),
("fixedBase64Binary", "fixedBase64Binary", str, False, "fixed", False),
("fixedBoolean", "fixedBoolean", bool, False, "fixed", False),
("fixedCanonical", "fixedCanonical", str, False, "fixed", False),
("fixedCode", "fixedCode", str, False, "fixed", False),
("fixedCodeableConcept", "fixedCodeableConcept", codeableconcept.CodeableConcept, False, "fixed", False),
("fixedCoding", "fixedCoding", coding.Coding, False, "fixed", False),
("fixedContactDetail", "fixedContactDetail", contactdetail.ContactDetail, False, "fixed", False),
("fixedContactPoint", "fixedContactPoint", contactpoint.ContactPoint, False, "fixed", False),
("fixedContributor", "fixedContributor", contributor.Contributor, False, "fixed", False),
("fixedCount", "fixedCount", count.Count, False, "fixed", False),
("fixedDataRequirement", "fixedDataRequirement", datarequirement.DataRequirement, False, "fixed", False),
("fixedDate", "fixedDate", fhirdate.FHIRDate, False, "fixed", False),
("fixedDateTime", "fixedDateTime", fhirdate.FHIRDate, False, "fixed", False),
("fixedDecimal", "fixedDecimal", float, False, "fixed", False),
("fixedDistance", "fixedDistance", distance.Distance, False, "fixed", False),
("fixedDosage", "fixedDosage", dosage.Dosage, False, "fixed", False),
("fixedDuration", "fixedDuration", duration.Duration, False, "fixed", False),
("fixedExpression", "fixedExpression", expression.Expression, False, "fixed", False),
("fixedHumanName", "fixedHumanName", humanname.HumanName, False, "fixed", False),
("fixedId", "fixedId", str, False, "fixed", False),
("fixedIdentifier", "fixedIdentifier", identifier.Identifier, False, "fixed", False),
("fixedInstant", "fixedInstant", fhirdate.FHIRDate, False, "fixed", False),
("fixedInteger", "fixedInteger", int, False, "fixed", False),
("fixedMarkdown", "fixedMarkdown", str, False, "fixed", False),
("fixedMoney", "fixedMoney", money.Money, False, "fixed", False),
("fixedOid", "fixedOid", str, False, "fixed", False),
("fixedParameterDefinition", "fixedParameterDefinition", parameterdefinition.ParameterDefinition, False, "fixed", False),
("fixedPeriod", "fixedPeriod", period.Period, False, "fixed", False),
("fixedPositiveInt", "fixedPositiveInt", int, False, "fixed", False),
("fixedQuantity", "fixedQuantity", quantity.Quantity, False, "fixed", False),
("fixedRange", "fixedRange", range.Range, False, "fixed", False),
("fixedRatio", "fixedRatio", ratio.Ratio, False, "fixed", False),
("fixedReference", "fixedReference", fhirreference.FHIRReference, False, "fixed", False),
("fixedRelatedArtifact", "fixedRelatedArtifact", relatedartifact.RelatedArtifact, False, "fixed", False),
("fixedSampledData", "fixedSampledData", sampleddata.SampledData, False, "fixed", False),
("fixedSignature", "fixedSignature", signature.Signature, False, "fixed", False),
("fixedString", "fixedString", str, False, "fixed", False),
("fixedTime", "fixedTime", fhirdate.FHIRDate, False, "fixed", False),
("fixedTiming", "fixedTiming", timing.Timing, False, "fixed", False),
("fixedTriggerDefinition", "fixedTriggerDefinition", triggerdefinition.TriggerDefinition, False, "fixed", False),
("fixedUnsignedInt", "fixedUnsignedInt", int, False, "fixed", False),
("fixedUri", "fixedUri", str, False, "fixed", False),
("fixedUrl", "fixedUrl", str, False, "fixed", False),
("fixedUsageContext", "fixedUsageContext", usagecontext.UsageContext, False, "fixed", False),
("fixedUuid", "fixedUuid", str, False, "fixed", False),
("isModifier", "isModifier", bool, False, None, False),
("isModifierReason", "isModifierReason", str, False, None, False),
("isSummary", "isSummary", bool, False, None, False),
("label", "label", str, False, None, False),
("mapping", "mapping", ElementDefinitionMapping, True, None, False),
("max", "max", str, False, None, False),
("maxLength", "maxLength", int, False, None, False),
("maxValueDate", "maxValueDate", fhirdate.FHIRDate, False, "maxValue", False),
("maxValueDateTime", "maxValueDateTime", fhirdate.FHIRDate, False, "maxValue", False),
("maxValueDecimal", "maxValueDecimal", float, False, "maxValue", False),
("maxValueInstant", "maxValueInstant", fhirdate.FHIRDate, False, "maxValue", False),
("maxValueInteger", "maxValueInteger", int, False, "maxValue", False),
("maxValuePositiveInt", "maxValuePositiveInt", int, False, "maxValue", False),
("maxValueQuantity", "maxValueQuantity", quantity.Quantity, False, "maxValue", False),
("maxValueTime", "maxValueTime", fhirdate.FHIRDate, False, "maxValue", False),
("maxValueUnsignedInt", "maxValueUnsignedInt", int, False, "maxValue", False),
("meaningWhenMissing", "meaningWhenMissing", str, False, None, False),
("min", "min", int, False, None, False),
("minValueDate", "minValueDate", fhirdate.FHIRDate, False, "minValue", False),
("minValueDateTime", "minValueDateTime", fhirdate.FHIRDate, False, "minValue", False),
("minValueDecimal", "minValueDecimal", float, False, "minValue", False),
("minValueInstant", "minValueInstant", fhirdate.FHIRDate, False, "minValue", False),
("minValueInteger", "minValueInteger", int, False, "minValue", False),
("minValuePositiveInt", "minValuePositiveInt", int, False, "minValue", False),
("minValueQuantity", "minValueQuantity", quantity.Quantity, False, "minValue", False),
("minValueTime", "minValueTime", fhirdate.FHIRDate, False, "minValue", False),
("minValueUnsignedInt", "minValueUnsignedInt", int, False, "minValue", False),
("mustSupport", "mustSupport", bool, False, None, False),
("orderMeaning", "orderMeaning", str, False, None, False),
("path", "path", str, False, None, True),
("patternAddress", "patternAddress", address.Address, False, "pattern", False),
("patternAge", "patternAge", age.Age, False, "pattern", False),
("patternAnnotation", "patternAnnotation", annotation.Annotation, False, "pattern", False),
("patternAttachment", "patternAttachment", attachment.Attachment, False, "pattern", False),
("patternBase64Binary", "patternBase64Binary", str, False, "pattern", False),
("patternBoolean", "patternBoolean", bool, False, "pattern", False),
("patternCanonical", "patternCanonical", str, False, "pattern", False),
("patternCode", "patternCode", str, False, "pattern", False),
("patternCodeableConcept", "patternCodeableConcept", codeableconcept.CodeableConcept, False, "pattern", False),
("patternCoding", "patternCoding", coding.Coding, False, "pattern", False),
("patternContactDetail", "patternContactDetail", contactdetail.ContactDetail, False, "pattern", False),
("patternContactPoint", "patternContactPoint", contactpoint.ContactPoint, False, "pattern", False),
("patternContributor", "patternContributor", contributor.Contributor, False, "pattern", False),
("patternCount", "patternCount", count.Count, False, "pattern", False),
("patternDataRequirement", "patternDataRequirement", datarequirement.DataRequirement, False, "pattern", False),
("patternDate", "patternDate", fhirdate.FHIRDate, False, "pattern", False),
("patternDateTime", "patternDateTime", fhirdate.FHIRDate, False, "pattern", False),
("patternDecimal", "patternDecimal", float, False, "pattern", False),
("patternDistance", "patternDistance", distance.Distance, False, "pattern", False),
("patternDosage", "patternDosage", dosage.Dosage, False, "pattern", False),
("patternDuration", "patternDuration", duration.Duration, False, "pattern", False),
("patternExpression", "patternExpression", expression.Expression, False, "pattern", False),
("patternHumanName", "patternHumanName", humanname.HumanName, False, "pattern", False),
("patternId", "patternId", str, False, "pattern", False),
("patternIdentifier", "patternIdentifier", identifier.Identifier, False, "pattern", False),
("patternInstant", "patternInstant", fhirdate.FHIRDate, False, "pattern", False),
("patternInteger", "patternInteger", int, False, "pattern", False),
("patternMarkdown", "patternMarkdown", str, False, "pattern", False),
("patternMoney", "patternMoney", money.Money, False, "pattern", False),
("patternOid", "patternOid", str, False, "pattern", False),
("patternParameterDefinition", "patternParameterDefinition", parameterdefinition.ParameterDefinition, False, "pattern", False),
("patternPeriod", "patternPeriod", period.Period, False, "pattern", False),
("patternPositiveInt", "patternPositiveInt", int, False, "pattern", False),
("patternQuantity", "patternQuantity", quantity.Quantity, False, "pattern", False),
("patternRange", "patternRange", range.Range, False, "pattern", False),
("patternRatio", "patternRatio", ratio.Ratio, False, "pattern", False),
("patternReference", "patternReference", fhirreference.FHIRReference, False, "pattern", False),
("patternRelatedArtifact", "patternRelatedArtifact", relatedartifact.RelatedArtifact, False, "pattern", False),
("patternSampledData", "patternSampledData", sampleddata.SampledData, False, "pattern", False),
("patternSignature", "patternSignature", signature.Signature, False, "pattern", False),
("patternString", "patternString", str, False, "pattern", False),
("patternTime", "patternTime", fhirdate.FHIRDate, False, "pattern", False),
("patternTiming", "patternTiming", timing.Timing, False, "pattern", False),
("patternTriggerDefinition", "patternTriggerDefinition", triggerdefinition.TriggerDefinition, False, "pattern", False),
("patternUnsignedInt", "patternUnsignedInt", int, False, "pattern", False),
("patternUri", "patternUri", str, False, "pattern", False),
("patternUrl", "patternUrl", str, False, "pattern", False),
("patternUsageContext", "patternUsageContext", usagecontext.UsageContext, False, "pattern", False),
("patternUuid", "patternUuid", str, False, "pattern", False),
("representation", "representation", str, True, None, False),
("requirements", "requirements", str, False, None, False),
("short", "short", str, False, None, False),
("sliceIsConstraining", "sliceIsConstraining", bool, False, None, False),
("sliceName", "sliceName", str, False, None, False),
("slicing", "slicing", ElementDefinitionSlicing, False, None, False),
("type", "type", ElementDefinitionType, True, None, False),
])
return js
from . import element
class ElementDefinitionBase(element.Element):
""" Base definition information for tools.
Information about the base definition of the element, provided to make it
unnecessary for tools to trace the deviation of the element through the
derived and related profiles. When the element definition is not the
    original definition of an element - e.g. either in a constraint on another
    type, or for elements from a super type in a snapshot - then the
    information provided in the element definition may be different to the
    base definition. On the original definition of the element, it will be the
    same.
"""
resource_type = "ElementDefinitionBase"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.max = None
""" Max cardinality of the base element.
Type `str`. """
self.min = None
""" Min cardinality of the base element.
Type `int`. """
self.path = None
""" Path that identifies the base element.
Type `str`. """
super(ElementDefinitionBase, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinitionBase, self).elementProperties()
js.extend([
("max", "max", str, False, None, True),
("min", "min", int, False, None, True),
("path", "path", str, False, None, True),
])
return js
class ElementDefinitionBinding(element.Element):
""" ValueSet details if this is coded.
Binds to a value set if this element is coded (code, Coding,
CodeableConcept, Quantity), or the data types (string, uri).
"""
resource_type = "ElementDefinitionBinding"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Human explanation of the value set.
Type `str`. """
self.strength = None
""" required | extensible | preferred | example.
Type `str`. """
self.valueSet = None
""" Source of value set.
Type `str`. """
super(ElementDefinitionBinding, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinitionBinding, self).elementProperties()
js.extend([
("description", "description", str, False, None, False),
("strength", "strength", str, False, None, True),
("valueSet", "valueSet", str, False, None, False),
])
return js
class ElementDefinitionConstraint(element.Element):
""" Condition that must evaluate to true.
Formal constraints such as co-occurrence and other constraints that can be
computationally evaluated within the context of the instance.
"""
resource_type = "ElementDefinitionConstraint"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.expression = None
""" FHIRPath expression of constraint.
Type `str`. """
self.human = None
""" Human description of constraint.
Type `str`. """
self.key = None
""" Target of 'condition' reference above.
Type `str`. """
self.requirements = None
""" Why this constraint is necessary or appropriate.
Type `str`. """
self.severity = None
""" error | warning.
Type `str`. """
self.source = None
""" Reference to original source of constraint.
Type `str`. """
self.xpath = None
""" XPath expression of constraint.
Type `str`. """
super(ElementDefinitionConstraint, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinitionConstraint, self).elementProperties()
js.extend([
("expression", "expression", str, False, None, False),
("human", "human", str, False, None, True),
("key", "key", str, False, None, True),
("requirements", "requirements", str, False, None, False),
("severity", "severity", str, False, None, True),
("source", "source", str, False, None, False),
("xpath", "xpath", str, False, None, False),
])
return js
class ElementDefinitionExample(element.Element):
""" Example value (as defined for type).
A sample value for this element demonstrating the type of information that
would typically be found in the element.
"""
resource_type = "ElementDefinitionExample"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.label = None
""" Describes the purpose of this example.
Type `str`. """
self.valueAddress = None
""" Value of Example (one of allowed types).
Type `Address` (represented as `dict` in JSON). """
self.valueAge = None
""" Value of Example (one of allowed types).
Type `Age` (represented as `dict` in JSON). """
self.valueAnnotation = None
""" Value of Example (one of allowed types).
Type `Annotation` (represented as `dict` in JSON). """
self.valueAttachment = None
""" Value of Example (one of allowed types).
Type `Attachment` (represented as `dict` in JSON). """
self.valueBase64Binary = None
""" Value of Example (one of allowed types).
Type `str`. """
self.valueBoolean = None
""" Value of Example (one of allowed types).
Type `bool`. """
self.valueCanonical = None
""" Value of Example (one of allowed types).
Type `str`. """
self.valueCode = None
""" Value of Example (one of allowed types).
Type `str`. """
self.valueCodeableConcept = None
""" Value of Example (one of allowed types).
Type `CodeableConcept` (represented as `dict` in JSON). """
self.valueCoding = None
""" Value of Example (one of allowed types).
Type `Coding` (represented as `dict` in JSON). """
self.valueContactDetail = None
""" Value of Example (one of allowed types).
Type `ContactDetail` (represented as `dict` in JSON). """
self.valueContactPoint = None
""" Value of Example (one of allowed types).
Type `ContactPoint` (represented as `dict` in JSON). """
self.valueContributor = None
""" Value of Example (one of allowed types).
Type `Contributor` (represented as `dict` in JSON). """
self.valueCount = None
""" Value of Example (one of allowed types).
Type `Count` (represented as `dict` in JSON). """
self.valueDataRequirement = None
""" Value of Example (one of allowed types).
Type `DataRequirement` (represented as `dict` in JSON). """
self.valueDate = None
""" Value of Example (one of allowed types).
Type `FHIRDate` (represented as `str` in JSON). """
self.valueDateTime = None
""" Value of Example (one of allowed types).
Type `FHIRDate` (represented as `str` in JSON). """
self.valueDecimal = None
""" Value of Example (one of allowed types).
Type `float`. """
self.valueDistance = None
""" Value of Example (one of allowed types).
Type `Distance` (represented as `dict` in JSON). """
self.valueDosage = None
""" Value of Example (one of allowed types).
Type `Dosage` (represented as `dict` in JSON). """
self.valueDuration = None
""" Value of Example (one of allowed types).
Type `Duration` (represented as `dict` in JSON). """
self.valueExpression = None
""" Value of Example (one of allowed types).
Type `Expression` (represented as `dict` in JSON). """
self.valueHumanName = None
""" Value of Example (one of allowed types).
Type `HumanName` (represented as `dict` in JSON). """
self.valueId = None
""" Value of Example (one of allowed types).
Type `str`. """
self.valueIdentifier = None
""" Value of Example (one of allowed types).
Type `Identifier` (represented as `dict` in JSON). """
self.valueInstant = None
""" Value of Example (one of allowed types).
Type `FHIRDate` (represented as `str` in JSON). """
self.valueInteger = None
""" Value of Example (one of allowed types).
Type `int`. """
self.valueMarkdown = None
""" Value of Example (one of allowed types).
Type `str`. """
self.valueMoney = None
""" Value of Example (one of allowed types).
Type `Money` (represented as `dict` in JSON). """
self.valueOid = None
""" Value of Example (one of allowed types).
Type `str`. """
self.valueParameterDefinition = None
""" Value of Example (one of allowed types).
Type `ParameterDefinition` (represented as `dict` in JSON). """
self.valuePeriod = None
""" Value of Example (one of allowed types).
Type `Period` (represented as `dict` in JSON). """
self.valuePositiveInt = None
""" Value of Example (one of allowed types).
Type `int`. """
self.valueQuantity = None
""" Value of Example (one of allowed types).
Type `Quantity` (represented as `dict` in JSON). """
self.valueRange = None
""" Value of Example (one of allowed types).
Type `Range` (represented as `dict` in JSON). """
self.valueRatio = None
""" Value of Example (one of allowed types).
Type `Ratio` (represented as `dict` in JSON). """
self.valueReference = None
""" Value of Example (one of allowed types).
Type `FHIRReference` (represented as `dict` in JSON). """
self.valueRelatedArtifact = None
""" Value of Example (one of allowed types).
Type `RelatedArtifact` (represented as `dict` in JSON). """
self.valueSampledData = None
""" Value of Example (one of allowed types).
Type `SampledData` (represented as `dict` in JSON). """
self.valueSignature = None
""" Value of Example (one of allowed types).
Type `Signature` (represented as `dict` in JSON). """
self.valueString = None
""" Value of Example (one of allowed types).
Type `str`. """
self.valueTime = None
""" Value of Example (one of allowed types).
Type `FHIRDate` (represented as `str` in JSON). """
self.valueTiming = None
""" Value of Example (one of allowed types).
Type `Timing` (represented as `dict` in JSON). """
self.valueTriggerDefinition = None
""" Value of Example (one of allowed types).
Type `TriggerDefinition` (represented as `dict` in JSON). """
self.valueUnsignedInt = None
""" Value of Example (one of allowed types).
Type `int`. """
self.valueUri = None
""" Value of Example (one of allowed types).
Type `str`. """
self.valueUrl = None
""" Value of Example (one of allowed types).
Type `str`. """
self.valueUsageContext = None
""" Value of Example (one of allowed types).
Type `UsageContext` (represented as `dict` in JSON). """
self.valueUuid = None
""" Value of Example (one of allowed types).
Type `str`. """
super(ElementDefinitionExample, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinitionExample, self).elementProperties()
js.extend([
("label", "label", str, False, None, True),
("valueAddress", "valueAddress", address.Address, False, "value", True),
("valueAge", "valueAge", age.Age, False, "value", True),
("valueAnnotation", "valueAnnotation", annotation.Annotation, False, "value", True),
("valueAttachment", "valueAttachment", attachment.Attachment, False, "value", True),
("valueBase64Binary", "valueBase64Binary", str, False, "value", True),
("valueBoolean", "valueBoolean", bool, False, "value", True),
("valueCanonical", "valueCanonical", str, False, "value", True),
("valueCode", "valueCode", str, False, "value", True),
("valueCodeableConcept", "valueCodeableConcept", codeableconcept.CodeableConcept, False, "value", True),
("valueCoding", "valueCoding", coding.Coding, False, "value", True),
("valueContactDetail", "valueContactDetail", contactdetail.ContactDetail, False, "value", True),
("valueContactPoint", "valueContactPoint", contactpoint.ContactPoint, False, "value", True),
("valueContributor", "valueContributor", contributor.Contributor, False, "value", True),
("valueCount", "valueCount", count.Count, False, "value", True),
("valueDataRequirement", "valueDataRequirement", datarequirement.DataRequirement, False, "value", True),
("valueDate", "valueDate", fhirdate.FHIRDate, False, "value", True),
("valueDateTime", "valueDateTime", fhirdate.FHIRDate, False, "value", True),
("valueDecimal", "valueDecimal", float, False, "value", True),
("valueDistance", "valueDistance", distance.Distance, False, "value", True),
("valueDosage", "valueDosage", dosage.Dosage, False, "value", True),
("valueDuration", "valueDuration", duration.Duration, False, "value", True),
("valueExpression", "valueExpression", expression.Expression, False, "value", True),
("valueHumanName", "valueHumanName", humanname.HumanName, False, "value", True),
("valueId", "valueId", str, False, "value", True),
("valueIdentifier", "valueIdentifier", identifier.Identifier, False, "value", True),
("valueInstant", "valueInstant", fhirdate.FHIRDate, False, "value", True),
("valueInteger", "valueInteger", int, False, "value", True),
("valueMarkdown", "valueMarkdown", str, False, "value", True),
("valueMoney", "valueMoney", money.Money, False, "value", True),
("valueOid", "valueOid", str, False, "value", True),
("valueParameterDefinition", "valueParameterDefinition", parameterdefinition.ParameterDefinition, False, "value", True),
("valuePeriod", "valuePeriod", period.Period, False, "value", True),
("valuePositiveInt", "valuePositiveInt", int, False, "value", True),
("valueQuantity", "valueQuantity", quantity.Quantity, False, "value", True),
("valueRange", "valueRange", range.Range, False, "value", True),
("valueRatio", "valueRatio", ratio.Ratio, False, "value", True),
("valueReference", "valueReference", fhirreference.FHIRReference, False, "value", True),
("valueRelatedArtifact", "valueRelatedArtifact", relatedartifact.RelatedArtifact, False, "value", True),
("valueSampledData", "valueSampledData", sampleddata.SampledData, False, "value", True),
("valueSignature", "valueSignature", signature.Signature, False, "value", True),
("valueString", "valueString", str, False, "value", True),
("valueTime", "valueTime", fhirdate.FHIRDate, False, "value", True),
("valueTiming", "valueTiming", timing.Timing, False, "value", True),
("valueTriggerDefinition", "valueTriggerDefinition", triggerdefinition.TriggerDefinition, False, "value", True),
("valueUnsignedInt", "valueUnsignedInt", int, False, "value", True),
("valueUri", "valueUri", str, False, "value", True),
("valueUrl", "valueUrl", str, False, "value", True),
("valueUsageContext", "valueUsageContext", usagecontext.UsageContext, False, "value", True),
("valueUuid", "valueUuid", str, False, "value", True),
])
return js
class ElementDefinitionMapping(element.Element):
""" Map element to another set of definitions.
Identifies a concept from an external specification that roughly
corresponds to this element.
"""
resource_type = "ElementDefinitionMapping"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.comment = None
""" Comments about the mapping or its use.
Type `str`. """
self.identity = None
""" Reference to mapping declaration.
Type `str`. """
self.language = None
""" Computable language of mapping.
Type `str`. """
self.map = None
""" Details of the mapping.
Type `str`. """
super(ElementDefinitionMapping, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinitionMapping, self).elementProperties()
js.extend([
("comment", "comment", str, False, None, False),
("identity", "identity", str, False, None, True),
("language", "language", str, False, None, False),
("map", "map", str, False, None, True),
])
return js
class ElementDefinitionSlicing(element.Element):
""" This element is sliced - slices follow.
Indicates that the element is sliced into a set of alternative definitions
(i.e. in a structure definition, there are multiple different constraints
on a single element in the base resource). Slicing can be used in any
resource that has cardinality ..* on the base resource, or any resource
with a choice of types. The set of slices is any elements that come after
this in the element sequence that have the same path, until a shorter path
occurs (the shorter path terminates the set).
"""
resource_type = "ElementDefinitionSlicing"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Text description of how slicing works (or not).
Type `str`. """
self.discriminator = None
""" Element values that are used to distinguish the slices.
List of `ElementDefinitionSlicingDiscriminator` items (represented as `dict` in JSON). """
self.ordered = None
""" If elements must be in same order as slices.
Type `bool`. """
self.rules = None
""" closed | open | openAtEnd.
Type `str`. """
super(ElementDefinitionSlicing, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinitionSlicing, self).elementProperties()
js.extend([
("description", "description", str, False, None, False),
("discriminator", "discriminator", ElementDefinitionSlicingDiscriminator, True, None, False),
("ordered", "ordered", bool, False, None, False),
("rules", "rules", str, False, None, True),
])
return js
class ElementDefinitionSlicingDiscriminator(element.Element):
""" Element values that are used to distinguish the slices.
Designates which child elements are used to discriminate between the slices
when processing an instance. If one or more discriminators are provided,
the value of the child elements in the instance data SHALL completely
distinguish which slice the element in the resource matches based on the
allowed values for those elements in each of the slices.
"""
resource_type = "ElementDefinitionSlicingDiscriminator"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.path = None
""" Path to element value.
Type `str`. """
self.type = None
""" value | exists | pattern | type | profile.
Type `str`. """
super(ElementDefinitionSlicingDiscriminator, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinitionSlicingDiscriminator, self).elementProperties()
js.extend([
("path", "path", str, False, None, True),
("type", "type", str, False, None, True),
])
return js
class ElementDefinitionType(element.Element):
""" Data type and Profile for this element.
The data type or resource that the value of this element is permitted to
be.
"""
resource_type = "ElementDefinitionType"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.aggregation = None
""" contained | referenced | bundled - how aggregated.
List of `str` items. """
self.code = None
""" Data type or Resource (reference to definition).
Type `str`. """
self.profile = None
""" Profiles (StructureDefinition or IG) - one must apply.
List of `str` items. """
self.targetProfile = None
""" Profile (StructureDefinition or IG) on the Reference/canonical
target - one must apply.
List of `str` items. """
self.versioning = None
""" either | independent | specific.
Type `str`. """
super(ElementDefinitionType, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinitionType, self).elementProperties()
js.extend([
("aggregation", "aggregation", str, True, None, False),
("code", "code", str, False, None, True),
("profile", "profile", str, True, None, False),
("targetProfile", "targetProfile", str, True, None, False),
("versioning", "versioning", str, False, None, False),
])
return js
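# --- Illustrative sketch (not part of the generated FHIR model) -------------
# Each elementProperties() tuple is (name, json_name, type, is_list,
# "of_many" group, required).  Assuming the usual fhirclient-style Element
# base class, a mapping with its two required string fields can be built from
# a plain JSON dict and round-tripped; the helper name and values below are
# hypothetical.
def _example_element_definition_mapping():
    mapping = ElementDefinitionMapping(jsondict={"identity": "rim", "map": "n/a"})
    return mapping.as_json()  # e.g. {'identity': 'rim', 'map': 'n/a'}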
import sys
try:
from . import address
except ImportError:
address = sys.modules[__package__ + '.address']
try:
from . import age
except ImportError:
age = sys.modules[__package__ + '.age']
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import coding
except ImportError:
coding = sys.modules[__package__ + '.coding']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import contactpoint
except ImportError:
contactpoint = sys.modules[__package__ + '.contactpoint']
try:
from . import contributor
except ImportError:
contributor = sys.modules[__package__ + '.contributor']
try:
from . import count
except ImportError:
count = sys.modules[__package__ + '.count']
try:
from . import datarequirement
except ImportError:
datarequirement = sys.modules[__package__ + '.datarequirement']
try:
from . import distance
except ImportError:
distance = sys.modules[__package__ + '.distance']
try:
from . import dosage
except ImportError:
dosage = sys.modules[__package__ + '.dosage']
try:
from . import duration
except ImportError:
duration = sys.modules[__package__ + '.duration']
try:
from . import expression
except ImportError:
expression = sys.modules[__package__ + '.expression']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import humanname
except ImportError:
humanname = sys.modules[__package__ + '.humanname']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import money
except ImportError:
money = sys.modules[__package__ + '.money']
try:
from . import parameterdefinition
except ImportError:
parameterdefinition = sys.modules[__package__ + '.parameterdefinition']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
try:
from . import range
except ImportError:
range = sys.modules[__package__ + '.range']
try:
from . import ratio
except ImportError:
ratio = sys.modules[__package__ + '.ratio']
try:
from . import relatedartifact
except ImportError:
relatedartifact = sys.modules[__package__ + '.relatedartifact']
try:
from . import sampleddata
except ImportError:
sampleddata = sys.modules[__package__ + '.sampleddata']
try:
from . import signature
except ImportError:
signature = sys.modules[__package__ + '.signature']
try:
from . import timing
except ImportError:
timing = sys.modules[__package__ + '.timing']
try:
from . import triggerdefinition
except ImportError:
triggerdefinition = sys.modules[__package__ + '.triggerdefinition']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext']
| 44.38237
| 155
| 0.587779
|
fc4cd29cac3df3372927af1d42b4131f56ba1d48
| 4,190
|
py
|
Python
|
app.py
|
tiagofsv95/trabalho_final_docs
|
1842f73d1a627a4c08d6b4da321e2bf381151857
|
[
"MIT"
] | null | null | null |
app.py
|
tiagofsv95/trabalho_final_docs
|
1842f73d1a627a4c08d6b4da321e2bf381151857
|
[
"MIT"
] | null | null | null |
app.py
|
tiagofsv95/trabalho_final_docs
|
1842f73d1a627a4c08d6b4da321e2bf381151857
|
[
"MIT"
] | null | null | null |
import werkzeug
from flask.scaffold import _endpoint_from_view_func
from werkzeug.utils import cached_property
import flask
flask.helpers._endpoint_from_view_func = _endpoint_from_view_func
werkzeug.cached_property = cached_property
from flask import Flask, make_response, jsonify, request
from flask_restplus import Api, Resource, fields
app = Flask(__name__)
api = Api()
api.init_app(app)
@api.route('/test')
class test_get(Resource):
def get(self):
resp = make_response(jsonify({'mensagem': 'APPLICATION UP.'}), 200)
return resp
user_fields = api.model('Usuario', {
'id': fields.String,
'nome': fields.String,
'email': fields.String,
'rua': fields.String,
'bairro': fields.String,
'cep': fields.String,
'cidadeId': fields.String,
'estadoId': fields.String,
'sexoId': fields.String,
'telefone': fields.String,
'foto': fields.String,
})
@api.route('/usuario')
@api.doc(model='Usuario')
class create_user(Resource):
def post(self):
return {}
@api.route('/usuarios')
@api.doc(model='Usuario')
class get_all_users(Resource):
def get(self):
return {}
@api.route('/usuario/<iduser>')
@api.doc(params={'iduser': 'An User ID'})
class get_user_by_id(Resource):
def get(self, iduser):
return {}
@api.route('/usuario/<iduser>')
@api.doc(params={'iduser': 'An User ID'})
class update_user(Resource):
def put(self, iduser):
return {}
@api.route('/usuario/<iduser>')
@api.doc(params={'iduser': 'An User ID'})
class delete_user(Resource):
def delete(self, iduser):
return {}
user_auth = api.model('user_auth', {
'nome': fields.String,
'senha': fields.String,
})
@api.route('/autenticarUsuario')
class auth_user(Resource):
@api.expect(user_auth)
def post(self):
return {}
@api.route('/deslogarUsuario')
class logout_user(Resource):
def post(self):
return {}
@api.route('/racas')
class get_all_breed(Resource):
def get(self):
return {}
@api.route('/raca/<idbreed>')
@api.doc(params={'idbreed': 'An Breed ID'})
class get_breed_by_id(Resource):
def get(self, idbreed):
return {}
@api.route('/racas/<sizeId>')
@api.doc(params={'sizeId': 'An size ID'})
class get_breed_by_size(Resource):
def get(self, sizeId):
return {}
@api.route('/portes')
class get_all_size(Resource):
def get(self):
return {}
@api.route('/cachorro')
class create_dog(Resource):
def post(self):
return {}
@api.route('/cachorro')
class update_dog(Resource):
def put(self):
return {}
@api.route('/cachorro/<iddog>')
@api.doc(params={'iddog': 'An dog ID'})
class delete_dog(Resource):
def delete(self, iddog):
return {}
@api.route('/cachorro/<iddog>')
@api.doc(params={'iddog': 'An dog ID'})
class get_dog_by_id(Resource):
def get(self, iddog):
return {}
@api.route('/usuarioCachorros/<iduser>')
@api.doc(params={'iduser': 'An user ID'})
class get_dogs_by_user(Resource):
def get(self, iduser):
return {}
@api.route('/cachorros')
class get_all_dogs(Resource):
def get(self):
return {}
@api.route('/informacoes')
class get_info(Resource):
def get(self):
return {}
@api.route('/fotoPerfil')
class upload_profile_photo(Resource):
def post(self):
return {}
@api.route('/fotoPerfil/<iduser>')
@api.doc(params={'iduser': 'An user ID'})
class get_profile_photo(Resource):
def get(self, iduser):
return {}
@api.route('/fotoPerfil/<iduser>')
@api.doc(params={'iduser': 'An user ID'})
class delete_profile_photo(Resource):
def delete(self, iduser):
return {}
@api.route('/fotoCachorro')
class upload_dog_photo(Resource):
def post(self):
return {}
@api.route('/fotoCachorro/<iddog>')
@api.doc(params={'iddog': 'An dog ID'})
class get_dog_photo(Resource):
def get(self, iddog):
return {}
@api.route('/fotoCachorro/<iddog>')
@api.doc(params={'iddog': 'An dog ID'})
class delete_dog_photo(Resource):
def delete(self, iddog):
return {}
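# Illustrative smoke test (assumed helper, not part of the original API):
# Flask's built-in test client exercises the /test route without starting a
# server, which is handy given that the remaining resources are still stubs.
def _demo_smoke_test():
    client = app.test_client()
    resp = client.get('/test')
    return resp.status_code, resp.get_json()  # expected: (200, {'mensagem': 'APPLICATION UP.'})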
#######################################################
# Application execution
if __name__ == '__main__':
app.run()
| 22.771739
| 75
| 0.642959
|
d25b81625e025da44045907c78d48e62d294e11f
| 2,294
|
py
|
Python
|
exact_solvers/acoustics.py
|
haraldschilly/riemann_book
|
46698d695c43da1ad51cd10249240b2531ee578e
|
[
"BSD-3-Clause"
] | null | null | null |
exact_solvers/acoustics.py
|
haraldschilly/riemann_book
|
46698d695c43da1ad51cd10249240b2531ee578e
|
[
"BSD-3-Clause"
] | null | null | null |
exact_solvers/acoustics.py
|
haraldschilly/riemann_book
|
46698d695c43da1ad51cd10249240b2531ee578e
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Exact Riemann solver for the acoustics wave equation in 1D,
including some plotting functionality.
"""
import sys, os
import numpy as np
from utils import riemann_tools
def lambda1(q, xi, aux):
"""Characteristic speed for 1-waves."""
rho, bulk = aux
return -np.sqrt(bulk/rho)
def lambda2(q, xi, aux):
"""Characteristic speed for 2-waves."""
rho, bulk = aux
return np.sqrt(bulk/rho)
def exact_riemann_solution(ql, qr, aux):
""" Exact solution of Riemann problem for acoustics equations."""
# Define delta q, speeds and impedance
rho, bulk = aux
dq = qr - ql
c = np.sqrt(bulk/rho)
Z = rho*c
# Define the 2 eigenvectors
r1 = np.array([-Z, 1])
r2 = np.array([Z, 1])
alpha1 = (-dq[0] + dq[1]*Z)/(2*Z)
alpha2 = (dq[0] + dq[1]*Z)/(2*Z)
# Compute middle state qm
qm = ql + alpha1*r1
# It is equivalent to
#qm = qr - alpha2*r2
# Compute wave speeds
speeds = np.zeros(2)
speeds[0] = -c
speeds[1] = c
# Concatenate states for plotting
states = np.column_stack([ql,qm,qr])
# Calculate reval function (used for plotting the solution)
def reval(xi):
r"""Returns the Riemann solution for any value of xi = x/t.
"""
p_out = (xi<=speeds[0] )*ql[0] \
+ (xi>speeds[0])*(xi<=speeds[1])*qm[0] \
+ (xi>speeds[1] )*qr[0]
u_out = (xi<=speeds[0] )*ql[1] \
+ (xi>speeds[0])*(xi<=speeds[1])*qm[1] \
+ (xi>speeds[1] )*qr[1]
return p_out, u_out
return states, speeds, reval
def riemann_plot_func(q_l, q_r, aux):
"""Return Riemann plot function for (only) time-dependent interacts. """
ex_states, ex_speeds, reval = exact_riemann_solution(q_l ,q_r, aux)
plot_function = riemann_tools.make_plot_function(ex_states, ex_speeds, reval, layout='vertical',
variable_names=['pressure', 'velocity'],
aux=(np.array(aux),np.array(aux)),
plot_chars=[lambda1, lambda2])
return plot_function
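# --- Illustrative usage sketch (values assumed, not from the original) ------
# exact_riemann_solution takes q = (pressure, velocity) states and
# aux = (rho, bulk); the returned reval closure samples the self-similar
# solution at any xi = x/t.
def _demo_exact_solution():
    ql = np.array([1.0, 0.0])   # higher pressure on the left, fluid at rest
    qr = np.array([0.2, 0.0])
    aux = (1.0, 1.0)            # unit density and bulk modulus, so c = Z = 1
    states, speeds, reval = exact_riemann_solution(ql, qr, aux)
    p_mid, u_mid = reval(np.array([0.0]))  # middle state, sampled at xi = 0
    return states, speeds, p_mid, u_mid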
| 29.792208
| 100
| 0.531386
|
e410705ec73b295c0eb2a30f81beaece34f5684e
| 20,274
|
py
|
Python
|
pyro/infer/mcmc/nuts.py
|
jrmcornish/pyro
|
38914d5eb596dc140e226031534ff4ea7903dc35
|
[
"MIT"
] | null | null | null |
pyro/infer/mcmc/nuts.py
|
jrmcornish/pyro
|
38914d5eb596dc140e226031534ff4ea7903dc35
|
[
"MIT"
] | null | null | null |
pyro/infer/mcmc/nuts.py
|
jrmcornish/pyro
|
38914d5eb596dc140e226031534ff4ea7903dc35
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
import torch
import pyro
import pyro.distributions as dist
from pyro.distributions.util import scalar_like
from pyro.infer.mcmc.hmc import HMC
from pyro.ops.integrator import velocity_verlet
from pyro.util import optional, torch_isnan
def _logaddexp(x, y):
minval, maxval = (x, y) if x < y else (y, x)
return (minval - maxval).exp().log1p() + maxval
# sum_accept_probs and num_proposals are used to calculate
# the statistic accept_prob for Dual Averaging scheme;
# z_left_grads and z_right_grads are kept to avoid recalculating
# grads at left and right leaves;
# r_sum is used to check turning condition;
# z_proposal_pe and z_proposal_grads are used to cache the
# potential energy and potential energy gradient values for
# the proposal trace.
# weight is the number of valid points in case we use slice sampling
# and is the log sum of (unnormalized) probabilities of valid points
# when we use multinomial sampling
_TreeInfo = namedtuple("TreeInfo", ["z_left", "r_left", "z_left_grads",
"z_right", "r_right", "z_right_grads",
"z_proposal", "z_proposal_pe", "z_proposal_grads",
"r_sum", "weight", "turning", "diverging",
"sum_accept_probs", "num_proposals"])
class NUTS(HMC):
"""
No-U-Turn Sampler kernel, which provides an efficient and convenient way
to run Hamiltonian Monte Carlo. The number of steps taken by the
integrator is dynamically adjusted on each call to ``sample`` to ensure
an optimal length for the Hamiltonian trajectory [1]. As such, the samples
generated will typically have lower autocorrelation than those generated
by the :class:`~pyro.infer.mcmc.HMC` kernel. Optionally, the NUTS kernel
also provides the ability to adapt step size during the warmup phase.
Refer to the `baseball example <https://github.com/uber/pyro/blob/dev/examples/baseball.py>`_
to see how to do Bayesian inference in Pyro using NUTS.
**References**
[1] `The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo`,
Matthew D. Hoffman, and Andrew Gelman.
[2] `A Conceptual Introduction to Hamiltonian Monte Carlo`,
Michael Betancourt
[3] `Slice Sampling`,
Radford M. Neal
:param model: Python callable containing Pyro primitives.
    :param potential_fn: Python callable that computes the potential energy,
        given a dict of real-support parameters as input.
:param float step_size: Determines the size of a single step taken by the
verlet integrator while computing the trajectory using Hamiltonian
dynamics. If not specified, it will be set to 1.
:param bool adapt_step_size: A flag to decide if we want to adapt step_size
during warm-up phase using Dual Averaging scheme.
:param bool adapt_mass_matrix: A flag to decide if we want to adapt mass
matrix during warm-up phase using Welford scheme.
:param bool full_mass: A flag to decide if mass matrix is dense or diagonal.
:param bool use_multinomial_sampling: A flag to decide if we want to sample
candidates along its trajectory using "multinomial sampling" or using
"slice sampling". Slice sampling is used in the original NUTS paper [1],
while multinomial sampling is suggested in [2]. By default, this flag is
set to True. If it is set to `False`, NUTS uses slice sampling.
:param dict transforms: Optional dictionary that specifies a transform
for a sample site with constrained support to unconstrained space. The
transform should be invertible, and implement `log_abs_det_jacobian`.
If not specified and the model has sites with constrained support,
automatic transformations will be applied, as specified in
:mod:`torch.distributions.constraint_registry`.
:param int max_plate_nesting: Optional bound on max number of nested
:func:`pyro.plate` contexts. This is required if model contains
discrete sample sites that can be enumerated over in parallel.
:param bool jit_compile: Optional parameter denoting whether to use
the PyTorch JIT to trace the log density computation, and use this
optimized executable trace in the integrator.
:param dict jit_options: A dictionary contains optional arguments for
:func:`torch.jit.trace` function.
:param bool ignore_jit_warnings: Flag to ignore warnings from the JIT
tracer when ``jit_compile=True``. Default is False.
:param float target_accept_prob: Target acceptance probability of step size
adaptation scheme. Increasing this value will lead to a smaller step size,
so the sampling will be slower but more robust. Default to 0.8.
:param int max_tree_depth: Max depth of the binary tree created during the doubling
scheme of NUTS sampler. Default to 10.
Example:
>>> true_coefs = torch.tensor([1., 2., 3.])
>>> data = torch.randn(2000, 3)
>>> dim = 3
>>> labels = dist.Bernoulli(logits=(true_coefs * data).sum(-1)).sample()
>>>
>>> def model(data):
... coefs_mean = torch.zeros(dim)
... coefs = pyro.sample('beta', dist.Normal(coefs_mean, torch.ones(3)))
... y = pyro.sample('y', dist.Bernoulli(logits=(coefs * data).sum(-1)), obs=labels)
... return y
>>>
>>> nuts_kernel = NUTS(model, adapt_step_size=True)
>>> mcmc = MCMC(nuts_kernel, num_samples=500, warmup_steps=300)
>>> mcmc.run(data)
>>> mcmc.get_samples()['beta'].mean(0) # doctest: +SKIP
tensor([ 0.9221, 1.9464, 2.9228])
"""
def __init__(self,
model=None,
potential_fn=None,
step_size=1,
adapt_step_size=True,
adapt_mass_matrix=True,
full_mass=False,
use_multinomial_sampling=True,
transforms=None,
max_plate_nesting=None,
jit_compile=False,
jit_options=None,
ignore_jit_warnings=False,
target_accept_prob=0.8,
max_tree_depth=10):
super(NUTS, self).__init__(model,
potential_fn,
step_size,
adapt_step_size=adapt_step_size,
adapt_mass_matrix=adapt_mass_matrix,
full_mass=full_mass,
transforms=transforms,
max_plate_nesting=max_plate_nesting,
jit_compile=jit_compile,
jit_options=jit_options,
ignore_jit_warnings=ignore_jit_warnings,
target_accept_prob=target_accept_prob)
self.use_multinomial_sampling = use_multinomial_sampling
self._max_tree_depth = max_tree_depth
# There are three conditions to stop doubling process:
# + Tree is becoming too big.
# + The trajectory is making a U-turn.
# + The probability of the states becoming negligible: p(z, r) << u,
# here u is the "slice" variable introduced at the `self.sample(...)` method.
# Denote E_p = -log p(z, r), E_u = -log u, the third condition is equivalent to
# sliced_energy := E_p - E_u > some constant =: max_sliced_energy.
        # This also explains the notion of "diverging" in the implementation:
# when the energy E_p diverges from E_u too much, we stop doubling.
# Here, as suggested in [1], we set dE_max = 1000.
self._max_sliced_energy = 1000
def _is_turning(self, r_left, r_right, r_sum):
# We follow the strategy in Section A.4.2 of [2] for this implementation.
r_left_flat = torch.cat([r_left[site_name].reshape(-1) for site_name in sorted(r_left)])
r_right_flat = torch.cat([r_right[site_name].reshape(-1) for site_name in sorted(r_right)])
r_sum = r_sum - (r_left_flat + r_right_flat) / 2
if self.inverse_mass_matrix.dim() == 2:
if (self.inverse_mass_matrix.matmul(r_left_flat).dot(r_sum) > 0 and
self.inverse_mass_matrix.matmul(r_right_flat).dot(r_sum) > 0):
return False
else:
if (self.inverse_mass_matrix.mul(r_left_flat).dot(r_sum) > 0 and
self.inverse_mass_matrix.mul(r_right_flat).dot(r_sum) > 0):
return False
return True
def _build_basetree(self, z, r, z_grads, log_slice, direction, energy_current):
step_size = self.step_size if direction == 1 else -self.step_size
z_new, r_new, z_grads, potential_energy = velocity_verlet(
z, r, self.potential_fn, self.inverse_mass_matrix, step_size, z_grads=z_grads)
r_new_flat = torch.cat([r_new[site_name].reshape(-1) for site_name in sorted(r_new)])
energy_new = potential_energy + self._kinetic_energy(r_new)
# handle the NaN case
energy_new = scalar_like(energy_new, float("inf")) if torch_isnan(energy_new) else energy_new
sliced_energy = energy_new + log_slice
diverging = (sliced_energy > self._max_sliced_energy)
delta_energy = energy_new - energy_current
accept_prob = (-delta_energy).exp().clamp(max=1.0)
if self.use_multinomial_sampling:
tree_weight = -sliced_energy
else:
# As a part of the slice sampling process (see below), along the trajectory
# we eliminate states which p(z, r) < u, or dE > 0.
# Due to this elimination (and stop doubling conditions),
# the weight of binary tree might not equal to 2^tree_depth.
tree_weight = scalar_like(sliced_energy, 1. if sliced_energy <= 0 else 0.)
return _TreeInfo(z_new, r_new, z_grads, z_new, r_new, z_grads, z_new, potential_energy,
z_grads, r_new_flat, tree_weight, False, diverging, accept_prob, 1)
def _build_tree(self, z, r, z_grads, log_slice, direction, tree_depth, energy_current):
if tree_depth == 0:
return self._build_basetree(z, r, z_grads, log_slice, direction, energy_current)
# build the first half of tree
half_tree = self._build_tree(z, r, z_grads, log_slice,
direction, tree_depth-1, energy_current)
z_proposal = half_tree.z_proposal
z_proposal_pe = half_tree.z_proposal_pe
z_proposal_grads = half_tree.z_proposal_grads
# Check conditions to stop doubling. If we meet that condition,
# there is no need to build the other tree.
if half_tree.turning or half_tree.diverging:
return half_tree
# Else, build remaining half of tree.
# If we are going to the right, start from the right leaf of the first half.
if direction == 1:
z = half_tree.z_right
r = half_tree.r_right
z_grads = half_tree.z_right_grads
else: # otherwise, start from the left leaf of the first half
z = half_tree.z_left
r = half_tree.r_left
z_grads = half_tree.z_left_grads
other_half_tree = self._build_tree(z, r, z_grads, log_slice,
direction, tree_depth-1, energy_current)
if self.use_multinomial_sampling:
tree_weight = _logaddexp(half_tree.weight, other_half_tree.weight)
else:
tree_weight = half_tree.weight + other_half_tree.weight
sum_accept_probs = half_tree.sum_accept_probs + other_half_tree.sum_accept_probs
num_proposals = half_tree.num_proposals + other_half_tree.num_proposals
r_sum = half_tree.r_sum + other_half_tree.r_sum
        # The probability that the proposal is drawn from either half of the tree
        # is computed from the weights of the two halves.
if self.use_multinomial_sampling:
other_half_tree_prob = (other_half_tree.weight - tree_weight).exp()
else:
# For the special case that the weights of each half are both 0,
# we choose the proposal from the first half
# (any is fine, because the probability of picking it at the end is 0!).
other_half_tree_prob = (other_half_tree.weight / tree_weight if tree_weight > 0
else scalar_like(tree_weight, 0.))
is_other_half_tree = pyro.sample("is_other_half_tree",
dist.Bernoulli(probs=other_half_tree_prob))
if is_other_half_tree == 1:
z_proposal = other_half_tree.z_proposal
z_proposal_pe = other_half_tree.z_proposal_pe
z_proposal_grads = other_half_tree.z_proposal_grads
# leaves of the full tree are determined by the direction
if direction == 1:
z_left = half_tree.z_left
r_left = half_tree.r_left
z_left_grads = half_tree.z_left_grads
z_right = other_half_tree.z_right
r_right = other_half_tree.r_right
z_right_grads = other_half_tree.z_right_grads
else:
z_left = other_half_tree.z_left
r_left = other_half_tree.r_left
z_left_grads = other_half_tree.z_left_grads
z_right = half_tree.z_right
r_right = half_tree.r_right
z_right_grads = half_tree.z_right_grads
# We already check if first half tree is turning. Now, we check
# if the other half tree or full tree are turning.
turning = other_half_tree.turning or self._is_turning(r_left, r_right, r_sum)
# The divergence is checked by the second half tree (the first half is already checked).
diverging = other_half_tree.diverging
return _TreeInfo(z_left, r_left, z_left_grads, z_right, r_right, z_right_grads, z_proposal,
z_proposal_pe, z_proposal_grads, r_sum, tree_weight, turning, diverging,
sum_accept_probs, num_proposals)
def sample(self, params):
z, potential_energy, z_grads = self._fetch_from_cache()
# recompute PE when cache is cleared
if z is None:
z = params
potential_energy = self.potential_fn(z)
self._cache(z, potential_energy)
# return early if no sample sites
elif len(z) == 0:
self._t += 1
self._mean_accept_prob = 1.
if self._t > self._warmup_steps:
self._accept_cnt += 1
return z
r, r_flat = self._sample_r(name="r_t={}".format(self._t))
energy_current = self._kinetic_energy(r) + potential_energy
# Ideally, following a symplectic integrator trajectory, the energy is constant.
# In that case, we can sample the proposal uniformly, and there is no need to use "slice".
        # However, that is not the case in practice: numerical errors creep in during the computation.
# To deal with that problem, as in [1], we introduce an auxiliary "slice" variable (denoted
# by u).
# The sampling process goes as follows:
# first sampling u from initial state (z_0, r_0) according to
# u ~ Uniform(0, p(z_0, r_0)),
# then sampling state (z, r) from the integrator trajectory according to
# (z, r) ~ Uniform({(z', r') in trajectory | p(z', r') >= u}).
#
# For more information about slice sampling method, see [3].
# For another version of NUTS which uses multinomial sampling instead of slice sampling,
# see [2].
if self.use_multinomial_sampling:
log_slice = -energy_current
else:
# Rather than sampling the slice variable from `Uniform(0, exp(-energy))`, we can
# sample log_slice directly using `energy`, so as to avoid potential underflow or
# overflow issues ([2]).
slice_exp_term = pyro.sample("slicevar_exp_t={}".format(self._t),
dist.Exponential(scalar_like(energy_current, 1.)))
log_slice = -energy_current - slice_exp_term
z_left = z_right = z
r_left = r_right = r
z_left_grads = z_right_grads = z_grads
accepted = False
r_sum = r_flat
sum_accept_probs = 0.
num_proposals = 0
tree_weight = scalar_like(energy_current, 0. if self.use_multinomial_sampling else 1.)
# Temporarily disable distributions args checking as
# NaNs are expected during step size adaptation.
with optional(pyro.validation_enabled(False), self._t < self._warmup_steps):
# doubling process, stop when turning or diverging
tree_depth = 0
while tree_depth < self._max_tree_depth:
direction = pyro.sample("direction_t={}_treedepth={}".format(self._t, tree_depth),
dist.Bernoulli(probs=scalar_like(tree_weight, 0.5)))
direction = int(direction.item())
if direction == 1: # go to the right, start from the right leaf of current tree
new_tree = self._build_tree(z_right, r_right, z_right_grads, log_slice,
direction, tree_depth, energy_current)
# update leaf for the next doubling process
z_right = new_tree.z_right
r_right = new_tree.r_right
z_right_grads = new_tree.z_right_grads
                else:  # go to the left, start from the left leaf of the current tree
new_tree = self._build_tree(z_left, r_left, z_left_grads, log_slice,
direction, tree_depth, energy_current)
z_left = new_tree.z_left
r_left = new_tree.r_left
z_left_grads = new_tree.z_left_grads
sum_accept_probs = sum_accept_probs + new_tree.sum_accept_probs
num_proposals = num_proposals + new_tree.num_proposals
# stop doubling
if new_tree.diverging:
if self._t >= self._warmup_steps:
self._divergences.append(self._t - self._warmup_steps)
break
if new_tree.turning:
break
tree_depth += 1
if self.use_multinomial_sampling:
new_tree_prob = (new_tree.weight - tree_weight).exp()
else:
new_tree_prob = new_tree.weight / tree_weight
rand = pyro.sample("rand_t={}_treedepth={}".format(self._t, tree_depth),
dist.Uniform(scalar_like(new_tree_prob, 0.),
scalar_like(new_tree_prob, 1.)))
if rand < new_tree_prob:
accepted = True
z = new_tree.z_proposal
self._cache(z, new_tree.z_proposal_pe, new_tree.z_proposal_grads)
r_sum = r_sum + new_tree.r_sum
if self._is_turning(r_left, r_right, r_sum): # stop doubling
break
else: # update tree_weight
if self.use_multinomial_sampling:
tree_weight = _logaddexp(tree_weight, new_tree.weight)
else:
tree_weight = tree_weight + new_tree.weight
accept_prob = sum_accept_probs / num_proposals
self._t += 1
if self._t > self._warmup_steps:
n = self._t - self._warmup_steps
if accepted:
self._accept_cnt += 1
else:
n = self._t
self._adapter.step(self._t, z, accept_prob)
self._mean_accept_prob += (accept_prob.item() - self._mean_accept_prob) / n
return z.copy()
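# --- Illustrative sketch (assumed values, not part of the Pyro source) ------
# _logaddexp is the numerically stable log(exp(x) + exp(y)) used above to
# accumulate tree weights in log space under multinomial sampling.
def _demo_logaddexp():
    x, y = torch.tensor(-3.0), torch.tensor(-1.0)
    # log(exp(-3) + exp(-1)) is roughly -0.8731
    return _logaddexp(x, y)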
| 50.558603
| 101
| 0.61897
|
ccc1ba80585701e9f57e96a909b20e20a9d7bc2c
| 3,231
|
py
|
Python
|
carla/recourse_methods/catalog/clue/library/clue_ml/AE_models/AE/models.py
|
jayanthyetukuri/CARLA
|
c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950
|
[
"MIT"
] | 140
|
2021-08-03T21:53:32.000Z
|
2022-03-20T08:52:02.000Z
|
carla/recourse_methods/catalog/clue/library/clue_ml/AE_models/AE/models.py
|
jayanthyetukuri/CARLA
|
c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950
|
[
"MIT"
] | 54
|
2021-03-07T18:22:16.000Z
|
2021-08-03T12:06:31.000Z
|
carla/recourse_methods/catalog/clue/library/clue_ml/AE_models/AE/models.py
|
jayanthyetukuri/CARLA
|
c3f3aaf11a5a8499c4bec5065e0c17ec8e6f5950
|
[
"MIT"
] | 16
|
2021-08-23T12:14:58.000Z
|
2022-03-01T00:52:58.000Z
|
from __future__ import division
import torch.nn as nn
from carla.recourse_methods.catalog.clue.library.clue_ml.src.layers import (
MLPBlock,
ResBlock,
SkipConnection,
leaky_MLPBlock,
preact_leaky_MLPBlock,
)
# MLP based model
class MLP_recognition_net(nn.Module):
def __init__(self, input_dim, width, depth, latent_dim):
super(MLP_recognition_net, self).__init__()
# input layer
proposal_layers = [
nn.Linear(input_dim, width),
nn.ReLU(),
nn.BatchNorm1d(num_features=width),
]
# body
for i in range(depth - 1):
proposal_layers.append(MLPBlock(width))
# output layer
proposal_layers.append(nn.Linear(width, latent_dim * 2))
self.block = nn.Sequential(*proposal_layers)
def forward(self, x):
return self.block(x)
class MLP_generator_net(nn.Module):
def __init__(self, input_dim, width, depth, latent_dim):
super(MLP_generator_net, self).__init__()
# input layer
generative_layers = [
nn.Linear(latent_dim, width),
nn.LeakyReLU(),
nn.BatchNorm1d(num_features=width),
]
# body
for i in range(depth - 1):
generative_layers.append(
# skip-connection from prior network to generative network
leaky_MLPBlock(width)
)
# output layer
generative_layers.extend(
[
nn.Linear(width, input_dim),
]
)
self.block = nn.Sequential(*generative_layers)
def forward(self, x):
return self.block(x)
# MLP fully linear residual path preact models
class MLP_preact_recognition_net(nn.Module):
def __init__(self, input_dim, width, depth, latent_dim):
super(MLP_preact_recognition_net, self).__init__()
# input layer
proposal_layers = [nn.Linear(input_dim, width)]
# body
for i in range(depth - 1):
proposal_layers.append(preact_leaky_MLPBlock(width))
# output layer
proposal_layers.extend(
[
nn.LeakyReLU(),
nn.BatchNorm1d(num_features=width),
nn.Linear(width, latent_dim * 2),
]
)
self.block = nn.Sequential(*proposal_layers)
def forward(self, x):
return self.block(x)
class MLP_preact_generator_net(nn.Module):
def __init__(self, input_dim, width, depth, latent_dim):
super(MLP_preact_generator_net, self).__init__()
# input layer
generative_layers = [
nn.Linear(latent_dim, width),
nn.LeakyReLU(),
nn.BatchNorm1d(num_features=width),
]
# body
for i in range(depth - 1):
generative_layers.append(
# skip-connection from prior network to generative network
preact_leaky_MLPBlock(width)
)
# output layer
generative_layers.extend(
[
nn.Linear(width, input_dim),
]
)
self.block = nn.Sequential(*generative_layers)
def forward(self, x):
return self.block(x)
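# --- Illustrative shape check (sizes assumed, not part of the original) -----
# The preact recognition net maps input_dim -> 2 * latent_dim (presumably a
# mean and a log-variance per latent dimension) and the generator maps
# latent_dim -> input_dim.  torch itself is not imported by this module, so
# the sketch imports it locally.
def _demo_shapes():
    import torch
    torch.manual_seed(0)
    enc = MLP_preact_recognition_net(input_dim=20, width=64, depth=3, latent_dim=5)
    dec = MLP_preact_generator_net(input_dim=20, width=64, depth=3, latent_dim=5)
    x = torch.randn(8, 20)               # batch of 8 keeps BatchNorm1d happy
    mu, logvar = enc(x).chunk(2, dim=1)  # each (8, 5)
    recon = dec(mu)                      # (8, 20)
    return mu.shape, logvar.shape, recon.shape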
| 28.095652
| 76
| 0.587125
|
2248a48dafeceacb317fb6693dc480e3617abd9d
| 1,828
|
py
|
Python
|
superset/data/sf_population_polygons.py
|
franksam007/incubator-superset
|
a0f572eb3ea4b89cb435a8af20436f8e1d34814e
|
[
"Apache-2.0"
] | 108
|
2018-01-22T11:09:59.000Z
|
2021-01-15T10:53:04.000Z
|
superset/data/sf_population_polygons.py
|
franksam007/incubator-superset
|
a0f572eb3ea4b89cb435a8af20436f8e1d34814e
|
[
"Apache-2.0"
] | 112
|
2018-01-25T22:57:21.000Z
|
2019-08-22T20:08:48.000Z
|
superset/data/sf_population_polygons.py
|
franksam007/incubator-superset
|
a0f572eb3ea4b89cb435a8af20436f8e1d34814e
|
[
"Apache-2.0"
] | 24
|
2018-01-19T22:54:39.000Z
|
2020-11-12T13:04:25.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import pandas as pd
from sqlalchemy import BigInteger, Text
from superset import db
from superset.utils import core as utils
from .helpers import TBL, get_example_data
def load_sf_population_polygons():
tbl_name = 'sf_population_polygons'
data = get_example_data('sf_population.json.gz')
df = pd.read_json(data)
df['contour'] = df.contour.map(json.dumps)
df.to_sql(
tbl_name,
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'zipcode': BigInteger,
'population': BigInteger,
'contour': Text,
'area': BigInteger,
},
index=False)
print('Creating table {} reference'.format(tbl_name))
tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not tbl:
tbl = TBL(table_name=tbl_name)
tbl.description = 'Population density of San Francisco'
tbl.database = utils.get_or_create_main_db()
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
| 33.236364
| 70
| 0.704595
|
fcae04122a8fae2f785e7a2832115b71c9351cc2
| 17,918
|
py
|
Python
|
leo/plugins/python_terminal.py
|
thomasbuttler/leo-editor
|
c1bddc31313b7788f0d6583dcb4ab75db73e9a09
|
[
"MIT"
] | 1,550
|
2015-01-14T16:30:37.000Z
|
2022-03-31T08:55:58.000Z
|
leo/plugins/python_terminal.py
|
thomasbuttler/leo-editor
|
c1bddc31313b7788f0d6583dcb4ab75db73e9a09
|
[
"MIT"
] | 2,009
|
2015-01-13T16:28:52.000Z
|
2022-03-31T18:21:48.000Z
|
leo/plugins/python_terminal.py
|
thomasbuttler/leo-editor
|
c1bddc31313b7788f0d6583dcb4ab75db73e9a09
|
[
"MIT"
] | 200
|
2015-01-05T15:07:41.000Z
|
2022-03-07T17:05:01.000Z
|
#@+leo-ver=5-thin
#@+node:peckj.20150428142633.1: * @file ../plugins/python_terminal.py
#@@language python
#@@tabwidth -4
# **Warning**: Use at your own risk.
# Numerous significant problems have been reported, including segfaults.
# This code from http://stackoverflow.com/questions/12431555
# with modifications from Jake Peck and EKR.
#@+<< docstring >>
#@+node:peckj.20150428142633.2: ** << docstring >>
"""Provides an interactive python terminal in the log pane.
**Warning**: Use at your own risk.
Numerous significant problems have been reported, including segfaults.
By Jacob M. Peck
Usage
=====
Enabling this plugin will add a new tab to the Log pane, labeled "Python Console". This is a fully interactive
python command shell, with access to `g`, `c`, and `p` included!
Features:
- Includes support for g, c, and p
- Each outline tab has a separate python console, with a separate namespace (aside from g, of course)
- Extremely primitive tab-completion
- Command history (use !hist to list, and !hist(n) to recall the n'th entry)
- !clear to clear the console
Caveats:
Stdout and stderr are properly redirected to the interactive console pane while
it has focus, but properly reset to their previous values when focus is lost. If
code executed inside the interactive console pane needs to write to the
command-line stdout/stderr, use sys.__stdout__ and sys.__stderr__.
Just as with scripts, if you do something dangerous, you're on your own.
This code is largely lifted from
http://stackoverflow.com/questions/12431555/
enabling-code-completion-in-an-embedded-python-interpreter,
with some modifications made for Leo embedding.
"""
#@-<< docstring >>
#@+<< imports >>
#@+node:peckj.20150428142729.2: ** << imports >>
import re
import sys
import code
from leo.core import leoGlobals as g
from leo.core.leoQt import QtWidgets
from leo.core.leoQt import Key
use_rlcompleter = False
# A workaround for #1212: segfaults at startup when importing this file.
# True: enable tab completion, at the risk of segfaults.
# Third-party imports.
if use_rlcompleter:
from rlcompleter import Completer
else:
Completer = None
#
# Fail fast, right after all imports.
g.assertUi('qt') # May raise g.UiTypeException, caught by the plugins manager.
#@-<< imports >>
#@+others
#@+node:peckj.20150428142729.3: ** class MyInterpreter
if QtWidgets:
class MyInterpreter(QtWidgets.QWidget):
def __init__(self, parent, c):
super().__init__(parent)
hBox = QtWidgets.QHBoxLayout()
self.setLayout(hBox)
self.textEdit = PyInterp(self, c)
# this is how you pass in locals to the interpreter
self.textEdit.initInterpreter(locals())
hBox.addWidget(self.textEdit)
hBox.setContentsMargins(0, 0, 0, 0)
hBox.setSpacing(0)
#@+node:peckj.20150428142729.6: ** class InteractiveInterpreter (code.InteractiveInterpreter)
class InteractiveInterpreter(code.InteractiveInterpreter):
#@+others
#@+node:peckj.20150428142729.7: *3* InteractiveInterpreter.__init__
def __init__(self, locals, c):
"""Ctor for InteractiveInterpreter class."""
self.c = c
# inject g, c, p
loc = locals
loc['c'] = self.c
loc['g'] = g
loc['p'] = self.c.p
super().__init__(loc)
#@+node:peckj.20150428142729.8: *3* InteractiveInterpreter.runIt
def runIt(self, command):
code.InteractiveInterpreter.runsource(self, command)
#@-others
#@+node:peckj.20150428142729.5: ** class PyInterp (QTextEdit)
if QtWidgets:
class PyInterp(QtWidgets.QTextEdit):
#@+others
#@+node:peckj.20150428142729.9: *3* PyInterp.__init__
def __init__(self, parent, c):
super().__init__(parent)
#
# this widget swallows stdout + stderr while focused,
# but resets them upon losing focus
if not g.user_dict.get('old_stdout', None):
g.user_dict['old_stdout'] = sys.stdout
if not g.user_dict.get('old_stderr', None):
g.user_dict['old_stderr'] = sys.stderr
#
# init ivars.
self.indent = 0
self.refreshMarker = False # to change back to >>> from ...
# self.multiLine = False # code spans more than one line
# self.command = '' # command to be ran
self.printBanner() # print sys info
self.insert_marker() # make the >>> or ... marker
self.history = [] # list of commands entered
self.historyIndex = -1
self.interpreterLocals = {}
self.c = c
#
            # initialize the interpreter with self locals
self.initInterpreter(locals())
#
# update p when new node selected
g.registerHandler('select2', self.select2_hook)
#@+node:peckj.20150428142729.10: *3* PyInterp.select2_hook
def select2_hook(self, tag, keywords):
self.interpreter.runIt('p = c.p')
#@+node:peckj.20150428142729.11: *3* PyInterp.printBanner
def printBanner(self):
banner = [
'Type !hist for a history view and !hist(n) history index recall\n',
'Type !clear to clear this pane\n'
]
for msg in banner:
self.write(msg)
#@+node:peckj.20150428142729.12: *3* PyInterp.insert_marker
def insert_marker(self):
# line = '... ' if self.multiLine else '>>> '
line = '... ' if self.indent > 0 else '>>> '
self.insertPlainText(line + ' ' * self.indent)
#@+node:peckj.20150428142729.13: *3* PyInterp.initInterpreter
def initInterpreter(self, interpreterLocals=None):
if interpreterLocals:
# when we pass in locals, we don't want it to be named "self"
# so we rename it with the name of the class that did the passing
# and reinsert the locals back into the interpreter dictionary
selfName = interpreterLocals['self'].__class__.__name__
interpreterLocalVars = interpreterLocals.pop('self')
self.interpreterLocals[selfName] = interpreterLocalVars
else:
self.interpreterLocals = interpreterLocals
self.interpreter = InteractiveInterpreter(self.interpreterLocals, self.c)
#@+node:peckj.20150428142729.14: *3* PyInterp.updateInterpreterLocals
def updateInterpreterLocals(self, newLocals):
className = newLocals.__class__.__name__
self.interpreterLocals[className] = newLocals
#@+node:peckj.20150428142729.15: *3* PyInterp.write
def write(self, line):
self.insertPlainText(line)
self.ensureCursorVisible()
#@+node:peckj.20150428142729.16: *3* PyInterp.clearCurrentBlock
def clearCurrentBlock(self):
# block being current row
length = len(self.document().lastBlock().text()[4:])
if length == 0:
return None
#
# should have a better way of doing this but I can't find it.
# [self.textCursor().deletePreviousChar() for x in xrange(length)]
for x in range(length):
self.textCursor().deletePreviousChar()
return True
#@+node:peckj.20150428142729.17: *3* PyInterp.recallHistory
def recallHistory(self):
# used when using the arrow keys to scroll through history
self.clearCurrentBlock()
if self.historyIndex != -1:
self.insertPlainText(self.history[self.historyIndex])
return True
#@+node:peckj.20150428142729.18: *3* PyInterp.customCommands
def customCommands(self, command):
if command == '!hist': # display history
self.append('') # move down one line
# vars that are in the command are prefixed with ____CC and deleted
# once the command is done so they don't show up in dir()
backup = self.interpreterLocals.copy()
history = self.history[:]
history.reverse()
for i, x in enumerate(history):
iSize = len(str(i))
delta = len(str(len(history))) - iSize
                    line = ' ' * delta + '%i: %s' % (i, x) + '\n'
self.write(line)
self.updateInterpreterLocals(backup)
self.insert_marker()
return True
if re.match(r'!hist\(\d+\)', command): # recall command from history
backup = self.interpreterLocals.copy()
history = self.history[:]
history.reverse()
index = int(command[6:-1])
self.clearCurrentBlock()
command = history[index]
if command[-1] == ':':
# self.multiLine = True
self.indent += 4
self.write(command)
self.updateInterpreterLocals(backup)
return True
if re.match(r'(quit|exit)\(\)', command): # prevent quitting!
self.append('')
self.write('Cannot quit() from an embedded console.\n')
self.insert_marker()
return True
if re.match(r'!clear', command): # clear the screen
self.clear()
self.insert_marker()
return True
return False
#@+node:peckj.20150428142729.19: *3* PyInterp.keyPressEvent & helper
def keyPressEvent(self, event):
try:
# #1212: Disable this by default.
if use_rlcompleter and event.key() == Key.Key_Tab:
line = str(self.document().lastBlock().text())[4:]
completer = Completer(self.interpreter.locals)
suggestion = completer.complete(line, 0)
if suggestion is not None:
self.insertPlainText(suggestion[len(line) :])
return
if event.key() == Key.Key_Down:
if self.historyIndex == len(self.history):
self.historyIndex -= 1
try:
if self.historyIndex > -1:
self.historyIndex -= 1
self.recallHistory()
else:
self.clearCurrentBlock()
except Exception:
pass
return
if event.key() == Key.Key_Up:
try:
if len(self.history) - 1 > self.historyIndex:
self.historyIndex += 1
self.recallHistory()
else:
self.historyIndex = len(self.history)
except Exception:
pass
return
if event.key() == Key.Key_Home:
# set cursor to position 4 in current block. 4 because that's where
# the marker stops
blockLength = len(self.document().lastBlock().text()[4:])
lineLength = len(self.document().toPlainText())
position = lineLength - blockLength
textCursor = self.textCursor()
textCursor.setPosition(position)
self.setTextCursor(textCursor)
return
if event.key() in [Key.Key_Left, Key.Key_Backspace]:
# don't allow deletion of marker
if self.textCursor().positionInBlock() == 4:
return
if event.key() in [Key.Key_Return, Key.Key_Enter]:
self.doEnter(event)
return
# allow all other key events
super().keyPressEvent(event)
except Exception:
g.es_exception()
#@+node:ekr.20180307132016.1: *4* PyInterp.doEnter & helpers
def doEnter(self, event):
"""Handle the <return> key."""
#
# Binding for functions.
interp = self.interpreter
#@+others # Helper function
#@+node:ekr.20190619185252.1: *5* function: compute_indent
def compute_indent(line):
"""Return the indentation of a line."""
indent = len(line) - len(line.lstrip())
if line.endswith(':'):
indent += 4
return indent
#@+node:ekr.20190619183908.1: *5* function: compile_lines
def compile_lines(lines):
"""Compile one or more lines, returning the compiled code."""
source = ''.join(lines)
try:
return code.compile_command(source)
except SyntaxError:
interp.showsyntaxerror()
except Exception:
interp.showtraceback()
return None
#@+node:ekr.20190619190805.1: *5* function: compile_and_run_lines
def compile_and_run_lines(lines):
"""Compile and run code lines. Return 1 if there are errors."""
assert lines
the_code = compile_lines(lines)
if the_code:
return run_code(the_code)
return None
            #@+node:ekr.20180525110907.1: *5* function: run_code
def run_code(the_code):
"""Execute the compiled code. Return True if all went well."""
try:
interp.runcode(the_code)
return True
except SyntaxError:
interp.showsyntaxerror()
except Exception:
interp.showtraceback()
return False
#@-others
#
# Set cursor to end of line to avoid line splitting
textCursor = self.textCursor()
position = len(self.document().toPlainText())
textCursor.setPosition(position)
self.setTextCursor(textCursor)
lines = []
block = self.document().lastBlock()
#
# Scan backward, looking for lines.
while block:
line = g.toUnicode(block.text())
line = line.replace('\t', ' ' * 4)
block = block.previous()
if line.startswith('>>> '):
lines.insert(0, line[4:])
break
elif line.startswith('... '):
lines.insert(0, line[4:])
else:
lines.insert(0, line)
#
# Always end the log line.
self.append('')
#
# Clean the lines and compute the last line.
last_line = lines[-1].rstrip() if lines else ''
lines = [z.rstrip() + '\n' for z in lines if z.strip()]
if self.customCommands(last_line):
return
#
# Handle the history and set self.indent for insert_marker.
if last_line.strip():
self.history.insert(0, last_line)
self.indent = compute_indent(last_line)
#
# Check for a continued line.
if self.indent > 0 and last_line:
self.insert_marker()
return
#
# Execute lines in groups, delimited by indentation.
indent, ok, exec_lines = 0, True, []
for line in lines:
indent = compute_indent(line) if exec_lines else 0
if indent > 0 or not exec_lines:
exec_lines.append(line)
continue
# End of a group.
ok = compile_and_run_lines(exec_lines)
exec_lines = [line]
if not ok:
break
# Tail group.
if ok and exec_lines:
compile_and_run_lines(exec_lines)
self.indent = 0
self.insert_marker()
#@+node:peckj.20150428142729.20: *3* PyInterp.focusInEvent
def focusInEvent(self, event=None):
# set stdout+stderr properly
QtWidgets.QTextEdit.focusInEvent(self, event)
sys.stdout = self
sys.stderr = self
self.ensureCursorVisible()
#@+node:peckj.20150428142729.21: *3* PyInterp.focusOutEvent
def focusOutEvent(self, event):
# set stdout+stderr properly
QtWidgets.QTextEdit.focusOutEvent(self, event)
sys.stdout = g.user_dict['old_stdout']
sys.stderr = g.user_dict['old_stderr']
#@-others
#@+node:peckj.20150428142633.4: ** init
def init():
"""Return True if the plugin has loaded successfully."""
if g.app.gui is None:
g.app.createQtGui(__file__)
ok = g.app.gui.guiName().startswith('qt')
if ok:
# g.registerHandler(('new','open2'),onCreate)
g.registerHandler('after-create-leo-frame', onCreate)
# Fail: g.app.log does not exist.
g.plugin_signon(__name__)
else:
g.es('Plugin %s not loaded.' % __name__, color='red')
return ok
#@+node:peckj.20150428142633.5: ** onCreate
def onCreate(tag, keys):
"""python_terminal.py onCreate handler."""
c = keys.get('c')
if c:
win = MyInterpreter(None, c)
c.frame.log.createTab('Python Console', widget=win)
#@-others
#@-leo
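# --- Illustrative sketch (appended after the Leo sentinels; not part of the
# plugin).  It mirrors the nested compute_indent helper that PyInterp.doEnter
# uses to decide whether the console should keep prompting with "... " for a
# continued block.
def _demo_compute_indent():
    def compute_indent(line):
        indent = len(line) - len(line.lstrip())
        if line.endswith(':'):
            indent += 4
        return indent
    return compute_indent('for i in range(3):'), compute_indent('    x = i')  # (4, 4)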
| 40.630385
| 111
| 0.551233
|
5b7db24815a8816262db17a014f1fbd5696b7f9d
| 1,826
|
py
|
Python
|
run_bpm_example.py
|
nahman/lightbeam
|
f521feba5501a12e9db0f02d2025c9f4ad28541f
|
[
"MIT"
] | 1
|
2021-10-07T21:57:11.000Z
|
2021-10-07T21:57:11.000Z
|
run_bpm_example.py
|
nahman/lightbeam
|
f521feba5501a12e9db0f02d2025c9f4ad28541f
|
[
"MIT"
] | null | null | null |
run_bpm_example.py
|
nahman/lightbeam
|
f521feba5501a12e9db0f02d2025c9f4ad28541f
|
[
"MIT"
] | null | null | null |
'''Example script for running the beam-propagation code in prop.py.'''
import numpy as np
from mesh import RectMesh3D
from prop import Prop3D
from misc import normalize,overlap,getslices,overlap_nonu,norm_nonu
import LPmodes
import matplotlib.pyplot as plt
from config_example import *
if __name__ == "__main__":
# mesh initialization (required)
mesh = RectMesh3D(xw0,yw0,zw,ds,dz,num_PML,xw_func,yw_func)
xg,yg = mesh.xy.xg,mesh.xy.yg
mesh.xy.max_iters = max_remesh_iters
mesh.sigma_max = sig_max
# propagator initialization (required)
prop = Prop3D(wl0,mesh,optic,n0)
print('launch field')
plt.imshow(np.real(u0))
plt.show()
# run the propagator (required)
u,u0 = prop.prop2end(u0,xyslice=None,zslice=None,u1_func = u1_func,writeto=writeto,ref_val=ref_val,remesh_every=remesh_every,dynamic_n0=dynamic_n0,fplanewidth=fplanewidth)
# compute power in output ports (optional)
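    # The power coupled into each single-mode-fiber port is estimated below as
    # the squared overlap integral |<u, mode_i>|^2 between the propagated field
    # u and each normalized LP01 mode profile, evaluated with the non-uniform
    # mesh weights via overlap_nonu().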
xg,yg = np.meshgrid(mesh.xy.xa,mesh.xy.ya,indexing='ij')
w = mesh.xy.get_weights()
xg0,yg0 = np.meshgrid(mesh.xy.xa0,mesh.xy.ya0,indexing='ij')
w0 = mesh.xy.dx0*mesh.xy.dy0
modes = []
for x,y in zip(xpos,ypos):
mode = norm_nonu(LPmodes.lpfield(xg-x,yg-y,0,1,2.2,wl0,ncore,nclad),w)
modes.append(mode)
modes0 = []
for x,y in zip(xpos,ypos):
mode = normalize(LPmodes.lpfield(xg0-x,yg0-y,0,1,2.2,wl0,ncore,nclad),w0)
modes0.append(mode)
SMFpower=0
print("final field power decomposition:")
for i in range(len(modes)):
_p = np.power(overlap_nonu(u,modes[i],w),2)
print("mode"+str(i)+": ", _p)
SMFpower += _p
print("total power in SMFs: ", SMFpower)
# plotting (optional)
print("final field dist:")
plt.imshow(np.abs(u0)[num_PML:-num_PML,num_PML:-num_PML])
plt.show()
| 29.934426
| 175
| 0.671413
|
4cef3b01f24f8c5a012e6e5f6e7ef5bb5eaa3444
| 428
|
py
|
Python
|
code/example1_4.py
|
jk/jekyll-mermaid-blog
|
afe77485c9594fdcbe33bce70cde5557fb185807
|
[
"MIT"
] | 3
|
2015-06-28T23:45:30.000Z
|
2016-07-12T13:16:56.000Z
|
code/example1_4.py
|
jk/jekyll-mermaid-blog
|
afe77485c9594fdcbe33bce70cde5557fb185807
|
[
"MIT"
] | null | null | null |
code/example1_4.py
|
jk/jekyll-mermaid-blog
|
afe77485c9594fdcbe33bce70cde5557fb185807
|
[
"MIT"
] | 5
|
2017-01-13T04:58:50.000Z
|
2018-11-20T13:10:47.000Z
|
# import sys
# print sys.argv
class firstn(object):
def __init__(self, n):
self.n = n
self.num, self.nums = 0, []
def __iter__(self):
return self
# Python 3 compatibility
def __next__(self):
return self.next()
def next(self):
if self.num < self.n:
cur, self.num = self.num, self.num+1
return cur
else:
raise StopIteration()
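# Minimal usage sketch (assumes nothing beyond the class above): iterating a
# firstn(n) instance yields 0 .. n-1, mirroring range(n).
if __name__ == '__main__':
    assert list(firstn(5)) == [0, 1, 2, 3, 4]
    print(sum(firstn(5)))  # 10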
| 23.777778
| 48
| 0.53972
|
14ae6677a13c7155e86be1d6afcaf7681a5fa7f3
| 3,390
|
py
|
Python
|
D02_Pdf_Render/D02_Pdf_Render/settings.py
|
Hari-Krishna-Moorthy/Django-daily
|
90801cc89d979e70e594ba07dbb7178ffeddac33
|
[
"Apache-2.0"
] | null | null | null |
D02_Pdf_Render/D02_Pdf_Render/settings.py
|
Hari-Krishna-Moorthy/Django-daily
|
90801cc89d979e70e594ba07dbb7178ffeddac33
|
[
"Apache-2.0"
] | null | null | null |
D02_Pdf_Render/D02_Pdf_Render/settings.py
|
Hari-Krishna-Moorthy/Django-daily
|
90801cc89d979e70e594ba07dbb7178ffeddac33
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for D02_Pdf_Render project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-6ehhlc-^ngrdk)5&tcxu_i#-o0lt9@c_42x^qjr)viks=uf2*o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pdf',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'D02_Pdf_Render.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'D02_Pdf_Render.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
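# Note: to serve files from MEDIA_ROOT during development, the project urls.py
# would typically append
#   static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# from django.conf.urls.static to its urlpatterns (an assumption about this
# project's urls.py, which is not shown here).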
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.877863
| 91
| 0.702655
|
47a652fc204e1f80dcd6d8f36f6dee727f89c1c0
| 3,626
|
py
|
Python
|
app/models.py
|
lin-wish/random-name
|
91bae70aad4547e06388105136573a7c18525ed0
|
[
"MIT"
] | null | null | null |
app/models.py
|
lin-wish/random-name
|
91bae70aad4547e06388105136573a7c18525ed0
|
[
"MIT"
] | null | null | null |
app/models.py
|
lin-wish/random-name
|
91bae70aad4547e06388105136573a7c18525ed0
|
[
"MIT"
] | null | null | null |
from flask_bcrypt import Bcrypt
import jwt
from datetime import datetime, timedelta
from flask import current_app
# `db` is assumed to be the Flask-SQLAlchemy instance defined in the
# application package (e.g. app/__init__.py); it is referenced throughout
# this module but was not imported here.
from app import db
class RandomNames(db.Model):
""" This class represents the test table. """
__tablename__ = "random_names"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False, unique=True)
gender = db.Column(db.String(255))
region = db.Column(db.String(255))
age = db.Column(db.Integer)
title = db.Column(db.String(255))
phone = db.Column(db.String(255))
email = db.Column(db.String(255), nullable=False, unique=True)
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(
db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
def __init__(self, name):
""" Initialize with name. """
self.name = name
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all():
        return RandomNames.query.all()
def delete(self):
db.session.delete(self)
db.session.commit()
def __repr__(self):
return "<RandomNames: {}>".format(self.name)
class User(db.Model):
""" This class represents the users table. """
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), nullable=False, unique=True)
password = db.Column(db.String(255), nullable=False)
    random_names = db.relationship(
        'RandomNames', order_by='RandomNames.id', cascade="all, delete-orphan")
def __init__(self, email, password):
"""Initialize the user with an email and a password."""
self.email = email
self.password = Bcrypt().generate_password_hash(password).decode()
def password_is_valid(self, password):
"""
        Checks the password against its hash to validate the user's password.
"""
return Bcrypt().check_password_hash(self.password, password)
def save(self):
"""Save a user to the database.
This includes creating a new user and editing one.
"""
db.session.add(self)
db.session.commit()
def generate_token(self, user_id):
try:
# set up a payload with an expiration time
payload = {
'exp': datetime.utcnow() + timedelta(minutes=5),
'iat': datetime.utcnow(),
'sub': user_id
}
# create the byte string token using the payload and the SECRET key
jwt_string = jwt.encode(
payload,
current_app.config.get('SECRET'),
algorithm='HS256'
)
return jwt_string
except Exception as e:
# return an error in string format if an exception occurs
return str(e)
@staticmethod
def decode_token(token):
"""Decodes the access token from the Authorization header."""
try:
# try to decode the token using our SECRET variable
payload = jwt.decode(token, current_app.config.get('SECRET'))
return payload['sub']
except jwt.ExpiredSignatureError:
# the token is expired, return an error string
return "Expired token. Please login to get a new token"
except jwt.InvalidTokenError:
# the token is invalid, return an error string
return "Invalid token. Please register or login"
| 34.207547
| 80
| 0.597628
|
0cc70e51e077ca8107d5685d2d13717e18e3de97
| 3,913
|
py
|
Python
|
test/test_python_basic.py
|
devnexen/unit
|
d65a66f9d813294917822554311281c5e1a7126b
|
[
"Apache-2.0"
] | 10
|
2018-11-14T10:58:41.000Z
|
2021-12-11T01:43:51.000Z
|
test/test_python_basic.py
|
devnexen/unit
|
d65a66f9d813294917822554311281c5e1a7126b
|
[
"Apache-2.0"
] | null | null | null |
test/test_python_basic.py
|
devnexen/unit
|
d65a66f9d813294917822554311281c5e1a7126b
|
[
"Apache-2.0"
] | 3
|
2018-10-31T12:10:05.000Z
|
2019-02-14T14:09:48.000Z
|
from unit.control import TestControl
class TestPythonBasic(TestControl):
prerequisites = {'modules': {'python': 'any'}}
conf_app = {
"app": {
"type": "python",
"processes": {"spare": 0},
"path": "/app",
"module": "wsgi",
}
}
conf_basic = {
"listeners": {"*:7080": {"pass": "applications/app"}},
"applications": conf_app,
}
def test_python_get_empty(self):
assert self.conf_get() == {'listeners': {}, 'applications': {}}
assert self.conf_get('listeners') == {}
assert self.conf_get('applications') == {}
def test_python_get_applications(self):
self.conf(self.conf_app, 'applications')
conf = self.conf_get()
assert conf['listeners'] == {}, 'listeners'
assert conf['applications'] == {
"app": {
"type": "python",
"processes": {"spare": 0},
"path": "/app",
"module": "wsgi",
}
}, 'applications'
assert self.conf_get('applications') == {
"app": {
"type": "python",
"processes": {"spare": 0},
"path": "/app",
"module": "wsgi",
}
}, 'applications prefix'
assert self.conf_get('applications/app') == {
"type": "python",
"processes": {"spare": 0},
"path": "/app",
"module": "wsgi",
}, 'applications prefix 2'
assert self.conf_get('applications/app/type') == 'python', 'type'
assert self.conf_get('applications/app/processes/spare') == 0, 'spare'
def test_python_get_listeners(self):
self.conf(self.conf_basic)
assert self.conf_get()['listeners'] == {
"*:7080": {"pass": "applications/app"}
}, 'listeners'
assert self.conf_get('listeners') == {
"*:7080": {"pass": "applications/app"}
}, 'listeners prefix'
assert self.conf_get('listeners/*:7080') == {
"pass": "applications/app"
}, 'listeners prefix 2'
def test_python_change_listener(self):
self.conf(self.conf_basic)
self.conf({"*:7081": {"pass": "applications/app"}}, 'listeners')
assert self.conf_get('listeners') == {
"*:7081": {"pass": "applications/app"}
}, 'change listener'
def test_python_add_listener(self):
self.conf(self.conf_basic)
self.conf({"pass": "applications/app"}, 'listeners/*:7082')
assert self.conf_get('listeners') == {
"*:7080": {"pass": "applications/app"},
"*:7082": {"pass": "applications/app"},
}, 'add listener'
def test_python_change_application(self):
self.conf(self.conf_basic)
self.conf('30', 'applications/app/processes/max')
assert (
self.conf_get('applications/app/processes/max') == 30
), 'change application max'
self.conf('"/www"', 'applications/app/path')
assert (
self.conf_get('applications/app/path') == '/www'
), 'change application path'
def test_python_delete(self):
self.conf(self.conf_basic)
assert 'error' in self.conf_delete('applications/app')
assert 'success' in self.conf_delete('listeners/*:7080')
assert 'success' in self.conf_delete('applications/app')
assert 'error' in self.conf_delete('applications/app')
def test_python_delete_blocks(self):
self.conf(self.conf_basic)
assert 'success' in self.conf_delete('listeners')
assert 'success' in self.conf_delete('applications')
assert 'success' in self.conf(self.conf_app, 'applications')
assert 'success' in self.conf(
{"*:7081": {"pass": "applications/app"}}, 'listeners'
), 'applications restore'
| 31.813008
| 78
| 0.541784
|
38816655ef3e9a2c8b04a8d1905cb27da6409d8a
| 6,209
|
py
|
Python
|
mlflow/entities/run_info.py
|
yutannihilation/mlflow
|
a4386c3f87923e395ba8f523e1a90749e888a541
|
[
"Apache-2.0"
] | 2
|
2020-03-21T17:32:54.000Z
|
2020-07-22T06:11:55.000Z
|
mlflow/entities/run_info.py
|
yutannihilation/mlflow
|
a4386c3f87923e395ba8f523e1a90749e888a541
|
[
"Apache-2.0"
] | 5
|
2020-04-30T00:36:16.000Z
|
2022-03-02T02:51:29.000Z
|
mlflow/entities/run_info.py
|
mlaradji/mlflow
|
4edde91d0fa9909f5894bf84529b3416d52d83f6
|
[
"Apache-2.0"
] | null | null | null |
from mlflow.entities._mlflow_object import _MLflowObject
from mlflow.exceptions import MlflowException
from mlflow.protos.service_pb2 import RunInfo as ProtoRunInfo
def check_run_is_active(run_info):
if run_info.lifecycle_stage != RunInfo.ACTIVE_LIFECYCLE:
raise MlflowException('The run {} must be in an active lifecycle_stage.'
.format(run_info.run_uuid))
def check_run_is_deleted(run_info):
if run_info.lifecycle_stage != RunInfo.DELETED_LIFECYCLE:
        raise MlflowException('The run {} must be in a deleted lifecycle_stage.'
.format(run_info.run_uuid))
class RunInfo(_MLflowObject):
"""
Metadata about a run.
"""
ACTIVE_LIFECYCLE = "active"
DELETED_LIFECYCLE = "deleted"
def __init__(self, run_uuid, experiment_id, name, source_type, source_name, entry_point_name,
user_id, status, start_time, end_time, source_version, lifecycle_stage,
artifact_uri=None):
if run_uuid is None:
raise Exception("run_uuid cannot be None")
if experiment_id is None:
raise Exception("experiment_id cannot be None")
if name is None:
raise Exception("name cannot be None")
if source_type is None:
raise Exception("source_type cannot be None")
if source_name is None:
raise Exception("source_name cannot be None")
if user_id is None:
raise Exception("user_id cannot be None")
if status is None:
raise Exception("status cannot be None")
if start_time is None:
raise Exception("start_time cannot be None")
self._run_uuid = run_uuid
self._experiment_id = experiment_id
self._name = name
self._source_type = source_type
self._source_name = source_name
self._entry_point_name = entry_point_name
self._user_id = user_id
self._status = status
self._start_time = start_time
self._end_time = end_time
self._source_version = source_version
self._lifecycle_stage = lifecycle_stage
self._artifact_uri = artifact_uri
def __eq__(self, other):
if type(other) is type(self):
# TODO deep equality here?
return self.__dict__ == other.__dict__
return False
def _copy_with_overrides(self, status=None, end_time=None, lifecycle_stage=None):
"""A copy of the RunInfo with certain attributes modified."""
proto = self.to_proto()
if status:
proto.status = status
if end_time:
proto.end_time = end_time
if lifecycle_stage:
proto.lifecycle_stage = lifecycle_stage
return RunInfo.from_proto(proto)
@property
def run_uuid(self):
"""String containing run UUID."""
return self._run_uuid
@property
def experiment_id(self):
"""Integer ID of the experiment for the current run."""
return self._experiment_id
@property
def name(self):
"""String name of the run."""
return self._name
@property
def source_type(self):
"""
:py:class:`mlflow.entities.SourceType` describing the source of the run.
"""
return self._source_type
@property
def source_name(self):
"""
String name of the source of the run (GitHub URI of the project corresponding to the run,
etc).
"""
return self._source_name
@property
def entry_point_name(self):
"""String name of the entry point for the run."""
return self._entry_point_name
@property
def user_id(self):
"""String ID of the user who initiated this run."""
return self._user_id
@property
def status(self):
"""
One of the values in :py:class:`mlflow.entities.RunStatus`
describing the status of the run.
"""
return self._status
@property
def start_time(self):
"""Start time of the run, in number of milliseconds since the UNIX epoch."""
return self._start_time
@property
def end_time(self):
"""End time of the run, in number of milliseconds since the UNIX epoch."""
return self._end_time
@property
def source_version(self):
"""String Git commit hash of the code used for the run, if available."""
return self._source_version
@property
def artifact_uri(self):
"""String root artifact URI of the run."""
return self._artifact_uri
@property
def lifecycle_stage(self):
return self._lifecycle_stage
def to_proto(self):
proto = ProtoRunInfo()
proto.run_uuid = self.run_uuid
proto.experiment_id = self.experiment_id
proto.name = self.name
proto.source_type = self.source_type
proto.source_name = self.source_name
if self.entry_point_name:
proto.entry_point_name = self.entry_point_name
proto.user_id = self.user_id
proto.status = self.status
proto.start_time = self.start_time
if self.end_time:
proto.end_time = self.end_time
if self.source_version:
proto.source_version = self.source_version
if self.artifact_uri:
proto.artifact_uri = self.artifact_uri
proto.lifecycle_stage = self.lifecycle_stage
return proto
@classmethod
def from_proto(cls, proto):
return cls(proto.run_uuid, proto.experiment_id, proto.name, proto.source_type,
proto.source_name, proto.entry_point_name, proto.user_id, proto.status,
proto.start_time, proto.end_time, proto.source_version, proto.lifecycle_stage,
proto.artifact_uri)
@classmethod
def _properties(cls):
# TODO: Hard coding this list of props for now. There has to be a clearer way...
return ["run_uuid", "experiment_id", "name", "source_type", "source_name",
"entry_point_name", "user_id", "status", "start_time", "end_time",
"source_version", "lifecycle_stage", "artifact_uri"]
| 34.115385
| 97
| 0.63569
|
8014b94237eae713c5a8c0ec74bccb22ea2177d4
| 17,246
|
py
|
Python
|
src/msdss_data_api/routers.py
|
rrwen/msdss-data-api
|
f2f82d1137c46d729fea2651b213b087467496b0
|
[
"MIT"
] | null | null | null |
src/msdss_data_api/routers.py
|
rrwen/msdss-data-api
|
f2f82d1137c46d729fea2651b213b087467496b0
|
[
"MIT"
] | null | null | null |
src/msdss_data_api/routers.py
|
rrwen/msdss-data-api
|
f2f82d1137c46d729fea2651b213b087467496b0
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
from datetime import datetime
from fastapi import APIRouter, Body, Depends, Query
from typing import Any, Dict, List, Literal, Optional
from .managers import *
from .models import *
from .tools import *
async def _no_current_user():
return None
def get_data_router(
users_api=None,
database=None,
route_settings=DEFAULT_DATA_ROUTE_SETTINGS,
prefix='/data',
tags=['data'],
*args, **kwargs):
"""
Get a data router.
Parameters
----------
users_api : :class:`msdss_users_api:msdss_users_api.core.UsersAPI` or None
Users API object to enable user authentication for data routes.
If ``None``, user authentication will not be used for data routes.
database : :class:`msdss_base_database:msdss_base_database.core.Database` or None
A :class:`msdss_base_database:msdss_base_database.core.Database` object for managing data. If ``None``, a default database will be used.
route_settings : dict
Dictionary of settings for the data routes. Each route consists of the following keys:
* ``path``: resource path for the route
* ``tags``: tags for open api spec
* ``_enable`` (bool): Whether this route should be included or not
* ``_restricted_tables`` (list(str)): List of table names not accessible by this route
* ``_get_user`` (dict or None): Additional arguments passed to the :meth:`msdss_users_api.msdss_users_api.core.UsersAPI.get_current_user` function for the route - only applies if parameter ``users_api`` is not ``None`` and this settings is not ``None``, otherwise no user authentication will be added for this route
* ``**kwargs``: Additional arguments passed to :meth:`fastapi:fastapi.FastAPI.get` for the id route
The default settings are:
.. jupyter-execute::
:hide-code:
from msdss_data_api.defaults import DEFAULT_DATA_ROUTE_SETTINGS
from pprint import pprint
pprint(DEFAULT_DATA_ROUTE_SETTINGS)
Any unspecified settings will be replaced by their defaults.
prefix : str
Prefix path to all routes belonging to this router.
tags : list(str)
Tags for all routes in this router.
*args, **kwargs
Additional arguments to accept any extra parameters passed to :class:`fastapi:fastapi.routing.APIRouter`.
Returns
-------
:class:`fastapi:fastapi.routing.APIRouter`
A router object used for data routes. See `FastAPI bigger apps <https://fastapi.tiangolo.com/tutorial/bigger-applications/>`_
Author
------
Richard Wen <rrwen.dev@gmail.com>
Example
-------
.. jupyter-execute::
from msdss_base_database import Database
from msdss_base_api import API
from msdss_users_api import UsersAPI
from msdss_data_api.routers import get_data_router
# Create database object
database = Database(
driver='postgresql',
user='msdss',
password='msdss123',
host='localhost',
port='5432',
database='msdss'
)
# Create an app
app = API()
# Add the data router
router = get_data_router(database=database)
app.add_router(router)
# Add the data router with users
# CHANGE SECRETS TO STRONG PHRASES
app = API()
users_api = UsersAPI(
'cookie-secret',
'jwt-secret',
'reset-secret',
'verification-secret',
database=database
)
router = get_data_router(users_api, database=database)
app.add_router(router)
# Host app at https://localhost:8000
# Try it at https://localhost:8000/docs
# app.start()
"""
database = database if database else Database()
# (get_data_router_defaults) Merge defaults and user params
get_user = {}
settings = deepcopy(DEFAULT_DATA_ROUTE_SETTINGS)
for k in settings:
if k in route_settings:
settings[k].update(route_settings[k])
# (get_data_router_apply) Apply settings to obtain dependencies
get_user = {}
get_data_manager = {}
enable = {}
for k, v in settings.items():
get_user[k] = users_api.get_current_user(**v['_get_user']) if users_api and '_get_user' in v else _no_current_user
del v['_get_user']
get_data_manager[k] = create_data_manager_func(database=database, restricted_tables=v.pop('_restricted_tables'))
enable[k] = v.pop('_enable')
# (get_data_router_metamanager) Create metadata manager func
get_metadata_manager = create_metadata_manager_func(database=database)
# (get_data_router_create) Create api router for data routes
out = APIRouter(prefix=prefix, tags=tags, *args, **kwargs)
# (get_data_router_columns) Add columns route to data router
if enable['columns']:
@out.get(**settings['columns'])
async def get_columns(
dataset: str = Query(..., description='Name of the dataset'),
data_manager = Depends(get_data_manager['query']),
user = Depends(get_user['columns'])
):
response = data_manager.get_columns(dataset)
return response
# (get_data_router_create) Add create route to data router
if enable['create']:
@out.post(**settings['create'])
async def create_data(
dataset: str = Query(..., description='Name of the dataset to create - the request body is used to upload JSON data under the "data" key in the form of "[{col: val, col2: val2, ...}, {col: val, col2: val2, ...}]", where each key represents a column and its corresponding value. Objects in this list should have the same keys.'),
body: DataCreate = Body(
...,
example={
'title': 'Title for Dataset',
'description': 'Description for dataset...',
'source': 'Data source for dataset',
'data': [
{'col_one': 1, 'col_two': 'a'},
{'col_one': 2, 'col_two': 'b'},
{'col_one': 3, 'col_two': 'c'}
]
}
),
data_manager = Depends(get_data_manager['create']),
metadata_manager = Depends(get_metadata_manager),
user = Depends(get_user['create'])
):
# (get_data_router_create_data) Get data
body = body.dict()
data = body.pop('data')
# (get_data_router_create_metadata) Format metadata
metadata = body
metadata['dataset'] = dataset
metadata['created_at'] = datetime.now()
metadata['updated_at'] = datetime.now()
# (get_data_router_create_users) Add user operations if available
if user:
metadata['created_by'] = user.email
# (get_data_router_create_run) Create dataset and metadata
data_manager.create(name=dataset, data=data)
metadata_manager.create(name=dataset, data=metadata)
# (get_data_router_delete) Add delete route to data router
if enable['delete']:
@out.delete(**settings['delete'])
async def delete_data(
dataset: str = Query(..., description='Name of the dataset to delete data from'),
            where: Optional[List[str]] = Query(None, description='Where statements to filter data to remove in the form of "column operator value" (e.g. "var < 3") - valid operators are: =, !=, >, >=, <, <=, LIKE, ILIKE, NOTLIKE, NOTILIKE, CONTAINS, STARTSWITH, ENDSWITH'),
where_boolean: Literal['AND', 'OR'] = Query('AND', alias='where-boolean', description='Either "AND" or "OR" to combine where statements'),
delete_all: Optional[bool] = Query(False, description='Whether to remove the entire dataset or not'),
data_manager = Depends(get_data_manager['delete']),
metadata_manager = Depends(get_metadata_manager),
user = Depends(get_user['delete'])
):
data_manager.delete(name=dataset, where=where, where_boolean=where_boolean, delete_all=delete_all)
metadata_manager.updated_at(name=dataset)
if delete_all:
metadata_manager.delete(name=dataset)
# (get_data_router_id) Add id route to data router
if enable['id']:
@out.get(**settings['id'])
async def get_data_by_id(
dataset: str = Query(..., description='Name of the dataset'),
id: str = Query(..., description='Identifier value to retrieve a specific document in the dataset'),
id_column: Optional[str] = Query('id', description='Identifier column name for the dataset'),
data_manager = Depends(get_data_manager['id']),
user = Depends(get_user['id'])
):
where = [f'{id_column} = {id}']
response = data_manager.get(name=dataset, where=where)
return response
# (get_data_router_insert) Add insert route to data router
if enable['insert']:
@out.put(**settings['insert'])
async def insert_data(
dataset: str = Query(..., description='Name of the dataset to insert - the request body is used to upload JSON data in the form of "[{key: value, key2: value2, ... }, {key: value, key2: value2, ...}]" where each key is a column name'),
data: List[Dict[str, Any]] = Body(...),
data_manager = Depends(get_data_manager['insert']),
metadata_manager = Depends(get_metadata_manager),
user = Depends(get_user['insert'])
):
data_manager.insert(name=dataset, data=data)
metadata_manager.updated_at(dataset)
# (get_data_router_metadata) Add metadata route to data router
if enable['metadata']:
@out.get(**settings['metadata'])
async def get_metadata(
dataset: str = Query(..., description='Name of the dataset to get metadata for'),
metadata_manager = Depends(get_metadata_manager),
user = Depends(get_user['metadata'])
):
response = metadata_manager.get(name=dataset)
return response
# (get_data_router_metadata) Add metadata route to data router
if enable['metadata_update']:
@out.put(**settings['metadata_update'])
async def update_metadata(
dataset: str = Query(..., description='Name of the dataset to update metadata for. Upload user and creation/update times can not be updated.'),
body: MetadataUpdate = Body(
...,
example={
'title': 'New Title to Replace Existing',
'description': 'New description to replace existing...',
'source': 'New data source to replace existing'
}
),
metadata_manager = Depends(get_metadata_manager),
user = Depends(get_user['metadata_update'])
):
response = metadata_manager.update(name=dataset, data=body.dict())
return response
# (get_data_router_query) Add query route to data router
if enable['query']:
@out.get(**settings['query'])
async def query_data(
dataset: str = Query(..., description='Name of the dataset to query'),
select: Optional[List[str]] = Query('*', description='columns to include - "*" means all columns and "None" means to omit selection (useful for aggregate queries)'),
            where: Optional[List[str]] = Query(None, description='Where statements to filter data in the form of "column operator value" (e.g. "var < 3") - valid operators are: =, !=, >, >=, <, <=, LIKE, ILIKE, NOTLIKE, NOTILIKE, CONTAINS, STARTSWITH, ENDSWITH'),
group_by: Optional[List[str]] = Query(None, alias='group-by', description='column names to group by - should be used with aggregate and aggregate_func parameters'),
aggregate: Optional[List[str]] = Query(None, description='column names to aggregate with the same order as the aggregate_func parameter'),
aggregate_func: Optional[List[str]] = Query(None, alias='aggregate-func', description='Aggregate functions in the same order as the aggregate parameter'),
order_by: Optional[List[str]] = Query(None, alias='order-by', description='column names to order by in the same order as parameter order_by_sort'),
order_by_sort: Optional[List[Literal['asc', 'desc']]] = Query(None, alias='order-by-sort', description='Either "asc" for ascending or "desc" for descending order in the same order as parameter order_by'),
limit: Optional[int] = Query(None, description='Number of items to return'),
offset: Optional[int] = Query(None, description='Number of items to skip'),
where_boolean: Literal['AND', 'OR'] = Query('AND', alias='where-boolean', description='Either "AND" or "OR" to combine where statements'),
data_manager = Depends(get_data_manager['query']),
user = Depends(get_user['query'])
):
select = None if select[0] == 'None' else select
response = data_manager.get(
name=dataset,
select=select,
where=where,
group_by=group_by,
aggregate=aggregate,
aggregate_func=aggregate_func,
order_by=order_by,
order_by_sort=order_by_sort,
limit=limit,
offset=offset,
where_boolean=where_boolean
)
return response
# (get_data_router_rows) Add rows route to data router
if enable['rows']:
@out.get(**settings['rows'])
async def get_rows(
dataset: str = Query(..., description='Name of the dataset'),
data_manager = Depends(get_data_manager['query']),
user = Depends(get_user['rows'])
):
response = data_manager.get_rows(dataset)
return response
# (get_data_router_search) Add search route to data router
if enable['search']:
@out.get(**settings['search'])
async def search_data(
select: Optional[List[str]] = Query('*', description='columns to include in search - "*" means all columns and "None" means to omit selection (useful for aggregate queries).'),
            where: Optional[List[str]] = Query(None, description='Where statements to filter data in the form of "column operator value" (e.g. "dataset = test_data") - valid operators are: =, !=, >, >=, <, <=, LIKE, ILIKE, NOTLIKE, NOTILIKE, CONTAINS, STARTSWITH, ENDSWITH'),
order_by: Optional[List[str]] = Query(None, alias='order-by', description='column names to order by in the same order as parameter order_by_sort'),
order_by_sort: Optional[List[Literal['asc', 'desc']]] = Query(None, alias='order-by-sort', description='Either "asc" for ascending or "desc" for descending order in the same order as parameter order_by'),
limit: Optional[int] = Query(None, description='Number of items to return'),
offset: Optional[int] = Query(None, description='Number of items to skip'),
where_boolean: Literal['AND', 'OR'] = Query('AND', alias='where-boolean', description='Either "AND" or "OR" to combine where statements'),
metadata_manager = Depends(get_metadata_manager),
user = Depends(get_user['search'])
):
select = None if select[0] == 'None' else select
response = metadata_manager.search(
select=select,
where=where,
order_by=order_by,
order_by_sort=order_by_sort,
limit=limit,
offset=offset,
where_boolean=where_boolean
)
return response
# (get_data_router_update) Add update route to data router
if enable['update']:
@out.put(**settings['update'])
async def update_data(
dataset: str = Query(..., description='Name of the dataset to update - the request body is used to upload JSON data in the form of "{key: value, key2: value2, ... }" where each key is a column name and each value is the new value to use (matching the where parameter)'),
body: Dict[str, Any] = Body(
...,
example={'col_one': 1, 'col_two': 'a'}
),
            where: List[str] = Query(..., description='Where statements to filter data to update in the form of "column operator value" (e.g. "var < 3") - valid operators are: =, !=, >, >=, <, <=, LIKE, ILIKE, NOTLIKE, NOTILIKE, CONTAINS, STARTSWITH, ENDSWITH'),
data_manager = Depends(get_data_manager['update']),
metadata_manager = Depends(get_metadata_manager),
user = Depends(get_user['update'])
):
data_manager.update(name=dataset, data=body, where=where)
metadata_manager.updated_at(dataset)
return out
| 49.700288
| 340
| 0.616375
|
032ea94ee2a777064d9e102da56759e39ee27a36
| 19,299
|
py
|
Python
|
venv/Lib/site-packages/mpl_toolkits/mplot3d/axis3d.py
|
AdarshSai/Final_project
|
f966834ca72dd232102ed500ef47ef2b3bdbed5b
|
[
"MIT"
] | 353
|
2020-12-10T10:47:17.000Z
|
2022-03-31T23:08:29.000Z
|
venv/Lib/site-packages/mpl_toolkits/mplot3d/axis3d.py
|
AdarshSai/Final_project
|
f966834ca72dd232102ed500ef47ef2b3bdbed5b
|
[
"MIT"
] | 80
|
2020-12-10T09:54:22.000Z
|
2022-03-30T22:08:45.000Z
|
venv/Lib/site-packages/mpl_toolkits/mplot3d/axis3d.py
|
AdarshSai/Final_project
|
f966834ca72dd232102ed500ef47ef2b3bdbed5b
|
[
"MIT"
] | 63
|
2020-12-10T17:10:34.000Z
|
2022-03-28T16:27:07.000Z
|
# axis3d.py, original mplot3d version by John Porter
# Created: 23 Sep 2005
# Parts rewritten by Reinier Heeres <reinier@heeres.eu>
import numpy as np
import matplotlib.transforms as mtransforms
from matplotlib import (
artist, lines as mlines, axis as maxis, patches as mpatches, rcParams)
from . import art3d, proj3d
def move_from_center(coord, centers, deltas, axmask=(True, True, True)):
"""
For each coordinate where *axmask* is True, move *coord* away from
*centers* by *deltas*.
"""
coord = np.asarray(coord)
return coord + axmask * np.copysign(1, coord - centers) * deltas
def tick_update_position(tick, tickxs, tickys, labelpos):
"""Update tick line and label position and style."""
tick.label1.set_position(labelpos)
tick.label2.set_position(labelpos)
tick.tick1line.set_visible(True)
tick.tick2line.set_visible(False)
tick.tick1line.set_linestyle('-')
tick.tick1line.set_marker('')
tick.tick1line.set_data(tickxs, tickys)
tick.gridline.set_data(0, 0)
class Axis(maxis.XAxis):
"""An Axis class for the 3D plots."""
# These points from the unit cube make up the x, y and z-planes
_PLANES = (
(0, 3, 7, 4), (1, 2, 6, 5), # yz planes
(0, 1, 5, 4), (3, 2, 6, 7), # xz planes
(0, 1, 2, 3), (4, 5, 6, 7), # xy planes
)
# Some properties for the axes
_AXINFO = {
'x': {'i': 0, 'tickdir': 1, 'juggled': (1, 0, 2),
'color': (0.95, 0.95, 0.95, 0.5)},
'y': {'i': 1, 'tickdir': 0, 'juggled': (0, 1, 2),
'color': (0.90, 0.90, 0.90, 0.5)},
'z': {'i': 2, 'tickdir': 0, 'juggled': (0, 2, 1),
'color': (0.925, 0.925, 0.925, 0.5)},
}
def __init__(self, adir, v_intervalx, d_intervalx, axes, *args,
rotate_label=None, **kwargs):
# adir identifies which axes this is
self.adir = adir
# This is a temporary member variable.
# Do not depend on this existing in future releases!
self._axinfo = self._AXINFO[adir].copy()
if rcParams['_internal.classic_mode']:
self._axinfo.update({
'label': {'va': 'center', 'ha': 'center'},
'tick': {
'inward_factor': 0.2,
'outward_factor': 0.1,
'linewidth': {
True: rcParams['lines.linewidth'], # major
False: rcParams['lines.linewidth'], # minor
}
},
'axisline': {'linewidth': 0.75, 'color': (0, 0, 0, 1)},
'grid': {
'color': (0.9, 0.9, 0.9, 1),
'linewidth': 1.0,
'linestyle': '-',
},
})
else:
self._axinfo.update({
'label': {'va': 'center', 'ha': 'center'},
'tick': {
'inward_factor': 0.2,
'outward_factor': 0.1,
'linewidth': {
True: ( # major
rcParams['xtick.major.width'] if adir in 'xz' else
rcParams['ytick.major.width']),
False: ( # minor
rcParams['xtick.minor.width'] if adir in 'xz' else
rcParams['ytick.minor.width']),
}
},
'axisline': {
'linewidth': rcParams['axes.linewidth'],
'color': rcParams['axes.edgecolor'],
},
'grid': {
'color': rcParams['grid.color'],
'linewidth': rcParams['grid.linewidth'],
'linestyle': rcParams['grid.linestyle'],
},
})
maxis.XAxis.__init__(self, axes, *args, **kwargs)
# data and viewing intervals for this direction
self.d_interval = d_intervalx
self.v_interval = v_intervalx
self.set_rotate_label(rotate_label)
def init3d(self):
self.line = mlines.Line2D(
xdata=(0, 0), ydata=(0, 0),
linewidth=self._axinfo['axisline']['linewidth'],
color=self._axinfo['axisline']['color'],
antialiased=True)
# Store dummy data in Polygon object
self.pane = mpatches.Polygon(
np.array([[0, 0], [0, 1], [1, 0], [0, 0]]),
closed=False, alpha=0.8, facecolor='k', edgecolor='k')
self.set_pane_color(self._axinfo['color'])
self.axes._set_artist_props(self.line)
self.axes._set_artist_props(self.pane)
self.gridlines = art3d.Line3DCollection([])
self.axes._set_artist_props(self.gridlines)
self.axes._set_artist_props(self.label)
self.axes._set_artist_props(self.offsetText)
# Need to be able to place the label at the correct location
self.label._transform = self.axes.transData
self.offsetText._transform = self.axes.transData
def get_major_ticks(self, numticks=None):
ticks = maxis.XAxis.get_major_ticks(self, numticks)
for t in ticks:
for obj in [
t.tick1line, t.tick2line, t.gridline, t.label1, t.label2]:
obj.set_transform(self.axes.transData)
return ticks
def get_minor_ticks(self, numticks=None):
ticks = maxis.XAxis.get_minor_ticks(self, numticks)
for t in ticks:
for obj in [
t.tick1line, t.tick2line, t.gridline, t.label1, t.label2]:
obj.set_transform(self.axes.transData)
return ticks
def set_pane_pos(self, xys):
xys = np.asarray(xys)
xys = xys[:, :2]
self.pane.xy = xys
self.stale = True
def set_pane_color(self, color):
"""Set pane color to a RGBA tuple."""
self._axinfo['color'] = color
self.pane.set_edgecolor(color)
self.pane.set_facecolor(color)
self.pane.set_alpha(color[-1])
self.stale = True
def set_rotate_label(self, val):
"""
Whether to rotate the axis label: True, False or None.
If set to None the label will be rotated if longer than 4 chars.
"""
self._rotate_label = val
self.stale = True
def get_rotate_label(self, text):
if self._rotate_label is not None:
return self._rotate_label
else:
return len(text) > 4
def _get_coord_info(self, renderer):
mins, maxs = np.array([
self.axes.get_xbound(),
self.axes.get_ybound(),
self.axes.get_zbound(),
]).T
centers = (maxs + mins) / 2.
deltas = (maxs - mins) / 12.
mins = mins - deltas / 4.
maxs = maxs + deltas / 4.
vals = mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]
tc = self.axes.tunit_cube(vals, renderer.M)
avgz = [tc[p1][2] + tc[p2][2] + tc[p3][2] + tc[p4][2]
for p1, p2, p3, p4 in self._PLANES]
highs = np.array([avgz[2*i] < avgz[2*i+1] for i in range(3)])
return mins, maxs, centers, deltas, tc, highs
def draw_pane(self, renderer):
renderer.open_group('pane3d', gid=self.get_gid())
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
info = self._axinfo
index = info['i']
if not highs[index]:
plane = self._PLANES[2 * index]
else:
plane = self._PLANES[2 * index + 1]
xys = [tc[p] for p in plane]
self.set_pane_pos(xys)
self.pane.draw(renderer)
renderer.close_group('pane3d')
@artist.allow_rasterization
def draw(self, renderer):
self.label._transform = self.axes.transData
renderer.open_group('axis3d', gid=self.get_gid())
ticks = self._update_ticks()
info = self._axinfo
index = info['i']
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
# Determine grid lines
minmax = np.where(highs, maxs, mins)
maxmin = np.where(highs, mins, maxs)
# Draw main axis line
juggled = info['juggled']
edgep1 = minmax.copy()
edgep1[juggled[0]] = maxmin[juggled[0]]
edgep2 = edgep1.copy()
edgep2[juggled[1]] = maxmin[juggled[1]]
pep = np.asarray(
proj3d.proj_trans_points([edgep1, edgep2], renderer.M))
centpt = proj3d.proj_transform(*centers, renderer.M)
self.line.set_data(pep[0], pep[1])
self.line.draw(renderer)
# Grid points where the planes meet
xyz0 = np.tile(minmax, (len(ticks), 1))
xyz0[:, index] = [tick.get_loc() for tick in ticks]
# Draw labels
# The transAxes transform is used because the Text object
# rotates the text relative to the display coordinate system.
# Therefore, if we want the labels to remain parallel to the
# axis regardless of the aspect ratio, we need to convert the
# edge points of the plane to display coordinates and calculate
# an angle from that.
# TODO: Maybe Text objects should handle this themselves?
dx, dy = (self.axes.transAxes.transform([pep[0:2, 1]]) -
self.axes.transAxes.transform([pep[0:2, 0]]))[0]
lxyz = 0.5 * (edgep1 + edgep2)
# A rough estimate; points are ambiguous since 3D plots rotate
ax_scale = self.axes.bbox.size / self.figure.bbox.size
ax_inches = np.multiply(ax_scale, self.figure.get_size_inches())
ax_points_estimate = sum(72. * ax_inches)
deltas_per_point = 48 / ax_points_estimate
default_offset = 21.
labeldeltas = (
(self.labelpad + default_offset) * deltas_per_point * deltas)
axmask = [True, True, True]
axmask[index] = False
lxyz = move_from_center(lxyz, centers, labeldeltas, axmask)
tlx, tly, tlz = proj3d.proj_transform(*lxyz, renderer.M)
self.label.set_position((tlx, tly))
if self.get_rotate_label(self.label.get_text()):
angle = art3d._norm_text_angle(np.rad2deg(np.arctan2(dy, dx)))
self.label.set_rotation(angle)
self.label.set_va(info['label']['va'])
self.label.set_ha(info['label']['ha'])
self.label.draw(renderer)
# Draw Offset text
# Which of the two edge points do we want to
# use for locating the offset text?
if juggled[2] == 2:
outeredgep = edgep1
outerindex = 0
else:
outeredgep = edgep2
outerindex = 1
pos = move_from_center(outeredgep, centers, labeldeltas, axmask)
olx, oly, olz = proj3d.proj_transform(*pos, renderer.M)
self.offsetText.set_text(self.major.formatter.get_offset())
self.offsetText.set_position((olx, oly))
angle = art3d._norm_text_angle(np.rad2deg(np.arctan2(dy, dx)))
self.offsetText.set_rotation(angle)
# Must set rotation mode to "anchor" so that
# the alignment point is used as the "fulcrum" for rotation.
self.offsetText.set_rotation_mode('anchor')
#----------------------------------------------------------------------
        # Note: the following statements determine the proper alignment of
        # the offset text. This was determined entirely by trial-and-error
# and should not be in any way considered as "the way". There are
# still some edge cases where alignment is not quite right, but this
# seems to be more of a geometry issue (in other words, I might be
# using the wrong reference points).
#
# (TT, FF, TF, FT) are the shorthand for the tuple of
# (centpt[info['tickdir']] <= pep[info['tickdir'], outerindex],
# centpt[index] <= pep[index, outerindex])
#
# Three-letters (e.g., TFT, FTT) are short-hand for the array of bools
# from the variable 'highs'.
# ---------------------------------------------------------------------
if centpt[info['tickdir']] > pep[info['tickdir'], outerindex]:
# if FT and if highs has an even number of Trues
if (centpt[index] <= pep[index, outerindex]
and np.count_nonzero(highs) % 2 == 0):
# Usually, this means align right, except for the FTT case,
# in which offset for axis 1 and 2 are aligned left.
if highs.tolist() == [False, True, True] and index in (1, 2):
align = 'left'
else:
align = 'right'
else:
# The FF case
align = 'left'
else:
# if TF and if highs has an even number of Trues
if (centpt[index] > pep[index, outerindex]
and np.count_nonzero(highs) % 2 == 0):
# Usually mean align left, except if it is axis 2
if index == 2:
align = 'right'
else:
align = 'left'
else:
# The TT case
align = 'right'
self.offsetText.set_va('center')
self.offsetText.set_ha(align)
self.offsetText.draw(renderer)
if self.axes._draw_grid and len(ticks):
# Grid lines go from the end of one plane through the plane
# intersection (at xyz0) to the end of the other plane. The first
# point (0) differs along dimension index-2 and the last (2) along
# dimension index-1.
lines = np.stack([xyz0, xyz0, xyz0], axis=1)
lines[:, 0, index - 2] = maxmin[index - 2]
lines[:, 2, index - 1] = maxmin[index - 1]
self.gridlines.set_segments(lines)
self.gridlines.set_color(info['grid']['color'])
self.gridlines.set_linewidth(info['grid']['linewidth'])
self.gridlines.set_linestyle(info['grid']['linestyle'])
self.gridlines.draw(renderer, project=True)
# Draw ticks
tickdir = info['tickdir']
tickdelta = deltas[tickdir]
if highs[tickdir]:
ticksign = 1
else:
ticksign = -1
for tick in ticks:
# Get tick line positions
pos = edgep1.copy()
pos[index] = tick.get_loc()
pos[tickdir] = (
edgep1[tickdir]
+ info['tick']['outward_factor'] * ticksign * tickdelta)
x1, y1, z1 = proj3d.proj_transform(*pos, renderer.M)
pos[tickdir] = (
edgep1[tickdir]
- info['tick']['inward_factor'] * ticksign * tickdelta)
x2, y2, z2 = proj3d.proj_transform(*pos, renderer.M)
# Get position of label
default_offset = 8. # A rough estimate
labeldeltas = (
(tick.get_pad() + default_offset) * deltas_per_point * deltas)
axmask = [True, True, True]
axmask[index] = False
pos[tickdir] = edgep1[tickdir]
pos = move_from_center(pos, centers, labeldeltas, axmask)
lx, ly, lz = proj3d.proj_transform(*pos, renderer.M)
tick_update_position(tick, (x1, x2), (y1, y2), (lx, ly))
tick.tick1line.set_linewidth(
info['tick']['linewidth'][tick._major])
tick.draw(renderer)
renderer.close_group('axis3d')
self.stale = False
# TODO: Get this to work (more) properly when mplot3d supports the
# transforms framework.
def get_tightbbox(self, renderer, *, for_layout_only=False):
# inherited docstring
if not self.get_visible():
return
# We have to directly access the internal data structures
# (and hope they are up to date) because at draw time we
# shift the ticks and their labels around in (x, y) space
# based on the projection, the current view port, and their
# position in 3D space. If we extend the transforms framework
# into 3D we would not need to do this different book keeping
# than we do in the normal axis
major_locs = self.get_majorticklocs()
minor_locs = self.get_minorticklocs()
ticks = [*self.get_minor_ticks(len(minor_locs)),
*self.get_major_ticks(len(major_locs))]
view_low, view_high = self.get_view_interval()
if view_low > view_high:
view_low, view_high = view_high, view_low
interval_t = self.get_transform().transform([view_low, view_high])
ticks_to_draw = []
for tick in ticks:
try:
loc_t = self.get_transform().transform(tick.get_loc())
except AssertionError:
# Transform.transform doesn't allow masked values but
# some scales might make them, so we need this try/except.
pass
else:
if mtransforms._interval_contains_close(interval_t, loc_t):
ticks_to_draw.append(tick)
ticks = ticks_to_draw
bb_1, bb_2 = self._get_tick_bboxes(ticks, renderer)
other = []
if self.line.get_visible():
other.append(self.line.get_window_extent(renderer))
if (self.label.get_visible() and not for_layout_only and
self.label.get_text()):
other.append(self.label.get_window_extent(renderer))
return mtransforms.Bbox.union([*bb_1, *bb_2, *other])
@property
def d_interval(self):
return self.get_data_interval()
@d_interval.setter
def d_interval(self, minmax):
self.set_data_interval(*minmax)
@property
def v_interval(self):
return self.get_view_interval()
@v_interval.setter
def v_interval(self, minmax):
self.set_view_interval(*minmax)
# Use classes to look at different data limits
class XAxis(Axis):
get_view_interval, set_view_interval = maxis._make_getset_interval(
"view", "xy_viewLim", "intervalx")
get_data_interval, set_data_interval = maxis._make_getset_interval(
"data", "xy_dataLim", "intervalx")
class YAxis(Axis):
get_view_interval, set_view_interval = maxis._make_getset_interval(
"view", "xy_viewLim", "intervaly")
get_data_interval, set_data_interval = maxis._make_getset_interval(
"data", "xy_dataLim", "intervaly")
class ZAxis(Axis):
get_view_interval, set_view_interval = maxis._make_getset_interval(
"view", "zz_viewLim", "intervalx")
get_data_interval, set_data_interval = maxis._make_getset_interval(
"data", "zz_dataLim", "intervalx")
| 39.466258
| 80
| 0.548733
|
380972d7f1abcdc57bbcab2b1b680088f2bd1072
| 1,971
|
py
|
Python
|
python_util/parser/xml/page/page_constants.py
|
CITlabRostock/citlab-article-separation-new
|
814364bf81552eefbe0ce60bbb9ec9e8ca63baf4
|
[
"OLDAP-2.7"
] | 8
|
2021-05-03T11:53:25.000Z
|
2022-03-31T13:07:42.000Z
|
python_util/parser/xml/page/page_constants.py
|
CITlabRostock/citlab-article-separation-new
|
814364bf81552eefbe0ce60bbb9ec9e8ca63baf4
|
[
"OLDAP-2.7"
] | null | null | null |
python_util/parser/xml/page/page_constants.py
|
CITlabRostock/citlab-article-separation-new
|
814364bf81552eefbe0ce60bbb9ec9e8ca63baf4
|
[
"OLDAP-2.7"
] | null | null | null |
"""
This file contains all important names related to the PAGE-XML format.
"""
# Creators name
sCREATOR = "CITlab"
# Namespace for PageXml
NS_PAGE_XML = "http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15"
NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
XSILOCATION = "http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15 " \
"http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15/pagecontent.xsd"
# Schema for Transkribus PageXml
XSL_SCHEMA_FILENAME = "pagecontent_transkribus.xsd"
# XML schema loaded once for all
cachedValidationContext = None
sMETADATA_ELT = "Metadata"
sCREATOR_ELT = "Creator"
sCREATED_ELT = "Created"
sLAST_CHANGE_ELT = "LastChange"
sCOMMENTS_ELT = "Comments"
sTranskribusMetadata_ELT = "TranskribusMetadata"
sPRINT_SPACE = "PrintSpace"
sCUSTOM_ATTR = "custom"
sTEXTLINE = "TextLine"
sBASELINE = "Baseline"
sWORD = "Word"
sCOORDS = "Coords"
sTEXTEQUIV = "TextEquiv"
sUNICODE = "Unicode"
sPOINTS_ATTR = "points"
sREADING_ORDER = "readingOrder"
sTEXTREGION = "TextRegion"
sIMAGEREGION = "ImageRegion"
sLINEDRAWINGREGION = "LineDrawingRegion"
sGRAPHICREGION = "GraphicRegion"
sTABLEREGION = "TableRegion"
sCHARTREGION = "ChartRegion"
sSEPARATORREGION = "SeparatorRegion"
sMATHSREGION = "MathsRegion"
sCHEMREGION = "ChemRegion"
sMUSICREGION = "MusicRegion"
sADVERTREGION = "AdvertRegion"
sNOISEREGION = "NoiseRegion"
sUNKNOWNREGION = "UnknownRegion"
sEXT = ".xml"
# TextRegion Types
class TextRegionTypes:
sPARAGRAPH = "paragraph"
sHEADING = "heading"
sCAPTION = "caption"
sHEADER = "header"
sFOOTER = "footer"
sPAGENUMBER = "page-number"
sDROPCAPITAL = "drop-capital"
sCREDIT = "credit"
sFLOATING = "floating"
sSIGNATUREMARK = "signature-mark"
sCATCHWORD = "catch-word"
sMARGINALIA = "marginalia"
sFOOTNOTE = "footnote"
sFOOTNOTECONT = "footnote-continued"
sENDNOTE = "endnote"
sTOCENTRY = "TOC-entry"
sOTHER = "other"
| 26.28
| 95
| 0.737697
|
77c27250f42f99a280a1a5621c80a14db7401788
| 10,234
|
py
|
Python
|
nvtabular/ops/join_groupby.py
|
L0Z1K/NVTabular
|
b4dfcc63f7621fcb577833d75b21bc4dd7120d3a
|
[
"Apache-2.0"
] | null | null | null |
nvtabular/ops/join_groupby.py
|
L0Z1K/NVTabular
|
b4dfcc63f7621fcb577833d75b21bc4dd7120d3a
|
[
"Apache-2.0"
] | null | null | null |
nvtabular/ops/join_groupby.py
|
L0Z1K/NVTabular
|
b4dfcc63f7621fcb577833d75b21bc4dd7120d3a
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dask.dataframe as dd
import numpy as np
import pandas as pd
from dask.delayed import Delayed
import nvtabular as nvt
from merlin.schema import Schema
from nvtabular.dispatch import DataFrameType, arange, concat_columns, read_parquet_dispatch
from . import categorify as nvt_cat
from .operator import ColumnSelector, Operator
from .stat_operator import StatOperator
AGG_DTYPES = {
"count": np.int32,
"std": np.float32,
"var": np.float32,
"mean": np.float32,
}
class JoinGroupby(StatOperator):
"""
One of the ways to create new features is to calculate
the basic statistics of the data that is grouped by categorical
features. This operator groups the data by the given categorical
feature(s) and calculates the desired statistics of requested continuous
features (along with the count of rows in each group). The aggregated
statistics are merged with the data (by joining on the desired
categorical columns).
Example usage::
# Use JoinGroupby to define a NVTabular workflow
groupby_features = ['cat1', 'cat2', 'cat3'] >> ops.JoinGroupby(
out_path=str(tmpdir), stats=['sum','count'], cont_cols=['num1']
)
processor = nvtabular.Workflow(groupby_features)
Parameters
-----------
cont_cols : list of str or WorkflowNode
The continuous columns to calculate statistics for
(for each unique group in each column in `columns`).
stats : list of str, default []
List of statistics to calculate for each unique group. Note
that "count" corresponds to the group itself, while all
other statistics correspond to a specific continuous column.
        Supported statistics include ["count", "sum", "mean", "std", "var", "min", "max"].
tree_width : dict or int, optional
Tree width of the hash-based groupby reduction for each categorical
column. High-cardinality columns may require a large `tree_width`,
while low-cardinality columns can likely use `tree_width=1`.
If passing a dict, each key and value should correspond to the column
name and width, respectively. The default value is 8 for all columns.
    cat_cache : str, default "host"
        Location used to cache the pre-computed groupby statistics when they
        are read back during ``transform`` (for example ``"host"`` or
        ``"device"``); ``"device"`` caching is downgraded to ``"host"`` when
        the data is pandas-backed.
out_path : str, optional
Root directory where groupby statistics will be written out in
parquet format.
on_host : bool, default True
Whether to convert cudf data to pandas between tasks in the hash-based
groupby reduction. The extra host <-> device data movement can reduce
performance. However, using `on_host=True` typically improves stability
(by avoiding device-level memory pressure).
name_sep : str, default "_"
String separator to use between concatenated column names
for multi-column groups.
"""
def __init__(
self,
cont_cols=None,
stats=("count",),
tree_width=None,
cat_cache="host",
out_path=None,
on_host=True,
name_sep="_",
):
super().__init__()
self.storage_name = {}
self.name_sep = name_sep
self.stats = stats
self.tree_width = tree_width
self.out_path = out_path or "./"
self.on_host = on_host
self.cat_cache = cat_cache
self.categories = {}
self._cont_names = None
if isinstance(cont_cols, nvt.WorkflowNode):
self.cont_cols = cont_cols
elif isinstance(cont_cols, ColumnSelector):
self.cont_cols = self._cont_names = cont_cols
else:
self.cont_cols = self._cont_names = ColumnSelector(cont_cols)
supported_ops = ["count", "sum", "mean", "std", "var", "min", "max"]
for op in self.stats:
if op not in supported_ops:
raise ValueError(op + " operation is not supported.")
@property
def cont_names(self):
if self._cont_names:
return self._cont_names
elif self.cont_cols.output_schema:
return self.cont_cols.output_columns
else:
raise RuntimeError(
"Can't compute continuous columns used by `JoinGroupby` "
"until `Workflow` is fit to dataset or schema."
)
def fit(self, col_selector: ColumnSelector, ddf: dd.DataFrame):
for group in col_selector.subgroups:
if len(group.names) > 1:
name = nvt_cat._make_name(*group.names, sep=self.name_sep)
for col in group.names:
self.storage_name[col] = name
# Check metadata type to reset on_host and cat_cache if the
# underlying ddf is already a pandas-backed collection
if isinstance(ddf._meta, pd.DataFrame):
self.on_host = False
# Cannot use "device" caching if the data is pandas-backed
self.cat_cache = "host" if self.cat_cache == "device" else self.cat_cache
dsk, key = nvt_cat._category_stats(
ddf,
nvt_cat.FitOptions(
col_selector,
self.cont_names,
self.stats,
self.out_path,
0,
self.tree_width,
self.on_host,
concat_groups=False,
name_sep=self.name_sep,
),
)
return Delayed(key, dsk)
def fit_finalize(self, dask_stats):
for col in dask_stats:
self.categories[col] = dask_stats[col]
def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:
new_df = type(df)()
tmp = "__tmp__" # Temporary column for sorting
df[tmp] = arange(len(df), like_df=df, dtype="int32")
cat_names = []
multi_col_group = {}
for col_name in col_selector.grouped_names:
if isinstance(col_name, (list, tuple)):
name = nvt_cat._make_name(*col_name, sep=self.name_sep)
if name not in cat_names and all(col in df.columns for col in col_name):
cat_names.append(name)
multi_col_group[name] = col_name
elif col_name in df.columns:
cat_names.append(col_name)
_read_pq_func = read_parquet_dispatch(df)
for name in cat_names:
new_part = type(df)()
storage_name = self.storage_name.get(name, name)
name = multi_col_group.get(name, name)
path = self.categories[storage_name]
selection_l = list(name) if isinstance(name, tuple) else [name]
selection_r = list(name) if isinstance(name, tuple) else [storage_name]
stat_df = nvt_cat._read_groupby_stat_df(
path, storage_name, self.cat_cache, _read_pq_func
)
tran_df = df[selection_l + [tmp]].merge(
stat_df, left_on=selection_l, right_on=selection_r, how="left"
)
tran_df = tran_df.sort_values(tmp)
tran_df.drop(columns=selection_l + [tmp], inplace=True)
new_cols = [c for c in tran_df.columns if c not in new_df.columns]
new_part = tran_df[new_cols].reset_index(drop=True)
for col in new_part.columns:
for agg in list(AGG_DTYPES.keys()):
if col.endswith(f"{self.name_sep}{agg}"):
new_dtype = AGG_DTYPES.get(agg, new_part[col].dtype)
new_part[col] = new_part[col].astype(new_dtype)
new_df = concat_columns([new_df, new_part])
df.drop(columns=[tmp], inplace=True)
return new_df
def dependencies(self):
return self.cont_cols
def compute_selector(
self,
input_schema: Schema,
selector: ColumnSelector,
parents_selector: ColumnSelector,
dependencies_selector: ColumnSelector,
) -> ColumnSelector:
self._validate_matching_cols(input_schema, parents_selector, "computing input selector")
return parents_selector
def column_mapping(self, col_selector):
column_mapping = {}
for group in col_selector.grouped_names:
if isinstance(group, (tuple, list)):
name = nvt_cat._make_name(*group, sep=self.name_sep)
group = [*group]
else:
name = group
group = [group]
for cont in self.cont_names.names:
for stat in self.stats:
if stat == "count":
column_mapping[f"{name}_{stat}"] = [*group]
else:
column_mapping[f"{name}_{cont}_{stat}"] = [cont, *group]
return column_mapping
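    # Illustrative sketch of the naming scheme implemented by column_mapping
    # above (the column names "author" and "price" are hypothetical): a group
    # ["author"] with cont_names=["price"] and stats=["count", "mean"] maps
    # "author_count" -> ["author"] and "author_price_mean" -> ["price", "author"],
    # using the default name_sep="_".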
def _compute_dtype(self, col_schema, input_schema):
new_schema = super()._compute_dtype(col_schema, input_schema)
dtype = new_schema.dtype
is_list = new_schema.is_list
for agg in list(AGG_DTYPES.keys()):
if col_schema.name.endswith(f"{self.name_sep}{agg}"):
dtype = AGG_DTYPES.get(agg, dtype)
is_list = False
break
return col_schema.with_dtype(dtype, is_list=is_list, is_ragged=is_list)
def set_storage_path(self, new_path, copy=False):
self.categories = nvt_cat._copy_storage(self.categories, self.out_path, new_path, copy)
self.out_path = new_path
def clear(self):
self.categories = {}
self.storage_name = {}
transform.__doc__ = Operator.transform.__doc__
fit.__doc__ = StatOperator.fit.__doc__
fit_finalize.__doc__ = StatOperator.fit_finalize.__doc__
| 38.04461
| 96
| 0.62351
|
66d0f0472be47ab87c5b4616c9c941ae26278ffd
| 2,483
|
py
|
Python
|
src/odyssey_tests/type_converter_tests/convert_tests.py
|
GodwinneLorayne/odyssey
|
b5576818d70bea011772b944a4dd947777a5ac2f
|
[
"MIT"
] | 1
|
2020-06-01T20:52:37.000Z
|
2020-06-01T20:52:37.000Z
|
src/odyssey_tests/type_converter_tests/convert_tests.py
|
GodwinneLorayne/odyssey
|
b5576818d70bea011772b944a4dd947777a5ac2f
|
[
"MIT"
] | 4
|
2020-06-06T04:50:24.000Z
|
2021-02-03T07:14:49.000Z
|
src/odyssey_tests/type_converter_tests/convert_tests.py
|
python-odyssey/odyssey
|
b5576818d70bea011772b944a4dd947777a5ac2f
|
[
"MIT"
] | 1
|
2020-05-30T21:59:11.000Z
|
2020-05-30T21:59:11.000Z
|
import pytest
from odyssey.type_converter.convert import (
to_string,
to_int,
to_float,
to_complex,
to_bool,
to_tuple,
to_list,
to_dict,
)
def test_to_string_string():
expected = "Hello, World!"
value = expected
result = to_string(value)
assert expected == result
def test_to_string_int():
expected = "42"
value = 42
result = to_string(value)
assert expected == result
def test_to_int():
expected = 42
value = "42"
result = to_int(value)
assert expected == result
def test_to_string_float():
expected = "2.56"
value = 2.56
result = to_string(value)
assert expected == result
def test_to_float():
expected = 2.56
value = "2.56"
result = to_float(value)
assert expected == result
def test_to_string_complex():
expected = "(2+4j)"
value = complex(2, 4)
result = to_string(value)
assert expected == result
def test_to_complex():
expected = complex(2, 4)
value = "(2+4j)"
result = to_complex(value)
assert expected == result
def test_to_string_bool():
expected = "True"
value = True
result = to_string(value)
assert expected == result
def test_to_bool():
expected = True
value = "True"
result = to_bool(value)
assert expected == result
def test_to_string_tuple():
expected = '["Hello, World", 42, 2.56]'
value = ("Hello, World", 42, 2.56)
result = to_string(value)
assert expected == result
def test_to_tuple():
expected = ("Hello, World", 42, 2.56)
value = '["Hello, World", 42, 2.56]'
result = to_tuple(value)
assert expected == result
def test_to_string_list():
expected = '["Hello, World", 42, 2.56]'
value = ["Hello, World", 42, 2.56]
result = to_string(value)
assert expected == result
def test_to_list():
expected = ["Hello, World", 42, 2.56]
value = '["Hello, World", 42, 2.56]'
result = to_list(value)
assert expected == result
def test_to_string_dict():
expected = '{"value1": "Hello, World", "value2": 42, "value3": 2.56}'
value = {"value1": "Hello, World", "value2": 42, "value3": 2.56}
result = to_string(value)
assert expected == result
def test_to_dict():
expected = {"value1": "Hello, World", "value2": 42, "value3": 2.56}
value = '{"value1": "Hello, World", "value2": 42, "value3": 2.56}'
result = to_dict(value)
assert expected == result
| 16.891156
| 73
| 0.611357
|
0c606ed1bfa2a22347abd9bde03bf6ce80684d5a
| 616
|
py
|
Python
|
telethon/tl/functions/test.py
|
yande-eghosa/Telegram-Click-Bot
|
d2905373b93475ea3b4562128f84a66aee0eb7a0
|
[
"MIT"
] | 1
|
2020-11-22T20:30:27.000Z
|
2020-11-22T20:30:27.000Z
|
telethon/tl/functions/test.py
|
yande-eghosa/Telegram-Click-Bot
|
d2905373b93475ea3b4562128f84a66aee0eb7a0
|
[
"MIT"
] | null | null | null |
telethon/tl/functions/test.py
|
yande-eghosa/Telegram-Click-Bot
|
d2905373b93475ea3b4562128f84a66aee0eb7a0
|
[
"MIT"
] | null | null | null |
"""File generated by TLObjects' generator. All changes will be ERASED"""
from ...tl.tlobject import TLObject
from ...tl.tlobject import TLRequest
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
from datetime import datetime
class DummyFunctionRequest(TLRequest):
CONSTRUCTOR_ID = 0x56137993
SUBCLASS_OF_ID = 0x53299ae6
def to_dict(self):
return {
'_': 'DummyFunctionRequest'
}
def __bytes__(self):
return b''.join((
b'\x93y\x13V',
))
@classmethod
def from_reader(cls, reader):
return cls()
| 22
| 72
| 0.659091
|
5f4319ff87c30ae549a63f3f6616115e63c17303
| 51
|
py
|
Python
|
src/dynamic_fixtures/fixtures/__init__.py
|
duct-tape/django-dynamic-fixtures
|
da99b4b12b11be28ea4b36b6cf2896ca449c73c1
|
[
"MIT"
] | 11
|
2016-05-18T18:02:25.000Z
|
2019-08-21T09:10:34.000Z
|
src/dynamic_fixtures/fixtures/__init__.py
|
duct-tape/django-dynamic-fixtures
|
da99b4b12b11be28ea4b36b6cf2896ca449c73c1
|
[
"MIT"
] | 16
|
2016-05-18T06:31:56.000Z
|
2020-12-30T16:38:10.000Z
|
src/dynamic_fixtures/fixtures/__init__.py
|
Peter-Slump/django-factory-boy-fixtures
|
630182bd2a2b45833d29fa34134d6b68ff7a4349
|
[
"MIT"
] | 7
|
2017-04-22T18:30:37.000Z
|
2021-12-22T11:14:38.000Z
|
from .basefixture import BaseFixture # noqa: F401
| 25.5
| 50
| 0.784314
|
5bce06418b1da1b4049414a9106671edbc243f41
| 3,189
|
py
|
Python
|
kochira/services/core/account.py
|
nolanlum/kochira
|
0158a6877930f45ff6946770a3fb8a041117fe54
|
[
"MS-PL"
] | null | null | null |
kochira/services/core/account.py
|
nolanlum/kochira
|
0158a6877930f45ff6946770a3fb8a041117fe54
|
[
"MS-PL"
] | 1
|
2019-05-13T22:02:18.000Z
|
2019-05-13T22:02:18.000Z
|
kochira/services/core/account.py
|
nol888/kochira
|
0158a6877930f45ff6946770a3fb8a041117fe54
|
[
"MS-PL"
] | 1
|
2019-05-13T21:22:02.000Z
|
2019-05-13T21:22:02.000Z
|
"""
Account management.
Allows users to manage their account information.
"""
from pydle.asynchronous import Future
from kochira.service import Service, coroutine
from kochira.userdata import UserData
service = Service(__name__, __doc__)
@service.setup
def init_confirmations(ctx):
ctx.storage.confirmations = {}
def wait_for_confirmation(storage, account, network, alt_account, alt_network):
confirmation = Future()
storage.confirmations[account, network,
alt_account, alt_network] = confirmation
return confirmation
@service.command(r"!link (?P<account>\S+) (?P<network>\S+)")
@coroutine
def link(ctx, account, network):
"""
Link account.
    Link your account to another network. This account will then become the
    primary account, and the other account will be made an alias of the
    account requesting linkage.
"""
try:
user_data = yield ctx.lookup_user_data()
except UserData.DoesNotExist:
ctx.respond(ctx._("Please log in to NickServ before linking an account."))
return
try:
alt_client, *_ = [client for client in ctx.bot.clients.values()
if client.network == network]
except ValueError:
ctx.respond(ctx._("I can't find that network."))
return
alt_user_data = yield UserData.lookup_default(alt_client, account)
if user_data.account == alt_user_data.account and \
user_data.network == alt_user_data.network:
ctx.respond(ctx._("You can't link your account to itself."))
return
if "_alias" in alt_user_data:
ctx.respond(ctx._("You can't link your account to an alias."))
return
ctx.respond(ctx._("Okay, please message me \"!confirmlink {account} {network}\" on that network.").format(
account=ctx.origin,
network=ctx.client.network
))
yield wait_for_confirmation(ctx.storage, ctx.origin, ctx.client.network,
account, network)
data = dict(alt_user_data)
data.update(user_data)
user_data.update(data)
user_data.save()
alt_user_data.clear()
alt_user_data["_alias"] = {
"account": user_data.account,
"network": user_data.network
}
alt_user_data.save()
ctx.respond(ctx._("Your account has been successfully linked with {account} on {network}.").format(
account=account,
network=network
))
@service.command(r"!confirmlink (?P<account>\S+) (?P<network>\S+)")
@coroutine
def confirm_link(ctx, account, network):
"""
Confirm link.
Confirm a requested linkage as an alias of another account. You must
attempt to link an account first before using this.
"""
try:
user_data = yield ctx.lookup_user_data()
except UserData.DoesNotExist:
ctx.respond(ctx._("Please log in to NickServ before confirming linkage."))
return
fut = ctx.storage.confirmations \
.get((account, network, ctx.origin, ctx.client.network))
if fut is None:
ctx.respond(ctx._("That account hasn't requested linkage."))
return
ctx.respond(ctx._("Link confirmed."))
fut.set_result(None)
| 27.973684
| 110
| 0.661336
|
a6a8ef28435e8e720f9d2c6b5643fd1b7a3bf189
| 66
|
py
|
Python
|
pyswmap/__init__.py
|
ejordangottlieb/pyswmap
|
60df74b249eeeda87e23be9cad3c736757e9f72f
|
[
"MIT"
] | 12
|
2015-02-02T01:32:28.000Z
|
2022-02-23T15:52:06.000Z
|
pyswmap/__init__.py
|
ejordangottlieb/pyswmap
|
60df74b249eeeda87e23be9cad3c736757e9f72f
|
[
"MIT"
] | null | null | null |
pyswmap/__init__.py
|
ejordangottlieb/pyswmap
|
60df74b249eeeda87e23be9cad3c736757e9f72f
|
[
"MIT"
] | 2
|
2020-01-14T22:02:08.000Z
|
2021-11-13T21:31:46.000Z
|
from pyswmap.mapalgs import (
MapCalc,
DmrCalc,
)
| 13.2
| 29
| 0.575758
|
2390814f8be09a1c4d8e3697a62215b153da1f86
| 95,137
|
py
|
Python
|
app.py
|
BiRD-project/BiRD_view
|
47122b0f2e977f136d9ef5ff5878f58c53a68be1
|
[
"MIT"
] | 1
|
2020-05-18T01:55:04.000Z
|
2020-05-18T01:55:04.000Z
|
app.py
|
BiRD-project/BiRD_view
|
47122b0f2e977f136d9ef5ff5878f58c53a68be1
|
[
"MIT"
] | 3
|
2021-06-08T21:20:35.000Z
|
2021-07-11T12:55:46.000Z
|
app.py
|
BiRD-project/BiRD_view
|
47122b0f2e977f136d9ef5ff5878f58c53a68be1
|
[
"MIT"
] | null | null | null |
import base64
import json
import dash_gif_component as gif
# import colour as clr
import dash
import dash_core_components as dcc
import dash_html_components as html
# import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State, MATCH, ALL
from dash.exceptions import PreventUpdate
from parse_brdf_json import parse_brdf_json, validate_brdf_json
from jsonschema import validate, FormatChecker
# from helper_functions import *
# import colour as clr
import numpy as np
import plotly.graph_objects as go
from help_text import *
import time
import copy
#App configurations
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
#external_stylesheets = [dbc.themes.SIMPLEX]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets, suppress_callback_exceptions=True)
server = app.server
app.config['suppress_callback_exceptions'] = True
colors = {'background': '#111111', 'text': '#000080'}
json_schema = open('BRDF_JSON_schema/brdf_json_schema_v1.0.json')
dict_from_json_schema = json.loads(json_schema.read())
# Static application page layout
def server_layout():
layout = html.Div([
# Triggers (to avoid work of some callbacks before dynamic content is rendered)
# Logic: trigger is type, following words describe origin of the trigger
dcc.Store(id={'type': 'trigger', 'index': 'menu-tabs'}, storage_type='memory', data='Triggered'),
dcc.Store(id={'type': 'trigger', 'index': 'applet-tab'}, storage_type='memory', data='Triggered'),
dcc.Store(id={'type': 'trigger', 'index': 'app-modes'}, storage_type='memory', data='Triggered'),
dcc.Store(id={'type': 'trigger', 'index': 'mode-brdf-tab'}, storage_type='memory', data='Triggered'),
dcc.Store(id={'type': 'trigger', 'index': 'file-navigator-tabs'}, storage_type='memory', data='Triggered'),
dcc.Store(id={'type': 'trigger', 'index': 'upload-data'}, storage_type='memory', data='Triggered'),
dcc.Store(id={'type': 'trigger', 'index': 'validator-upload-data'}, storage_type='memory', data='Triggered'),
dcc.Store(id={'type': 'trigger', 'index': 'update-menu'}, storage_type='memory', data='Triggered'),
dcc.Store(id={'type': 'trigger', 'index': 'modify-state'}, storage_type='memory', data='Triggered'),
dcc.Store(id={'type': 'trigger', 'index': 'for-3d-plot'}, storage_type='memory', data='Triggered'),
dcc.Store(id={'type': 'trigger', 'index': 'for-projection-plot'}, storage_type='memory', data='Triggered'),
dcc.Store(id={'type': 'trigger', 'index': 'change-snap-states'}, storage_type='memory', data='Triggered'),
dcc.Store(id={'type': 'trigger', 'index': 'for-2D-brdf-plot'}, storage_type='memory', data='Triggered'),
dcc.Store(id={'type': 'trigger', 'index': 'for-2D-arbitrary-plot'}, storage_type='memory', data='Triggered'),
# Memory variables
        # remember which app mode was previously selected under the Applet tab
dcc.Store(id={'type': 'memory', 'index': 'app-modes-tab-value'}, storage_type='memory', data='BRDF'),
# store uploaded files' data
html.Div(id={'type': 'memory', 'index': 'browser_data_storage_update'},
children=dcc.Store(id={'type': 'memory', 'index': 'browser_data_storage'}, storage_type='memory', data={}),
style={'display': None}),
        # remember which uploaded file was previously selected
dcc.Store(id={'type': 'memory', 'index': 'selected_file'}, storage_type='memory', data=''),
        # remember what the chosen values of the variables were, i.e. remember the menu state.
dcc.Store(id={'type': 'memory', 'index': 'previous_state'}, storage_type='memory', data={}),
dcc.Store(id={'type': 'memory', 'index': '3d-plot-previous-state'}, storage_type='memory', data={}),
dcc.Store(id={'type': 'memory', 'index': 'projection-plot-previous-state'}, storage_type='memory', data={}),
dcc.Store(id={'type': 'memory', 'index': 'projection-bezel-turned'}, storage_type='memory', data=1),
dcc.Store(id={'type': 'memory', 'index': 'projection-bezel-previous-state'}, storage_type='memory', data=1),
dcc.Store(id={'type': 'memory', 'index': 'snaped-states'}, storage_type='memory', data={}),
dcc.Store(id={'type': 'memory', 'index': '2D-brdf-plot-previous-state'}, storage_type='memory', data={}),
dcc.Store(id={'type': 'memory', 'index': '2D-brdf-plot-clicked'}, storage_type='memory', data=1),
dcc.Store(id={'type': 'memory', 'index': '2D-arbitrary-plot-previous-state'}, storage_type='memory', data=1),
# App header
html.Div(children=[
html.Img(src=app.get_asset_url('BiRDlogo.png'), style={'width': '110px', 'height': '100px', 'margin-left': '100px', 'margin-right': '145px'}),
html.H1(children='BiRD view v4.0',
style={'textAlign': 'center', 'color': colors['text'], 'height': '25px',
'line-height': '50px'}),
html.Img(src=app.get_asset_url('Euramet_logo.jpg'), style={}),
],
style={'display': 'flex', 'flex-direction': 'row', 'flex-wrap': 'nowrap', 'align-items': 'flex-end', 'justify-content': 'space-around'}),
html.Div(children='''A web application for BRDF data visualization.''',
style={'textAlign': 'center', 'color': colors['text'], 'width': '100%', 'height': '25px', 'line-height':'50px'}),
html.Hr(style={'margin-bottom':'2.5px'}),
# App and Help tabs definition and description
dcc.Tabs(id={'type': 'menu-tabs', 'index': 0}, value='Applet', children=[
dcc.Tab(id={'type': 'applet-tab', 'index': 'Applet'}, label='Applet', value='Applet',
style={'line-height': '50px', 'padding': '0', 'height': '50px'},
selected_style={'line-height': '50px', 'padding': '0', 'height': '50px'}
),
dcc.Tab(id={'type': 'applet-tab', 'index': 'Validator'}, label='BRDF json validator', value='Validator',
style={'line-height': '50px', 'padding': '0', 'height': '50px'},
selected_style={'line-height': '50px', 'padding': '0', 'height': '50px'}
),
dcc.Tab(id={'type': 'applet-tab', 'index': 'Help'}, label='Help', value='Help',
style={'line-height': '50px', 'padding': '0', 'height': '50px'},
selected_style={'line-height': '50px', 'padding': '0', 'height': '50px'})],
style={'width': '100%', 'height': '50px', 'line-height': '50px', 'textAlign': 'top', 'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
        # Container for App and Help tabs' content that is rendered dynamically in the dedicated callback function below
html.Div(id={'type': 'menu-tabs-content', 'index': 0})
])
return layout
#Callback to render contents of the Applet, Validator and Help tabs. Contents are displayed in the id={'type': 'menu-tabs-content', 'index': 0} Div container.
#A trigger is sent to notify that the content has finished rendering. The function is called whenever the value of the Tabs component changes.
@app.callback([Output({'type': 'menu-tabs-content', 'index': 0}, 'children'),
Output({'type': 'trigger', 'index': 'menu-tabs'}, 'data')],
[Input({'type': 'menu-tabs', 'index': 0}, 'value')],
[State({'type': 'memory', 'index': 'app-modes-tab-value'}, 'data')])
def render_menu_tabs_content(tab, app_mode):
# Rendering contents for Applet tab
if tab == 'Applet':
return html.Div(children=[
# Container with animated upload field dash component.
html.Div(id={'type': 'file-loading-container', 'index': 0}, children=[
dcc.Loading(id={'type': 'file-loading', 'index': 0}, children=[
dcc.Upload(id={'type': 'upload-data', 'index': 0},
children=html.Div(id={'type': 'loading-state', 'index': 0},
children=['Drag and Drop or ', html.A('Select Files')]),
style={'width': '100%', 'height': '50px', 'lineHeight': '50px',
'borderWidth': '1px', 'borderStyle': 'dashed', 'borderRadius': '7.5px',
'textAlign': 'center', 'margin-bottom':'2.5px', 'margin-top':'2.5px',
'borderColor': 'Grey'},
multiple=True)
], type='default', style={'width': '100%', 'height': '50', 'lineHeight': '50px',
'borderWidth': '1px', 'borderStyle': 'dashed', 'borderRadius': '7.5px',
'textAlign': 'center','margin-bottom':'2.5px', 'margin-top':'2.5px',
'borderColor': 'Grey'})
], className="row"),
# Container for file navigation menu as well as application mode's tabs.
html.Div(children=[
# File menu container
html.Div(children=[
# File options container
html.Div(id={'type': 'file-menu-container', 'index': 0},
children=[
html.P(children='Uploaded files',
style={'textAlign': 'center', 'font-size': '20px',
'line-height': '50px', 'padding': '0', 'height': '50px',
'borderWidth': '1px', 'borderStyle': 'solid', 'borderRadius': '5px',
'borderColor': 'LightGrey', 'background-color': '#f9f9f9'}),
html.P('Select file:',
style={'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
# File selector dropdown
dcc.Dropdown(id={'type': 'options_sp', 'index': 'file-names-dropdown'},
placeholder="File name", clearable=False,
                                         options=[{'label': 'no files', 'value': 'no_files'}],
value='no files',
style={'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
html.Hr(style={'margin-bottom': '5px', 'margin-top': '5px'}),
dcc.Tabs(id={'type': 'file-navigator-tabs', 'index': 0}, value='options', children=[
dcc.Tab(id={'type': 'file-navigator-tab', 'index': 'options'}, label='Options', value='options',
style={'line-height': '35px', 'padding': '0', 'height': '35px'},
selected_style={'line-height': '35px', 'padding': '0', 'height': '35px'}
),
dcc.Tab(id={'type': 'file-navigator-tab', 'index': 'metadata'}, label='Metadata', value='metadata',
style={'line-height': '35px', 'padding': '0', 'height': '35px'},
selected_style={'line-height': '35px', 'padding': '0', 'height': '35px'}
),],
style={'width': '100%', 'height': '35px', 'line-height': '35px',
'textAlign': 'top', 'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
html.P(
                            'File menu options and metadata depend on file contents and will appear upon first file upload',
style={'textAlign': 'center', 'borderWidth': '1px', 'borderStyle': 'dashed',
'borderRadius': '7.5px', 'margin-top': '2.5px', 'word-wrap': 'normal',
'borderColor': 'LightGrey'})
],
style={'textAlign': 'center', 'transform': 'rotateX(180deg)','z-index': '1500'}),
],
style={'order': '1', 'flex-grow': '1', 'resize': 'horizontal', 'overflow': 'auto',
'transform': 'rotateX(180deg)', 'width': '5%', 'height': '1350px',
'display': 'flex', 'flex-direction': 'column-reverse', 'flex-wrap': 'nowrap',
'align-items': 'stretch', 'margin-right': '2.5px'}),
# Container describing Application BRDF visualization modes' tabs
html.Div(children=[
# Dash Tabs component with defined tabs.
dcc.Tabs(id={'type': 'applet-modes', 'index': 0}, value=app_mode, children=[
dcc.Tab(id={'type': 'applet-BRDF', 'index': 0}, label='BRDF visualisation', value='BRDF',
style={'line-height': '50px', 'padding': '0', 'height': '50px'},
selected_style={'line-height': '50px', 'padding': '0', 'height': '50px'}),
dcc.Tab(id={'type': 'applet-COLOR', 'index': 0}, label='CIELAB', value='CIELAB',
style={'line-height': '50px', 'padding': '0', 'height': '50px'},
selected_style={'line-height': '50px', 'padding': '0', 'height': '50px'})
],
style={'width': '100%', 'height': '50px', 'line-height': '50px', 'textAlign': 'top'}
),
# Container for tabs' contents
html.Div(id={'type': 'applet-modes-content', 'index': 0})],
style={'order': '2', 'flex-grow': '6', 'overflow': 'auto', 'margin-left': '2.5px'}
)
], style={'display': 'flex', 'flex-direction': 'row', 'flex-wrap': 'nowrap', 'align-items': 'stretch',
'margin-bottom': '2.5px', 'margin-top': '2.5px', 'height': '1500px'}),
]
), 'Triggered'
#Rendering contents for BRDF JSON validator Tab
elif tab == 'Validator':
return html.Div(children=[
            html.Div(children=dcc.Markdown('Validation is conducted using the following JSON Schema: [brdf_json_schema_v1.0.json](https://jsoneditoronline.org/#right=local.yutupo&left=url.https%3A%2F%2Fraw.githubusercontent.com%2FBiRD-project%2FBiRD_view%2Fmaster%2Fbrdf_json_schema.json)'),
style={'width': '100%', 'height': '30px', 'lineHeight': '30px',
'borderWidth': '1px', 'borderStyle': 'solid', 'borderRadius': '7.5px',
'textAlign': 'center', 'margin-bottom': '5px', 'margin-top': '2.5px',
'borderColor': 'LightGrey'}
),
html.Div(id={'type': 'file-loading-container', 'index': 1}, children=[
dcc.Loading(id={'type': 'file-loading', 'index': 1}, children=[
dcc.Upload(id={'type': 'upload-data', 'index': 1},
children=html.Div(id={'type': 'loading-state', 'index': 1},
children=['Drag and Drop or ', html.A('Select Files')]),
style={'width': '100%', 'height': '50px', 'lineHeight': '50px',
'borderWidth': '1px', 'borderStyle': 'dashed', 'borderRadius': '7.5px',
'textAlign': 'center', 'margin-bottom': '2.5px', 'margin-top': '2.5px',
'borderColor': 'Grey'},
multiple=True)
], type='default', style={'width': '100%', 'height': '50', 'lineHeight': '50px',
'borderWidth': '1px', 'borderStyle': 'dashed', 'borderRadius': '7.5px',
'textAlign': 'center', 'margin-bottom': '2.5px', 'margin-top': '2.5px',
'borderColor': 'Grey'})
], className="row"),
html.Div(id={'type': 'validator-information-container', 'index': 0}, children=[
html.P('List of uploaded files with their validity information will be shown here',
style={'width': '100%', 'height': '50', 'lineHeight': '50px',
'borderWidth': '1px', 'borderStyle': 'solid', 'borderRadius': '7.5px',
'textAlign': 'center', 'margin-bottom': '2.5px', 'margin-top': '2.5px',
'borderColor': 'LightGrey'})
])
]), 'Triggered'
#Rendering contents for Help Tab
elif tab == 'Help':
return html.Div(
children=[html.Div(children=dcc.Markdown(help_text_markdown_part_1,
style={'width': '70%'})),
html.Div([gif.GifPlayer(
gif='assets/upload.gif',
still='assets/upload.png')],
style={'width': '70%'}),
html.Div(children=dcc.Markdown(help_text_markdown_part_2,
style={'width': '70%'})),
html.Div([gif.GifPlayer(
gif='assets/fileselect.gif',
still='assets/fileselect.png')],
style={'width': '70%'}),
html.Div(children=dcc.Markdown(help_text_markdown_part_3,
style={'width': '70%'})),
html.Div([gif.GifPlayer(
gif='assets/navparams.gif',
still='assets/navparams.png')],
style={'width': '70%'}),
html.Div(children=dcc.Markdown(help_text_markdown_part_4,
style={'width': '70%'})),
html.Div([gif.GifPlayer(
gif='assets/metadata.gif',
still='assets/metadata.png')],
style={'width': '70%'}),
html.Div(children=dcc.Markdown(help_text_markdown_part_5,
style={'width': '70%'})),
html.Div([gif.GifPlayer(
gif='assets/3Dplot.gif',
still='assets/3Dplot.png')],
style={'width': '70%'}),
html.Div(children=dcc.Markdown(help_text_markdown_part_6,
style={'width': '70%'})),
html.Div([gif.GifPlayer(
gif='assets/3Dprojection.gif',
still='assets/3Dprojection.png')],
style={'width': '70%'}),
html.Div(children=dcc.Markdown(help_text_markdown_part_7,
style={'width': '70%'})),
html.Div([gif.GifPlayer(
gif='assets/2Dplot.gif',
still='assets/2Dplot.png')],
style={'width': '70%'}),
html.Div(children=dcc.Markdown(help_text_markdown_part_8,
style={'width': '70%'})),
html.Div([gif.GifPlayer(
gif='assets/multiPlot.gif',
still='assets/multiPlot.png')],
style={'width': '70%'}),
html.Div(children=dcc.Markdown(help_text_markdown_part_9,
style={'width': '70%'})),
html.Div([gif.GifPlayer(
gif='assets/multiData.gif',
still='assets/multiData.png')],
style={'width': '70%'}),
html.Div(children=dcc.Markdown(help_text_markdown_part_10,
style={'width': '70%'})),
html.Div([gif.GifPlayer(
gif='assets/interaction.gif',
still='assets/interaction.png')],
style={'width': '70%'}),
]
), 'Triggered'
# Callback needed to check whether exactly "Applet" tab was triggered
@app.callback(Output({'type': 'trigger', 'index': 'applet-tab'}, 'data'),
[Input({'type': 'trigger', 'index': 'menu-tabs'}, 'data')],
[State({'type': 'menu-tabs', 'index': 0}, 'value')])
def applet_tab_selected_trigger(trigger, tab):
if tab != 'Applet':
raise PreventUpdate
else:
return 'Triggered'
@app.callback([Output({'type': 'applet-modes-content', 'index': 0}, 'children'),
Output({'type': 'memory', 'index': 'app-modes-tab-value'}, 'data'),
Output({'type': 'trigger', 'index': 'app-modes'}, 'data')],
[Input({'type': 'applet-modes', 'index': 0}, 'value')])
def render_applet_modes_content(tab):
if tab is None:
raise PreventUpdate
if tab == 'BRDF':
return html.Div(
children=[
html.Div(children=[
html.Div(dcc.Loading(id='3D-plot-L',
children=dcc.Graph(id={'type': 'graph', 'index': '3d-brdf'}, responsive=True),
style={'height': '420px', 'line-height': '420px'}),
style={'width': '50%', 'height': '420px', 'order': '1'}),
html.Div(dcc.Loading(id='Point-spectrum-L',
children=dcc.Graph(id={'type': 'graph', 'index': 'x-any-y-brdf'}, responsive=True),
style={'height': '420px', 'line-height': '1000px'}),
style={'width': '50%', 'height': '420px', 'order': '2'}),
html.Div(dcc.Loading(id='Projection-plot-L',
children=dcc.Graph(id={'type': 'graph', 'index': 'projection'}, responsive=True),
style={'height': '420px', 'line-height': '420px'}),
style={'width': '50%', 'height': '420px', 'order': '3'}),
html.Div(dcc.Loading(id='2D-BRDF-L',
children=dcc.Graph(id={'type': 'graph', 'index': '2d-brdf'}, responsive=True),
style={'height': '420px', 'line-height': '420px'}),
style={'width': '50%', 'height': '420px', 'order': '4'})],
style={'display': 'flex', 'flex-wrap': 'wrap'})]
), tab, 'Triggered'
elif tab == 'CIELAB':
return html.Div(
children=[
html.Div(children=[
html.Div(dcc.Loading(id='CIELAB-3D-plot-L',
children=dcc.Graph(id="CIELAB-3D-plot", responsive=True),
style={'height': '420px', 'line-height': '420px'}),
style={'width': '50%', 'height': '420px', 'order': '1'}),
html.Div(dcc.Loading(id='CIELAB-Point-spectrum-L',
children=dcc.Graph(id="CIELAB-Point-spectrum", responsive=True),
style={'height': '420px', 'line-height': '420px'}),
style={'width': '50%', 'height': '420px', 'order': '2'}),
html.Div(dcc.Loading(id='CIELAB-Projection-plot-L',
children=dcc.Graph(id="CIELAB-Projection-plot", responsive=True),
style={'height': '420px', 'line-height': '420px'}),
style={'width': '50%', 'height': '420px', 'order': '3'}),
html.Div(dcc.Loading(id='CIELAB-2D-BRDF-L',
children=dcc.Graph(id="CIELAB-2D-BRDF", responsive=True),
style={'height': '420px', 'line-height': '420px'}),
style={'width': '50%', 'height': '420px', 'order': '4'})],
style={'display': 'flex', 'flex-wrap': 'wrap'})
]
), tab, 'Triggered'
@app.callback(Output({'type': 'trigger', 'index': 'mode-brdf-tab'}, 'data'),
[Input({'type': 'trigger', 'index': 'app-modes'}, 'data')],
[State({'type': 'applet-modes', 'index': 0}, 'value')])
def mode_BRDF_tab_selected_trigger(trigger, tab):
if tab != 'BRDF':
raise PreventUpdate
else:
return 'Triggered'
@app.callback([Output({'type': 'memory', 'index': 'browser_data_storage'}, 'data'),
Output({'type': 'file-loading', 'index': 0}, 'children'),
Output({'type': 'trigger', 'index': 'upload-data'}, 'data')],
[Input({'type': 'upload-data', 'index': 0}, 'filename'),
Input({'type': 'upload-data', 'index': 0}, 'contents')],
[State({'type': 'file-loading', 'index': 0}, 'children'),
State({'type': 'memory', 'index': 'browser_data_storage'}, 'data')])
def upload_data(filenames, contents, upload_children, data):
if contents is None or filenames is None:
raise PreventUpdate
if data == {}:
for i in range(len(filenames)):
filename = filenames[i]
content = contents[i]
content_type, content_string = content.split(',')
decoded = base64.b64decode(content_string)
processed_data = copy.deepcopy(parse_brdf_json(json.loads(decoded.decode('utf-8'))))
data[filename] = processed_data
# data[filename]["data"]["variables"].append({"observer": 'CIE 1931 2 Degree Standard Observer'})
# data[filename]["data"]["variables"].append({"illuminant": 'D65'})
else:
for i in range(len(filenames)):
filename = filenames[i]
content = contents[i]
content_type, content_string = content.split(',')
decoded = base64.b64decode(content_string)
processed_data = copy.deepcopy(parse_brdf_json(json.loads(decoded.decode('utf-8'))))
if filename in data:
i = 1
new_filename = filename
while new_filename in data:
new_filename = filename + '_copy_' + str(i)
i = i + 1
filename = new_filename
data[filename] = processed_data
# data[filename]["data"]["variables"].append({"observer": 'CIE 1931 2 Degree Standard Observer'})
# data[filename]["data"]["variables"].append({"illuminant": 'D65'})
print('upload')
return data, upload_children, 'Triggered'
@app.callback([Output({'type': 'upload-data', 'index': 0}, 'filename'),
Output({'type': 'upload-data', 'index': 0}, 'contents')],
[Input({'type': 'trigger', 'index': 'upload-data'}, 'data')])
def clear_upload_component(trigger):
print('upload cleared')
return None, None
@app.callback([Output({'type': 'validator-information-container', 'index': 0}, 'children'),
Output({'type': 'file-loading', 'index': 1}, 'children'),
Output({'type': 'trigger', 'index': 'validator-upload-data'}, 'data')],
[Input({'type': 'upload-data', 'index': 1}, 'filename'),
Input({'type': 'upload-data', 'index': 1}, 'contents')],
[State({'type': 'file-loading', 'index': 1}, 'children')])
def validator_upload_data(filenames, contents, upload_children):
if contents is None or filenames is None:
raise PreventUpdate
children = []
for i in range(len(filenames)):
filename = filenames[i]
content = contents[i]
content_type, content_string = content.split(',')
decoded = base64.b64decode(content_string)
file_data = json.loads(decoded.decode('utf-8'))
validity = str(validate_brdf_json(file_data, dict_from_json_schema))
if validity == "File valid!":
children.append(
html.Div(children=[
html.P(filename, style={'width': '75%', 'font-size': '20px', 'textAlign': 'center', 'vertical-align': 'middle', 'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
html.P("File valid!", style={'width': '25%', 'font-size': '24px', 'color':'green', 'textAlign': 'center', 'vertical-align': 'middle', 'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
# html.P("",
# style={'width': '20%', 'font-size': '20px', 'color': 'black', 'textAlign': 'center',
# 'vertical-align': 'middle', 'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
],
style={'display': 'flex', 'flex-direction': 'row', 'flex-wrap': 'nowrap',
'height': '45px', 'lineHeight': '45px', 'align-items': 'center',
'borderWidth': '1px', 'borderStyle': 'solid', 'borderRadius': '7.5px',
'margin-bottom': '2.5px', 'margin-top': '2.5px',
'borderColor': 'Green', 'background-color':'PaleGreen'}
)
)
else:
children.append(
html.Div(children=[
html.P(filename,
style={'width': '75%', 'font-size': '20px', 'textAlign': 'center', 'vertical-align': 'middle', 'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
html.P("File not valid!",
style={'width': '20%', 'font-size': '24px', 'textAlign': 'center', 'color':'red',
'vertical-align': 'middle', 'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
html.Button(u"\u2193", id={'type':'show-validator-error-message', 'index':filename},
style={'width': '5%', 'font-size': '20px', 'textAlign': 'center', 'vertical-align': 'middle', 'margin-bottom': '2.5px', 'margin-top': '2.5px',
# 'box-shadow': '0 8px 16px 0 rgba(0,0,0,0.2), 0 6px 20px 0 rgba(0,0,0,0.19)',
'background-color':'GhostWhite', 'borderWidth': '2px', 'borderColor': 'Grey', 'margin-left': '5px', 'margin-right': '5px', 'text-align': 'center'})
],
style={'display': 'flex', 'flex-direction': 'row', 'flex-wrap': 'nowrap',
'height': '45px', 'lineHeight': '45px', 'align-items': 'center',
'borderWidth': '1px', 'borderStyle': 'solid', 'borderRadius': '7.5px',
'textAlign': 'center', 'margin-bottom': '2.5px', 'margin-top': '2.5px',
'borderColor': 'red', 'background-color':'LightPink',
}
)
)
print(validity)
children.append(
html.Div(
children=[
html.P(str(validity), id={'type':'validator-error-message', 'index': filename},
style={'display': 'none', 'whiteSpace': 'pre-wrap', 'borderWidth': '1px', 'borderStyle': 'solid',
'borderRadius': '7.5px', 'margin-bottom': '2.5px', 'margin-top': '2.5px', 'padding': '5px 5px',
'borderColor': 'Grey'}
),
])
)
print('upload')
return children, upload_children, 'Triggered'
@app.callback([Output({'type': 'upload-data', 'index': 1}, 'filename'),
Output({'type': 'upload-data', 'index': 1}, 'contents')],
[Input({'type': 'trigger', 'index': 'validator-upload-data'}, 'data')])
def clear_validator_upload_component(trigger):
print('validator upload cleared')
return None, None
@app.callback(Output({'type':'validator-error-message', 'index':MATCH}, 'style'),
[Input({'type':'show-validator-error-message', 'index':MATCH}, 'n_clicks')],
[State({'type':'validator-error-message', 'index':MATCH}, 'style')])
def show_validator_error_message(n_clicks, style):
    if n_clicks is None:
        raise PreventUpdate
    if n_clicks % 2 != 0:
style['display'] = 'block'
else:
style['display'] = 'none'
return style
@app.callback([Output({'type': 'memory', 'index': 'selected_file'},'data'),
Output({'type': 'file-menu-container', 'index': 0},'children'),
Output({'type': 'trigger', 'index': 'update-menu'}, 'data')],
[Input({'type': 'trigger', 'index': 'upload-data'}, 'data'),
Input({'type': 'trigger', 'index': 'applet-tab'}, 'data'),
Input({'type': 'options_sp', 'index': 'file-names-dropdown'}, 'value'),
Input({'type': 'file-navigator-tabs', 'index': 0}, 'value')],
[State({'type': 'memory', 'index': 'browser_data_storage'}, 'data'),
State({'type': 'memory', 'index': 'selected_file'}, 'data'),
State({'type': 'memory', 'index': 'snaped-states'}, 'data')])
def update_menu(trigger_upload, trigger_menu_tabs, file_name, file_navigator_state, uploaded_data, previously_selected_file, snaped_states):
if uploaded_data == {}:
raise PreventUpdate
if previously_selected_file == '':
previously_selected_file = list(uploaded_data.keys())[0]
if file_name is None or file_name == 'no files':
file_name = previously_selected_file
file_menu_container_children = []
# Create and update file selector dropdown first
file_selection_options = []
for key in uploaded_data:
file_selection_options.append({'label': key, 'value': key})
file_menu_container_children.append(
html.P(children='Uploaded files',
style={'textAlign': 'center', 'font-size': '20px',
'line-height': '50px', 'padding': '0', 'height': '50px', 'borderWidth': '1px',
'borderStyle': 'solid', 'borderRadius': '5px',
'borderColor': 'LightGrey', 'background-color': '#f9f9f9'})
)
file_menu_container_children.append(
html.P('Select file:', style={'margin-bottom': '2.5px', 'margin-top': '2.5px'})
)
file_menu_container_children.append(
dcc.Dropdown(id={'type': 'options_sp', 'index': 'file-names-dropdown'},
placeholder="File name",
options=file_selection_options,
value=file_name,
clearable=False,
style={'margin-bottom': '2.5px', 'margin-top': '2.5px'})
)
if uploaded_data[file_name]['validity'] is False:
file_menu_container_children.append(
            html.P('Some file(s) are not in a valid BRDF format. Use the validator to check the problem.', style={'margin-bottom': '2.5px', 'margin-top': '2.5px', 'color': 'red'})
)
file_menu_container_children.append(
html.Hr(style={'margin-bottom': '5px', 'margin-top': '5px'})
)
previously_selected_file = file_name
file_menu_container_children.append(
dcc.Tabs(id={'type': 'file-navigator-tabs', 'index': 0}, value=file_navigator_state, children=[
dcc.Tab(id={'type': 'file-navigator-tab', 'index': 'options'}, label='Options', value='options',
style={'line-height': '35px', 'padding': '0', 'height': '35px'},
selected_style={'line-height': '35px', 'padding': '0', 'height': '35px'}
),
dcc.Tab(id={'type': 'file-navigator-tab', 'index': 'metadata'}, label='Metadata', value='metadata',
style={'line-height': '35px', 'padding': '0', 'height': '35px'},
selected_style={'line-height': '35px', 'padding': '0', 'height': '35px'}
), ],
style={'width': '100%', 'height': '35px', 'line-height': '35px',
'textAlign': 'top', 'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
)
if file_navigator_state == 'options':
file_menu_container_children.append(
html.Hr(style={'margin-bottom': '5px', 'margin-top': '5px'})
)
# Create and update options from variables in uploaded data
variables_to_options = {}
for variable in uploaded_data[file_name]['data']['variables']:
if variable['name'] != 'BRDF' and variable['name'] != "uBRDF":
variables_to_options[variable['name']] = {
'options': [{'label': uval, 'value': uval} for uval in variable['uvals']],
'value': variable['sval']
}
file_menu_container_children.append(
html.P(children='Main variables',
style={'textAlign': 'center', 'font-size': '20px',
'line-height': '35px', 'padding': '0', 'height': '35px', 'borderWidth': '1px',
'borderStyle': 'solid', 'borderRadius': '5px',
'borderColor': 'LightGrey', 'background-color': '#f9f9f9',
'margin-bottom': '2.5px', 'margin-top': '2.5px'})
)
file_menu_container_children.append(
html.P('Select Theta_i:', style={'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
)
file_menu_container_children.append(
dcc.Dropdown(id={'type': 'options', 'index': "theta_i"},
placeholder="theta_i",
options=variables_to_options['theta_i']['options'],
value=variables_to_options['theta_i']['value'],
clearable=False,
style={'margin-bottom': '2.5px', 'margin-top': '2.5px'})
)
file_menu_container_children.append(
html.P('Select Phi_i:', style={'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
)
file_menu_container_children.append(
dcc.Dropdown(id={'type': 'options', 'index': "phi_i"},
placeholder="phi_i",
options=variables_to_options['phi_i']['options'],
value=variables_to_options['phi_i']['value'],
clearable=False,
style={'margin-bottom': '2.5px', 'margin-top': '2.5px'})
)
file_menu_container_children.append(
html.P('Select Theta_r:', style={'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
)
file_menu_container_children.append(
dcc.Dropdown(id={'type': 'options', 'index': "theta_r"},
placeholder="theta_r",
options=variables_to_options['theta_r']['options'],
value=variables_to_options['theta_r']['value'],
clearable=False,
style={'margin-bottom': '2.5px', 'margin-top': '2.5px'})
)
file_menu_container_children.append(
html.P('Select Phi_r:', style={'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
)
file_menu_container_children.append(
dcc.Dropdown(id={'type': 'options', 'index': "phi_r"},
placeholder="phi_r",
options=variables_to_options['phi_r']['options'],
value=variables_to_options['phi_r']['value'],
clearable=False,
style={'margin-bottom': '2.5px', 'margin-top': '2.5px'})
)
file_menu_container_children.append(
html.Hr(style={'margin-bottom': '5px', 'margin-top': '5px'})
)
file_menu_container_children.append(
html.P(children='Additional variables',
style={'textAlign': 'center', 'font-size': '20px',
'line-height': '35px', 'padding': '0', 'height': '35px', 'borderWidth': '1px',
'borderStyle': 'solid', 'borderRadius': '5px', 'width': '99.25%',
'borderColor': 'LightGrey', 'background-color': '#f9f9f9',
'margin-bottom': '2.5px', 'margin-top': '2.5px'})
)
if len(variables_to_options.keys()) > 4:
for variable_key in variables_to_options:
if variable_key != 'theta_i' and variable_key != 'phi_i' and variable_key != 'theta_r' and variable_key != 'phi_r':
file_menu_container_children.append(
html.P('Select ' + variable_key + ':', style={'margin-bottom': '2.5px', 'margin-top': '2.5px'}),
)
file_menu_container_children.append(
dcc.Dropdown(id={'type': 'options', 'index': variable_key},
placeholder=variable_key,
options=variables_to_options[variable_key]['options'],
value=variables_to_options[variable_key]['value'],
clearable=False,
style={'margin-bottom': '2.5px', 'margin-top': '2.5px'})
)
else:
file_menu_container_children.append(
html.P('No additional variables',
style={'margin-bottom': '2.5px', 'margin-top': '2.5px', 'line-height': '35px', 'padding': '0',
'height': '35px', 'borderWidth': '1px', 'borderStyle': 'dashed', 'borderRadius': '5px',
'borderColor': 'LightGrey'}),
)
file_menu_container_children.append(
html.Hr(style={'margin-bottom': '5px', 'margin-top': '5px'})
)
file_menu_container_children.append(
html.P(children='Select variable as \"X\"',
style={'textAlign': 'center', 'font-size': '20px',
'line-height': '35px', 'padding': '0', 'height': '35px', 'borderWidth': '1px',
'borderStyle': 'solid', 'borderRadius': '5px', 'width': '99.25%',
'borderColor': 'LightGrey', 'background-color': '#f9f9f9',
'margin-bottom': '5px', 'margin-top': '2.5px'})
)
variable_as_x_options = [{'label': key, 'value': key} for key in variables_to_options]
variable_as_x_value = ''
if 'variable_as_x' in uploaded_data[file_name]:
variable_as_x_value = uploaded_data[file_name]['variable_as_x']
else:
variable_as_x_value = variable_as_x_options[len(variable_as_x_options) - 1]['value']
file_menu_container_children.append(
dcc.Dropdown(id={'type': 'options', 'index': 'variable_as_x'},
placeholder='variable_as_x',
options=variable_as_x_options,
value=variable_as_x_value,
clearable=False,
style={'margin-bottom': '2.5px', 'margin-top': '2.5px'})
)
file_menu_container_children.append(
html.Hr(style={'margin-bottom': '5px', 'margin-top': '5px'})
)
file_menu_container_children.append(
html.Button('Snap state',
id={'type': 'button', 'index': 'snap_state'},
style={'margin-bottom': '2.5px', 'margin-top': '2.5px', 'width': '100%'})
)
snaped_states_children = []
for file_name in snaped_states:
snaped_states_children.append(
html.P(file_name+':',
style={'text-align': 'left'})
)
for selected_X in snaped_states[file_name]:
snaped_states_children.append(
html.P(selected_X + ':',
style={'text-align': 'left','margin-left':'20px'})
)
for state in snaped_states[file_name][selected_X]:
snaped_states_children.append(html.Div(children=[
html.P(str(state),
style={'text-align': 'left', 'width': '70%'}),
html.Button('X',
id={'type': 'snap-remove-button',
'index': str(file_name) + '::' + str(selected_X) + '::' + str(state)},
style={'margin-bottom': '2.5px', 'margin-top': '2.5px','width': '30%'})],
style={'margin-left': '40px','display': 'flex'}
))
file_menu_container_children.append(
html.Div(children = snaped_states_children,
id={'type': 'container', 'index': 'snap_states'},
style={'margin-bottom': '2.5px', 'margin-top': '2.5px',
'overflow': 'auto', 'height': '500px',
'borderWidth': '1px', 'borderStyle': 'dashed',
'borderRadius': '5px', 'borderColor': 'LightGrey'
})
)
elif file_navigator_state == 'metadata':
metadata = uploaded_data[file_name]['metadata']
metadata_children = []
for key in metadata:
if isinstance(metadata[key], dict):
metadata_children.append(html.P(key + ': ' + '\n'))
for subkey in metadata[key]:
metadata_children.append(html.P(subkey + ': ' + str(metadata[key][subkey]) + '\n', style={'margin-left':'20px'}))
else:
metadata_children.append(html.P(key + ': ' + str(metadata[key]) + '\n'))
file_menu_container_children.append(
html.P(metadata_children,
style={'margin-bottom': '2.5px', 'margin-top': '2.5px',
'whiteSpace': 'pre-wrap', 'overflow': 'auto',
'text-align': 'left', 'height': '1150px',
# 'line-height': '35px', 'padding': '0', 'height': '35px',
'borderWidth': '1px', 'borderStyle': 'dashed',
'borderRadius': '5px', 'borderColor': 'LightGrey'}),
)
return previously_selected_file, file_menu_container_children, 'Triggered'
@app.callback([Output({'type': 'memory', 'index': 'snaped-states'}, 'data'),
Output({'type': 'container', 'index': 'snap_states'}, 'children'),
Output({'type': 'trigger', 'index': 'change-snap-states'}, 'data')],
[Input({'type': 'button', 'index': 'snap_state'}, 'n_clicks'),
Input({'type': 'snap-remove-button', 'index': ALL}, 'n_clicks')],
[State({'type': 'memory', 'index': 'selected_file'}, 'data'),
State({'type': 'memory', 'index': 'snaped-states'}, 'data'),
State({'type': 'options', 'index': ALL}, 'value'),
State({'type': 'options', 'index': ALL}, 'id')])
def take_state_snap(n_clicks, remove_clicks, file_name, snaped_states, options, ids):
print(remove_clicks)
if file_name == '':
raise PreventUpdate
# print(dash.callback_context.triggered)
if 1 in remove_clicks:
remove_button = json.loads(dash.callback_context.triggered[0]['prop_id'].split('.n_clicks')[0])
remove_button = remove_button['index'].split('::')
remove_button_file_name = remove_button[0]
remove_button_selected_x = remove_button[1]
remove_button_selected_state = json.loads(remove_button[2].replace("'", "\""))
snaped_states[remove_button_file_name][remove_button_selected_x].remove(remove_button_selected_state)
if snaped_states[remove_button_file_name][remove_button_selected_x] == []:
del snaped_states[remove_button_file_name][remove_button_selected_x]
if snaped_states[remove_button_file_name] == {}:
del snaped_states[remove_button_file_name]
else:
if n_clicks != None:
id_keys = []
for id in ids:
id_keys.append(id['index'])
options_under_key = {}
for i in range(len(options)):
options_under_key[id_keys[i]] = options[i]
i = len(options) - 1
variable_as_x = options[i]
if file_name not in snaped_states:
variable_as_x_states = []
variable_as_x_states.append(options_under_key)
snaped_states[file_name] = {variable_as_x: variable_as_x_states}
else:
if variable_as_x not in snaped_states[file_name]:
variable_as_x_states = []
variable_as_x_states.append(options_under_key)
snaped_states[file_name][variable_as_x] = variable_as_x_states
else:
if options_under_key not in snaped_states[file_name][variable_as_x]:
snaped_states[file_name][variable_as_x].append(options_under_key)
snaped_states_children = []
for file_name in snaped_states:
snaped_states_children.append(
html.P(file_name + ':',
style={'text-align': 'left'})
)
for selected_X in snaped_states[file_name]:
snaped_states_children.append(
html.P(selected_X + ':',
style={'text-align': 'left', 'margin-left': '20px'})
)
for state in snaped_states[file_name][selected_X]:
snaped_states_children.append(html.Div(children=[
html.P(str(state),
style={'text-align': 'left', 'width': '70%'}),
html.Button('X',
id={'type': 'snap-remove-button',
'index': str(file_name) + '::' + str(selected_X) + '::' + str(state)},
style={'margin-bottom': '2.5px', 'margin-top': '2.5px', 'width': '30%'})],
style={'margin-left': '40px', 'display': 'flex'}
))
return snaped_states, snaped_states_children, 'Triggered'
@app.callback([Output({'type': 'memory', 'index': 'previous_state'}, 'data'),
Output({'type': 'memory', 'index': 'browser_data_storage_update'}, 'children'),
Output({'type': 'trigger', 'index': 'modify-state'}, 'data')],
[Input({'type': 'trigger', 'index': 'update-menu'}, 'data'),
Input({'type': 'options', 'index': ALL}, 'value')],
[State({'type': 'memory', 'index': 'browser_data_storage'}, 'data'),
State({'type': 'memory', 'index': 'previous_state'}, 'data'),
State({'type': 'memory', 'index': 'selected_file'}, 'data')])
def modify_state(trigger, options_values, uploaded_data, previous_state, selected_file):
if options_values == [] or uploaded_data == {}:
raise PreventUpdate
new_state = {'name': selected_file}
inputs = dash.callback_context.inputs_list
for input in inputs[1]:
key = input['id']['index']
new_state[key] = input['value']
# print(new_state)
if new_state == previous_state:
raise PreventUpdate
# print(previous_state)
previous_state = new_state
# print(previous_state)
inputs = dash.callback_context.inputs_list
for input in inputs[1]:
key = input['id']['index']
for variable in uploaded_data[selected_file]['data']['variables']:
if key == variable['name']:
variable['sval'] = input['value']
if key == 'variable_as_x':
uploaded_data[selected_file][key] = input['value']
return previous_state, dcc.Store(id={'type': 'memory', 'index': 'browser_data_storage'}, storage_type='memory', data=uploaded_data), 'Triggered'
@app.callback([Output({'type': 'memory', 'index': '3d-plot-previous-state'}, 'data'),
Output({'type': 'trigger', 'index': 'for-3d-plot'}, 'data')],
[Input({'type': 'trigger', 'index': 'modify-state'}, 'data')],
[State({'type': 'memory', 'index': '3d-plot-previous-state'}, 'data'),
State({'type': 'memory', 'index': 'previous_state'}, 'data'),
State({'type': 'memory', 'index': 'selected_file'}, 'data')])
def trigger_3D_plot(trigger, plot_previous_state, previous_state, filename):
if filename == '':
raise PreventUpdate
plot_new_state = {}
for key in previous_state:
if key != 'theta_r' and key != 'phi_r' and key != 'variable_as_x':
plot_new_state[key] = previous_state[key]
if plot_new_state == plot_previous_state:
raise PreventUpdate
else:
plot_previous_state = plot_new_state
return plot_previous_state, 'Triggered'
@app.callback(Output({'type': 'graph', 'index': '3d-brdf'}, 'figure'),
[Input({'type': 'trigger', 'index': 'for-3d-plot'}, 'data'),
Input({'type': 'trigger', 'index': 'mode-brdf-tab'}, 'data')],
[State({'type': 'memory', 'index': 'browser_data_storage'}, 'data'),
State({'type': 'memory', 'index': 'selected_file'}, 'data')])
def update_3D_plot(trigger1, trigger2, uploaded_data, filename):
if uploaded_data == {} or filename == '':
raise PreventUpdate
# print(time.process_time())
degree_sign = u"\N{DEGREE SIGN}"
variables = uploaded_data[filename]['data']['variables']
data = uploaded_data[filename]['data']['values']
mask = np.array([])
brdf = np.array([])
ubrdf = np.array([])
theta_r = np.array([])
phi_r = np.array([])
for variable in variables:
name = variable['name']
type = variable['type']
data[name] = np.array(data[name])
if type == "number":
data[name] = data[name].astype(np.float64)
elif type == "string":
data[name] = data[name].astype(str)
if name != 'BRDF' and name != 'uBRDF' and name != 'theta_r' and name != 'phi_r':
if mask.size == 0:
mask = data[name] == variable['sval']
else:
mask = np.logical_and(mask, data[name] == variable['sval'])
else:
if name == 'BRDF':
brdf = data[name]
if name == 'uBRDF':
ubrdf = data[name]
if name == 'theta_r':
theta_r = data[name]
if name == 'phi_r':
phi_r = data[name]
brdf = brdf[mask]
ubrdf = ubrdf[mask]
theta_r = theta_r[mask]
phi_r = phi_r[mask]
theta = theta_r
phi = phi_r
brdf = brdf
figure = []
#tristimulus_values, RGB_values = get_tristimulus_XYZs(thI, phiI, pol, data, observer, illuminant)
#RGB_values = np.array(RGB_values).reshape((theta.shape[1]*phi.shape[1]),3)
X = theta.T*np.cos(np.radians(phi))
Y = theta.T*np.sin(np.radians(phi))
Z = brdf
# print(time.process_time())
# X_sc = np.transpose(X).reshape((theta.shape[1]*phi.shape[1]))
# Y_sc = np.transpose(Y).reshape((theta.shape[1]*phi.shape[1]))
# Z_sc = np.transpose(Z).reshape((theta.shape[1]*phi.shape[1]))
X_sc = X
Y_sc = Y
Z_sc = Z
label_data = []
for i in range(phi.shape[0]):
label_data.append(
'BRDF value: {:.4f}, Theta: {:.2f}, Phi: {:.2f}'.format(Z[i], theta[i], phi[i]))
label_data = np.array(label_data)
# label_data = label_data.reshape((theta.shape[1] * phi.shape[1]))
label_data = label_data[Z_sc >= 0]
# X_sc[Z_sc < 0] = None
# Y_sc[Z_sc < 0] = None
# Z_sc[Z_sc < 0] = None
zmin = np.min(Z_sc[Z_sc>=0])
zmax = np.max(Z_sc[Z_sc>=0])
theta_span = 20+np.max(np.abs(theta[0]))
offset = 0.1*zmin
if zmax-zmin != 0:
if zmin-(zmax-zmin)/5 < 0:
offset = zmin
else:
offset = (zmax - zmin) / 5
# print(time.process_time())
x_scale = []
y_scale = []
x_scale.append(0)
y_scale.append(0)
x_scale.append(theta_span * np.cos(np.radians(0)))
y_scale.append(theta_span * np.sin(np.radians(0)))
x_scale.append(-theta_span * np.cos(np.radians(0)))
y_scale.append(-theta_span * np.sin(np.radians(0)))
x_scale.append(0)
y_scale.append(0)
x_scale.append(theta_span * np.cos(np.radians(90)))
y_scale.append(theta_span * np.sin(np.radians(90)))
x_scale.append(-theta_span * np.cos(np.radians(90)))
y_scale.append(-theta_span * np.sin(np.radians(90)))
for i in range(phi.shape[0]):
x_scale.append(0)
y_scale.append(0)
x_scale.append(theta_span*np.cos(np.radians(phi[i])))
y_scale.append(theta_span*np.sin(np.radians(phi[i])))
x_scale.append(-theta_span*np.cos(np.radians(phi[i])))
y_scale.append(-theta_span*np.sin(np.radians(phi[i])))
x_scale.append(0)
y_scale.append(0)
circle_points = range(0, 370, 10)
last_i = 0
for i in range(0,100,10):
if i <= np.max(np.abs(theta[0])):
for point in circle_points:
x_scale.append(i*np.cos(np.radians(point)))
y_scale.append(i*np.sin(np.radians(point)))
last_i = i
else:
if last_i != -1:
for point in circle_points:
x_scale.append((last_i+10)*np.cos(np.radians(point)))
y_scale.append((last_i+10)*np.sin(np.radians(point)))
last_i = -1
z_scale = np.full((1, len(x_scale)), zmin-offset).tolist()[0]
figure.append(go.Scatter3d(x=x_scale,
y=y_scale,
z=z_scale,
opacity=0.2,
mode='lines',
line=dict(color='black', width=3),
#marker=dict(color='black', size=2),
hoverinfo='skip'))
x_scale_vals = []
y_scale_vals = []
text = []
text_positions =[]
x_scale_vals.append(theta_span * np.cos(np.radians(0)))
y_scale_vals.append(theta_span * np.sin(np.radians(0)))
x_scale_vals.append(-theta_span * np.cos(np.radians(0)))
y_scale_vals.append(-theta_span * np.sin(np.radians(0)))
text.append('{:.0f}'.format(0)+degree_sign)
text_positions.append('bottom center')
text.append('{:.0f}'.format(180)+degree_sign)
text_positions.append('top center')
x_scale_vals.append(theta_span * np.cos(np.radians(90)))
y_scale_vals.append(theta_span * np.sin(np.radians(90)))
x_scale_vals.append(-theta_span * np.cos(np.radians(90)))
y_scale_vals.append(-theta_span * np.sin(np.radians(90)))
text.append('{:.0f}'.format(90)+degree_sign)
text_positions.append('middle right')
text.append('{:.0f}'.format(270)+degree_sign)
text_positions.append('middle left')
for i in range(phi.shape[0]):
x_scale_vals.append(theta_span*np.cos(np.radians(phi[i])))
y_scale_vals.append(theta_span*np.sin(np.radians(phi[i])))
x_scale_vals.append(-theta_span*np.cos(np.radians(phi[i])))
y_scale_vals.append(-theta_span*np.sin(np.radians(phi[i])))
if phi[i] <= 180:
text.append('{:.0f}'.format(phi[i])+degree_sign)
if phi[i] >= 0 and phi[i] < 90:
text_positions.append('bottom center')
elif phi[i] > 90 and phi[i] < 180:
text_positions.append('top center')
elif phi[i] == 90:
text_positions.append('middle right')
elif phi[i] == 180:
text_positions.append('top center')
else:
text_positions.append('bottom center')
if phi[i] != 0 and phi[i] < 180:
text.append('{:.0f}'.format(phi[i]+180)+degree_sign)
if phi[i] > 0 and phi[i] < 90:
text_positions.append('top center')
elif phi[i] > 90 and phi[i] < 180:
text_positions.append('bottom center')
elif phi[i] == 90:
text_positions.append('middle left')
else:
text_positions.append('bottom center')
else:
text.append('')
text_positions.append('bottom center')
for i in range(np.abs(len(x_scale_vals)-len(text))):
text.append('')
text_positions.append('bottom center')
last_i = 0
for i in range(0,100,20):
if i <= np.max(np.abs(theta[0])):
x_scale_vals.append(0)
y_scale_vals.append(i)
text.append('{:.0f}'.format(i)+degree_sign)
text_positions.append('bottom center')
last_i = i
else:
if last_i != -1:
x_scale_vals.append(0)
y_scale_vals.append(last_i+10)
text.append('{:.0f}'.format(i)+degree_sign)
text_positions.append('bottom center')
last_i = -1
z_scale_vals = np.full((1, len(x_scale_vals)), zmin-offset).tolist()[0]
figure.append(go.Scatter3d(x=x_scale_vals,
y=y_scale_vals,
z=z_scale_vals,
opacity=1.0,
mode='markers+text',
#line=dict(color='black', width=2),
marker=dict(color='black', size=3),
text=text,
textposition=text_positions,
textfont_size=14,
hoverinfo='skip'))
hight_points = range(6)
# figure.append(go.Scatter3d(
# # x=np.full((1,len(hight_points)+1),-theta_span).tolist()[0],
# # y=np.full((1,len(hight_points)+1),0).tolist()[0],
# # z=[zmin-offset]+[zmin + i * (zmax - zmin) / 5 for i in hight_points],
# # text=['{:.4f}'.format(zmin-offset)]+['{:.4f}'.format(zmin + i * (zmax - zmin) / 5) for i in hight_points],
# x=np.full((1,len(hight_points)),-theta_span).tolist()[0],
# y=np.full((1,len(hight_points)),0).tolist()[0],
# z=[zmin + i * (zmax - zmin) / 5 for i in hight_points],
# text=['{:.4f}'.format(zmin + i * (zmax - zmin) / 5) for i in hight_points],
# textposition="middle right",
# textfont_size=10,
# mode='lines+markers+text',
# line=dict(color='black', width=2),
# marker=dict(color='black', size=2),
# hoverinfo='skip'))
colorscale_range = range(11)
Z_sc[Z_sc < 0] = None
figure.append(go.Mesh3d(x=X_sc, y=Y_sc, z=Z_sc,
color='grey',
opacity=1.0,
intensity=Z_sc,
colorscale='Portland',
hoverinfo='skip',
hovertext=label_data,
cmin=zmin,
cmax=zmax,
colorbar = dict(tickvals=[zmin + i * (zmax - zmin) / 10 for i in colorscale_range],
ticktext=['{:.4f}'.format(zmin + i * (zmax - zmin) / 10) for i in colorscale_range],
len=1)
)
)
figure.append(go.Scatter3d(x=X_sc, y=Y_sc, z=Z_sc,
opacity=1.0,
mode='markers',
hoverinfo='text',
hovertext=label_data,
marker=dict(color='black',#Z_sc,
size=2,
cmin=zmin,
cmax=zmax,
colorscale='Portland')
))
# print(time.process_time())
# figure.update_layout
# camera = dict(
# eye=dict(x=0.000001, y=0., z=2.5)
# )
layout = go.Layout(title="3D BRDF plot for chosen parameters",
title_yanchor='top',
# font=dict(size=18),
scene=dict(
xaxis_title="Theta (deg)",
yaxis_title="Theta (deg)",
zaxis_title="BRDF"),
# scene_camera=camera,
scene_xaxis_visible=False,
scene_yaxis_visible=False,
scene_zaxis_visible=False,
scene_aspectmode='manual',
scene_aspectratio=dict(x=2, y=2, z=1),
showlegend=False,
scene_camera_projection_type = "orthographic",
height=1000,
width=1000
)
# print(time.process_time())
# print({'data': figure, 'layout': layout})
return {'data': figure, 'layout': layout}
@app.callback([Output({'type': 'memory', 'index': 'projection-plot-previous-state'}, 'data'),
Output({'type': 'trigger', 'index': 'for-projection-plot'}, 'data')],
[Input({'type': 'trigger', 'index': 'modify-state'}, 'data')],
[State({'type': 'memory', 'index': 'projection-plot-previous-state'}, 'data'),
State({'type': 'memory', 'index': 'previous_state'}, 'data'),
State({'type': 'memory', 'index': 'selected_file'}, 'data'),
State({'type': 'memory', 'index': 'projection-bezel-turned'}, 'data'),
State({'type': 'memory', 'index': 'projection-bezel-previous-state'}, 'data')])
def trigger_projection_plot(trigger, plot_previous_state, previous_state, filename, bezel, previous_bezel):
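    # Fire the projection-plot trigger only when the part of the app state that
    # matters for this plot (everything except theta_r and the variable-as-x
    # choice) has actually changed, and skip updates caused by turning the bezel.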
if filename == '':
raise PreventUpdate
plot_new_state = {}
for key in previous_state:
if key != 'theta_r' and key != 'variable_as_x':
plot_new_state[key] = previous_state[key]
if plot_new_state == plot_previous_state:
raise PreventUpdate
else:
plot_previous_state = plot_new_state
# print('here')
new_bezel = bezel
if new_bezel > previous_bezel:
raise PreventUpdate
# print('now here')
return plot_previous_state, 'Triggered'
@app.callback(Output({'type': 'graph', 'index': 'projection'}, 'figure'),
[Input({'type': 'trigger', 'index': 'for-projection-plot'}, 'data'),
Input({'type': 'trigger', 'index': 'mode-brdf-tab'}, 'data')],
[State({'type': 'memory', 'index': 'browser_data_storage'}, 'data'),
State({'type': 'memory', 'index': 'selected_file'}, 'data')])
def update_projection_plot(trigger1, trigger2, uploaded_data, filename):
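    # Build the polar BRDF heatmap: mask the data to the currently selected
    # parameter values, then draw one Barpolar bar per (theta_r, phi_r) sample,
    # with radius |theta_r|, angle phi_r (or phi_r+180 for negative theta_r),
    # and colour given by the BRDF value.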
if trigger1 is None or uploaded_data == {} or filename == '':
raise PreventUpdate
variables = uploaded_data[filename]['data']['variables']
data = uploaded_data[filename]['data']['values']
mask = np.array([])
brdf = np.array([])
ubrdf = np.array([])
theta_r = np.array([])
phi_r = np.array([])
s_phi_r = 0
for variable in variables:
name = variable['name']
type = variable['type']
data[name] = np.array(data[name])
if type == "number":
data[name] = data[name].astype(np.float64)
elif type == "string":
data[name] = data[name].astype(str)
if name != 'BRDF' and name != 'uBRDF' and name != 'theta_r' and name != 'phi_r':
if mask.size == 0:
mask = data[name] == variable['sval']
else:
mask = np.logical_and(mask, data[name] == variable['sval'])
else:
if name == 'BRDF':
brdf = data[name]
if name == 'uBRDF':
ubrdf = data[name]
if name == 'theta_r':
theta_r = data[name]
if name == 'phi_r':
phi_r = data[name]
s_phi_r = variable['sval']
brdf = brdf[mask]
ubrdf = ubrdf[mask]
theta_r = theta_r[mask]
phi_r = phi_r[mask]
thetas = theta_r
# arranged_thetas = np.flip(np.append(np.flip(thetas[thetas<0]),thetas[thetas>=0]))
# print(arranged_thetas)
phis = phi_r
s_phi_v = s_phi_r
z = brdf
figure = []
r_bp = []
theta_bp = []
z_bp = []
previous_radius = 0
for i in range(thetas.shape[0]):
radius = np.abs(thetas[i]) #2*np.sin(np.radians(theta)/2)
theta = thetas[i]
phi = phis[i]
if theta == 0:
r_bp.append(0)
theta_bp.append(0)
z_bp.append(z[i])
if theta > 0:
r_bp.append(radius)
theta_bp.append(phi)
z_bp.append(z[i])
elif theta < 0:
r_bp.append(radius)
theta_bp.append(180+phi)
z_bp.append(z[i])
previous_radius = radius
r_bp = np.array(r_bp)
theta_bp = np.array(theta_bp)
z_bp = np.array(z_bp)
# r_bp = r_bp[z_bp>=0]
# theta_bp = theta_bp[z_bp>=0]
# z_bp = z_bp[z_bp>=0]
# r_bp[z_bp < 0] = None
# theta_bp[z_bp < 0] = None
zmax = np.max(z_bp[z_bp >= 0])
zmin = np.min(z_bp[z_bp >= 0])
z_bp[z_bp < 0] = None
sorted_i = np.argsort(-r_bp)
r_bp = r_bp[sorted_i]
theta_bp = theta_bp[sorted_i]
z_bp = z_bp[sorted_i]
colorscale_range = range(11)
figure.append(go.Barpolar(
name = '',
r = r_bp,
theta = theta_bp,
opacity = 1,
base = 0,
hovertemplate='\u03B8: %{r}' + '<br>\u03C6: %{theta}<br>',
# marker_color='rgb('+str(RGB_color[0])+','+str(RGB_color[1])+','+str(RGB_color[2])+')',
marker_cmin = zmin,
marker_cmax = zmax,
marker_colorscale = 'Portland',
marker_color = z_bp,
marker_colorbar = dict(tickvals=[zmin + i * (zmax - zmin) / 10 for i in colorscale_range],
ticktext=['{:.4f}'.format(zmin + i * (zmax - zmin) / 10) for i in colorscale_range]),
showlegend = False))
layout = go.Layout(
title="BRDF polar heatmap",
polar_bargap=0.005,
polar_angularaxis_rotation = -s_phi_v,
polar_radialaxis_angle = 0,
polar_radialaxis_ticks="outside",
)
# print(time.process_time())
# if theta < 0:
# figure.add_trace(pgo.Barpolar(
# r=np.flip(np.array([2*np.cos(np.radians(theta)) for angles in thetas])),
# theta=180+phis))
return {'data': figure, 'layout': layout}
@app.callback([Output({'type': 'memory', 'index': 'projection-bezel-turned'}, 'data'),
Output({'type': 'options', 'index': 'phi_r'}, 'value')],
[Input({'type': 'graph', 'index': 'projection'}, 'relayoutData')],
[State({'type': 'memory', 'index': 'browser_data_storage'}, 'data'),
State({'type': 'memory', 'index': 'selected_file'}, 'data'),
State({'type': 'memory', 'index': 'projection-bezel-turned'}, 'data')])
def bezel_select_phi_r(relayoutData, uploaded_data, filename, bezel):
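    # Translate a rotation of the polar plot's angular axis (the "bezel") into
    # the nearest available phi_r value and push that value back into the
    # phi_r options, incrementing the bezel-turned counter.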
if uploaded_data == {} or filename == '':
raise PreventUpdate
# print(relayoutData)
if relayoutData is None:
#relayoutData = {'polar.angularaxis.rotation': 0}
raise PreventUpdate
    if 'polar.angularaxis.rotation' not in relayoutData:
#relayoutData['polar.angularaxis.rotation'] = 0
raise PreventUpdate
variables = uploaded_data[filename]['data']['variables']
phis = []
for variable in variables:
if variable['name'] == 'phi_r':
phis = np.array(variable['uvals'])
selected_angle = [0]
if 'polar.angularaxis.rotation' in relayoutData:
angle = relayoutData['polar.angularaxis.rotation']
# print(angle)
if np.abs(angle) > 180:
raise PreventUpdate
else:
if angle > 0:
angle = np.abs(angle)
d = np.abs(phis - angle)
min_d = np.min(d)
selected_angle = 360 - phis[d == min_d]
if selected_angle[0] not in phis:
selected_angle[0] = selected_angle[0]-180
if 180 not in phis:
d180 = np.abs(180 - angle)
if d180 < min_d:
selected_angle[0] = 0
if selected_angle[0] == 360:
selected_angle[0] = 0
elif angle <= 0:
angle = np.abs(angle)
d = np.abs(phis - angle)
min_d = np.min(d)
selected_angle = phis[d == min_d]
if 180 not in phis:
d180 = np.abs(180 - angle)
if d180 < min_d:
selected_angle[0] = 0
else:
raise PreventUpdate
# print(selected_angle)
return bezel+1, selected_angle[0]
@app.callback(Output({'type': 'memory', 'index': 'projection-bezel-previous-state'}, 'data'),
[Input({'type': 'memory', 'index': 'projection-bezel-turned'}, 'data')])
def update_bezel_previous_state(bezel_new):
time.sleep(0.5)
return bezel_new
@app.callback([Output({'type': 'memory', 'index': '2D-brdf-plot-previous-state'}, 'data'),
Output({'type': 'trigger', 'index': 'for-2D-brdf-plot'}, 'data')],
[Input({'type': 'trigger', 'index': 'modify-state'}, 'data')],
[State({'type': 'memory', 'index': '2D-brdf-plot-previous-state'}, 'data'),
State({'type': 'memory', 'index': 'previous_state'}, 'data'),
State({'type': 'memory', 'index': 'selected_file'}, 'data')])
def trigger_2D_brdf_plot(trigger, plot_previous_state, previous_state, filename):
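    # Same pattern as the projection-plot trigger: only fire when the state
    # relevant to the 2D BRDF plot (everything except theta_r and the
    # variable-as-x choice) has changed.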
if filename == '':
raise PreventUpdate
plot_new_state = {}
for key in previous_state:
if key != 'theta_r' and key != 'variable_as_x':
plot_new_state[key] = previous_state[key]
if plot_new_state == plot_previous_state:
raise PreventUpdate
else:
plot_previous_state = plot_new_state
return plot_previous_state, 'Triggered'
@app.callback(Output({'type': 'graph', 'index': '2d-brdf'},'figure'),
[Input({'type': 'trigger', 'index': 'for-2D-brdf-plot'}, 'data'),
Input({'type': 'trigger', 'index': 'mode-brdf-tab'}, 'data'),
Input({'type': 'trigger', 'index': 'change-snap-states'}, 'data')],
[State({'type': 'memory', 'index': 'browser_data_storage'}, 'data'),
State({'type': 'memory', 'index': 'selected_file'}, 'data'),
State({'type': 'memory', 'index': 'snaped-states'}, 'data')])
def update_2D_brdf_plot(trigger1, trigger2, trigger3, uploaded_data, filename, snaped_states):
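    # In-plane cut through the BRDF: plot BRDF versus viewing zenith angle along
    # the selected azimuth phi_r and its opposite azimuth (shown as negative
    # theta), overlaying any snapped states the user has pinned for comparison.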
if uploaded_data == {} or filename == '':
raise PreventUpdate
variables = uploaded_data[filename]['data']['variables']
data = uploaded_data[filename]['data']['values']
mask = np.array([])
brdf = np.array([])
ubrdf = np.array([])
theta_r = np.array([])
phi_r = np.array([])
s_phi_r = 0
for variable in variables:
name = variable['name']
type = variable['type']
data[name] = np.array(data[name])
if type == "number":
data[name] = data[name].astype(np.float64)
elif type == "string":
data[name] = data[name].astype(str)
if name != 'BRDF' and name != 'uBRDF' and name != 'theta_r' and name != 'phi_r':
if mask.size == 0:
mask = data[name] == variable['sval']
else:
mask = np.logical_and(mask, data[name] == variable['sval'])
else:
if name == 'BRDF':
brdf = data[name]
if name == 'uBRDF':
ubrdf = data[name]
if name == 'theta_r':
theta_r = data[name]
if name == 'phi_r':
phi_r = data[name]
s_phi_r = variable['sval']
brdf = brdf[mask]
ubrdf = ubrdf[mask]
theta_r = theta_r[mask]
phi_r = phi_r[mask]
figure = go.Figure()
# for filename in uploaded_data:
# if uploaded_data[filename]['selected_states']['visible'] == [1] and filename != selected_filename:
# phis = np.array(uploaded_data[filename]['measurement_data']['phiVs'])
# thetas = np.array(uploaded_data[filename]['measurement_data']['thetaVs'])
# thI = uploaded_data[filename]['selected_states']['theta_I']
# phiI = uploaded_data[filename]['selected_states']['phi_I']
# pol = uploaded_data[filename]['selected_states']['polarization']
# wl = uploaded_data[filename]['selected_states']['wavelength']
# selected_angle = uploaded_data[filename]['selected_states']['phi_V']
# data = uploaded_data[filename]['measurement_data']
# selected_data = np.array(select_data(wl, thI, phiI, pol, data))
#
# if selected_angle == 180:
# phi_mask = np.logical_or(phis == 180, phis == 0)
# elif selected_angle == 360:
# phi_mask = np.logical_or(phis == 0, phis == 180)
# elif selected_angle < 180:
# phi_mask = np.logical_or(phis == selected_angle, phis == (selected_angle + 180))
# elif selected_angle > 180:
# phi_mask = np.logical_or(phis == selected_angle, phis == (selected_angle - 180))
#
# x = thetas
# y = selected_data[:, phi_mask].T
#
# selected_phiVs = phis[phi_mask]
# # print(selected_phiVs)
#
# if phis[phi_mask].shape[0] == 1:
# x = x
# y = y[0]
# x = x[y >= 0]
# y = y[y >= 0]
# figure.add_trace(go.Scatter(name='BRDF', x=x, y=y, mode='lines+markers'))
# elif phis[phi_mask].shape[0] == 2:
# y = np.concatenate((y[1][np.argsort(-x)], y[0]))
# x = np.concatenate((np.sort(-x), x))
# x = x[y >= 0]
# y = y[y >= 0]
# figure.add_trace(go.Scatter(name='BRDF',
# x=x,
# y=y,
# mode='lines+markers'))
# # figure.add_trace(go.Scatter(name='BRDF -90 to 0', x=-x, y=y[1], mode='lines+markers'))
# else:
# raise PreventUpdate
phis = phi_r
thetas = theta_r
selected_angle = s_phi_r
selected_data = brdf
if selected_angle == 180:
phi_mask = np.logical_or(phis == 180, phis == 0)
elif selected_angle == 360:
phi_mask = np.logical_or(phis == 0, phis == 180)
elif selected_angle < 180:
phi_mask = np.logical_or(phis == selected_angle, phis == (selected_angle + 180))
elif selected_angle > 180:
phi_mask = np.logical_or(phis == selected_angle, phis == (selected_angle - 180))
x = thetas[phi_mask]
y = selected_data[phi_mask]
selected_phiVs = phis[phi_mask]
selected_phiVs = np.unique(selected_phiVs)
if selected_phiVs.shape[0] == 1:
        order = np.argsort(x)
        x = x[order]
        y = y[order]
# x[y < 0] = None
y[y < 0] = None
figure.add_trace(go.Scatter(name='BRDF', x=x, y=y, mode='lines+markers'))
elif selected_phiVs.shape[0] == 2:
y_1 = y[phis[phi_mask] == selected_phiVs[0]]
y_2 = y[phis[phi_mask] == selected_phiVs[1]]
if selected_phiVs[0] < 180:
y_1 = y_1[np.argsort(-x)]
y_2 = y_2[np.argsort(x)]
y = np.concatenate((y_1, y_2))
x = np.concatenate((np.sort(-x), np.sort(x)))
else:
y_1 = y_1[np.argsort(x)]
y_2 = y_2[np.argsort(-x)]
y = np.concatenate((y_1, y_2))
x = np.concatenate((np.sort(-x), np.sort(x)))
# x[y < 0] = None
y[y < 0] = None
figure.add_trace(go.Scatter(name='BRDF',
x=x,
y=y,
mode='lines+markers'))
# figure.add_trace(go.Scatter(name='BRDF -90 to 0', x=-x, y=y[1], mode='lines+markers'))
else:
raise PreventUpdate
if snaped_states != {}:
for file_name in snaped_states:
for selected_x in snaped_states[file_name]:
if selected_x == 'theta_r':
for state in snaped_states[file_name][selected_x]:
data = uploaded_data[file_name]['data']['values']
mask = np.array([])
brdf = np.array([])
ubrdf = np.array([])
theta_r = np.array([])
phi_r = np.array([])
s_phi_r = 0
for variable in variables:
name = variable['name']
type = variable['type']
data[name] = np.array(data[name])
if type == "number":
data[name] = data[name].astype(np.float64)
elif type == "string":
data[name] = data[name].astype(str)
if name != 'BRDF' and name != 'uBRDF' and name != 'theta_r' and name != 'phi_r':
if mask.size == 0:
mask = data[name] == state[name]
else:
mask = np.logical_and(mask, data[name] == state[name])
else:
if name == 'BRDF':
brdf = data[name]
if name == 'uBRDF':
ubrdf = data[name]
if name == 'theta_r':
theta_r = data[name]
if name == 'phi_r':
phi_r = data[name]
s_phi_r = state[name]
brdf = brdf[mask]
ubrdf = ubrdf[mask]
theta_r = theta_r[mask]
phi_r = phi_r[mask]
phis = phi_r
thetas = theta_r
selected_angle = s_phi_r
selected_data = brdf
if selected_angle == 180:
phi_mask = np.logical_or(phis == 180, phis == 0)
elif selected_angle == 360:
phi_mask = np.logical_or(phis == 0, phis == 180)
elif selected_angle < 180:
phi_mask = np.logical_or(phis == selected_angle, phis == (selected_angle + 180))
elif selected_angle > 180:
phi_mask = np.logical_or(phis == selected_angle, phis == (selected_angle - 180))
x = thetas[phi_mask]
y = selected_data[phi_mask]
selected_phiVs = phis[phi_mask]
selected_phiVs = np.unique(selected_phiVs)
if selected_phiVs.shape[0] == 1:
                            order = np.argsort(x)
                            x = x[order]
                            y = y[order]
# x[y < 0] = None
y[y < 0] = None
figure.add_trace(go.Scatter(name='BRDF', x=x, y=y, mode='lines+markers'))
elif selected_phiVs.shape[0] == 2:
y_1 = y[phis[phi_mask] == selected_phiVs[0]]
y_2 = y[phis[phi_mask] == selected_phiVs[1]]
if selected_phiVs[0] < 180:
y_1 = y_1[np.argsort(-x)]
y_2 = y_2[np.argsort(x)]
y = np.concatenate((y_1, y_2))
x = np.concatenate((np.sort(-x), np.sort(x)))
else:
y_1 = y_1[np.argsort(x)]
y_2 = y_2[np.argsort(-x)]
y = np.concatenate((y_1, y_2))
x = np.concatenate((np.sort(-x), np.sort(x)))
# x[y < 0] = None
y[y < 0] = None
figure.add_trace(go.Scatter(name='BRDF',
x=x,
y=y,
mode='lines+markers'))
figure.update_layout(
title="BRDF 2D plot at selected viewing azimuth",
xaxis_title='Viewing zenith angle Theta (deg)',
xaxis_nticks=15,
xaxis_gridcolor='rgb(112,112,112)',
xaxis_zerolinecolor='rgb(0,0,0)',
yaxis_title='BRDF values (sr \u207B\u00B9)',
yaxis_nticks=10,
yaxis_gridcolor='rgb(112,112,112)',
yaxis_zerolinecolor='rgb(0,0,0)',
plot_bgcolor='rgb(255,255,255)'
)
return figure
@app.callback([Output({'type': 'memory', 'index': '2D-brdf-plot-clicked'}, 'data'),
Output({'type': 'options', 'index': 'theta_r'}, 'value'),
Output({'type': 'graph', 'index': '2d-brdf'}, 'clickData')],
[Input({'type': 'graph', 'index': '2d-brdf'}, 'clickData')],
[State({'type': 'memory', 'index': 'browser_data_storage'}, 'data'),
State({'type': 'memory', 'index': 'selected_file'}, 'data'),
State({'type': 'memory', 'index': '2D-brdf-plot-clicked'}, 'data'),
State({'type': 'options', 'index': 'theta_r'}, 'options')])
def plot_2D_select_ThetaV(clickData, uploaded_data, filename, clicks, current_options):
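    # Clicking a point on the 2D BRDF plot selects the corresponding theta_r in
    # the options panel, but only if that theta value is an available option;
    # the clickData is then cleared so the same point can be clicked again.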
if uploaded_data == {} or filename == '':
raise PreventUpdate
# print(current_options)
if clickData is not None:
selected_theta = clickData['points'][0]['x']
if {'label': selected_theta, 'value': selected_theta} not in current_options:
raise PreventUpdate
else:
raise PreventUpdate
return clicks+1, selected_theta, None
@app.callback([Output({'type': 'memory', 'index': '2D-arbitrary-plot-previous-state'}, 'data'),
Output({'type': 'trigger', 'index': 'for-2D-arbitrary-plot'}, 'data')],
[Input({'type': 'trigger', 'index': 'modify-state'}, 'data')],
[State({'type': 'memory', 'index': '2D-arbitrary-plot-previous-state'}, 'data'),
State({'type': 'memory', 'index': 'previous_state'}, 'data'),
State({'type': 'memory', 'index': 'selected_file'}, 'data')])
def trigger_2D_arbitrary_plot(trigger, plot_previous_state, previous_state, filename):
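    # Fire the arbitrary-x plot trigger only when something other than the
    # variable currently used as the x-axis has changed.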
if filename == '':
raise PreventUpdate
plot_new_state = {}
for key in previous_state:
if key != previous_state['variable_as_x']:
plot_new_state[key] = previous_state[key]
if plot_new_state == plot_previous_state:
raise PreventUpdate
else:
plot_previous_state = plot_new_state
return plot_previous_state, 'Triggered'
@app.callback(Output({'type': 'graph', 'index': 'x-any-y-brdf'},'figure'),
[Input({'type': 'trigger', 'index': 'mode-brdf-tab'}, 'data'),
Input({'type': 'trigger', 'index': 'for-2D-arbitrary-plot'}, 'data'),
Input({'type': 'trigger', 'index': 'change-snap-states'}, 'data')],
[State({'type': 'memory', 'index': 'browser_data_storage'}, 'data'),
State({'type': 'memory', 'index': 'selected_file'}, 'data'),
State({'type': 'memory', 'index': 'snaped-states'}, 'data')])
def update_2D_arbitrary_plot(trigger1, trigger2, trigger3, uploaded_data, selected_filename, snaped_states):
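    # Plot BRDF against whichever variable the user selected as the x-axis
    # (any variable other than theta_r), again overlaying any snapped states;
    # if theta_r is selected as x, a placeholder trace is drawn instead.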
if selected_filename == '' or uploaded_data == {}:
raise PreventUpdate
if 'variable_as_x' not in uploaded_data[selected_filename]:
raise PreventUpdate
variables = uploaded_data[selected_filename]['data']['variables']
data = uploaded_data[selected_filename]['data']['values']
variable_selected_as_x = uploaded_data[selected_filename]['variable_as_x']
variable_selected_as_x_unit = ''
for variable in variables:
if variable['name'] == variable_selected_as_x:
variable_selected_as_x_unit = str(variable['unit'])
# variables_to_options = {}
# for variable in uploaded_data[selected_filename]['data']['variables']:
# if variable['name'] != 'BRDF' and variable['name'] != "uBRDF":
# variables_to_options[variable['name']] = {
# 'options': [{'label': uval, 'value': uval} for uval in variable['uvals']],
# 'value': variable['sval']
# }
# variable_as_x_options = [{'label': key, 'value': key} for key in variables_to_options]
# variable_selected_as_x = ''
# if 'variable_as_x' in uploaded_data[selected_filename]:
# variable_selected_as_x = uploaded_data[selected_filename]['variable_as_x']
# else:
# variable_selected_as_x = variable_as_x_options[len(variable_as_x_options) - 1]['value']
figure = go.Figure()
if variable_selected_as_x != 'theta_r':
mask = np.array([])
brdf = np.array([])
ubrdf = np.array([])
x_variable = np.array([])
for variable in variables:
name = variable['name']
type = variable['type']
data[name] = np.array(data[name])
if type == "number":
data[name] = data[name].astype(np.float64)
elif type == "string":
data[name] = data[name].astype(str)
if name != 'BRDF' and name != 'uBRDF' and name != variable_selected_as_x:
if mask.size == 0:
mask = data[name] == variable['sval']
else:
mask = np.logical_and(mask, data[name] == variable['sval'])
else:
if name == 'BRDF':
brdf = data[name]
if name == 'uBRDF':
ubrdf = data[name]
if name == variable_selected_as_x:
x_variable = data[name]
brdf = brdf[mask]
ubrdf = ubrdf[mask]
x_variable = x_variable[mask]
x = np.sort(x_variable)
y = brdf[np.argsort(x_variable)]
figure.add_trace(go.Scatter(x=x, y=y, mode='lines+markers'))
if snaped_states != {}:
for file_name in snaped_states:
for selected_x in snaped_states[file_name]:
if selected_x != 'theta_r':
for state in snaped_states[file_name][selected_x]:
variables = uploaded_data[file_name]['data']['variables']
data = uploaded_data[file_name]['data']['values']
mask = np.array([])
brdf = np.array([])
ubrdf = np.array([])
x_variable = np.array([])
for variable in variables:
name = variable['name']
type = variable['type']
data[name] = np.array(data[name])
if type == "number":
data[name] = data[name].astype(np.float64)
elif type == "string":
data[name] = data[name].astype(str)
if name != 'BRDF' and name != 'uBRDF' and name != selected_x:
if mask.size == 0:
mask = data[name] == state[name]
else:
mask = np.logical_and(mask, data[name] == state[name])
else:
if name == 'BRDF':
brdf = data[name]
if name == 'uBRDF':
ubrdf = data[name]
if name == selected_x:
x_variable = data[name]
brdf = brdf[mask]
ubrdf = ubrdf[mask]
x_variable = x_variable[mask]
x = np.sort(x_variable)
y = brdf[np.argsort(x_variable)]
figure.add_trace(go.Scatter(x=x, y=y, mode='lines+markers'))
else:
figure.add_trace(go.Scatter(x=[0, 1, 0.5, 0.5, -0.5, -0.5, -1, 0], y=[0, 1, 1, 3, 3, 1, 1, 0], mode='lines+markers'))
figure.update_layout(
title="BRDF dependence on parameter selected as X",
xaxis_title= variable_selected_as_x + ' (' + variable_selected_as_x_unit + ')',
yaxis_title='BRDF values (sr \u207B\u00B9)',
xaxis_nticks=15,
xaxis_gridcolor='rgb(112,112,112)',
xaxis_zerolinecolor='rgb(0,0,0)',
yaxis_nticks=10,
yaxis_gridcolor='rgb(112,112,112)',
yaxis_zerolinecolor='rgb(0,0,0)',
plot_bgcolor='rgb(255,255,255)'
)
# print('ok')
return figure
app.layout = server_layout()
app.title = "BiRDview"
#Run application code
if __name__ == '__main__':
    app.run_server(debug=False, dev_tools_ui=False, dev_tools_props_check=False)
    # app.run_server(debug=False)  # unreachable duplicate call, kept commented for reference
| 51.176439
| 287
| 0.503022
|
e1f80125dfabc6a2cdb553136f43d6389d4adfdc
| 3,466
|
py
|
Python
|
spyre/spyre/widgets/repository_widget.py
|
zhong-lab/optics
|
9de1942d9a128183ecb3d360b160b27126e7b8f0
|
[
"BSD-2-Clause"
] | 1
|
2022-03-27T07:47:19.000Z
|
2022-03-27T07:47:19.000Z
|
spyre/spyre/widgets/repository_widget.py
|
zhong-lab/optics
|
9de1942d9a128183ecb3d360b160b27126e7b8f0
|
[
"BSD-2-Clause"
] | null | null | null |
spyre/spyre/widgets/repository_widget.py
|
zhong-lab/optics
|
9de1942d9a128183ecb3d360b160b27126e7b8f0
|
[
"BSD-2-Clause"
] | 4
|
2019-11-08T22:39:04.000Z
|
2021-11-05T02:39:37.000Z
|
from PyQt5 import QtWidgets
from spyre.repository import Repository, Node
from spyre.plotting import BasePlotWidget
import time
class RepositoryWidget(QtWidgets.QWidget):
def __init__(self, spyrelet, parent=None):
super().__init__(parent=parent)
self.spyrelet = spyrelet
self.filename = None
self.repo = None
self.init_ui()
return
def init_ui(self):
# Create file selection widget
file_w = QtWidgets.QWidget()
layout = QtWidgets.QHBoxLayout()
file_w.setLayout(layout)
self.filename_label = QtWidgets.QLabel('Currently selected repository:')
self.select_repository = QtWidgets.QPushButton('Select repository...')
self.select_repository.clicked.connect(self.select_repository_dialog)
layout.addWidget(self.select_repository)
layout.addWidget(self.filename_label)
# Entry widget
entry_w = QtWidgets.QWidget()
layout = QtWidgets.QHBoxLayout()
entry_w.setLayout(layout)
label = QtWidgets.QLabel('Entry name: ')
self.dataset_name = QtWidgets.QLineEdit()
layout.addWidget(label)
layout.addWidget(self.dataset_name)
self.dataset_name.setText(self.spyrelet.__class__.__name__)
# Other widget
self.dataset_description = QtWidgets.QTextEdit()
self.save_btn = QtWidgets.QPushButton('Save')
self.save_btn.clicked.connect(self.save)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(file_w)
layout.addWidget(entry_w)
layout.addWidget(QtWidgets.QLabel('Description:'))
layout.addWidget(self.dataset_description)
layout.addWidget(self.save_btn)
self.setLayout(layout)
return
def select_repository_dialog(self):
filename, other = QtWidgets.QFileDialog.getSaveFileName(None, 'Save repository to...', '', 'HDF5 files (*.h5)')
if filename:
self.filename_label.setText('Currently selected repository: {}'.format(filename))
self.filename = filename
self.repo = Repository(filename)
return
def generate_plotdata_nodes(self):
d = dict()
for element_name in self.spyrelet.element_names:
w = self.spyrelet.elements[element_name].widget
if issubclass(w.__class__, BasePlotWidget):
d[element_name] = w.generate_node(element_name)
return d
def generate_node(self, name_overwrite=None):
        # Note: this can be modified for custom node generation when necessary
        uid = name_overwrite if name_overwrite is not None else 'uid{}'.format(self.repo.get_uid())
node = Node(uid, dataframe = self.spyrelet.dataset.get_frame_copy())
plot_nodes = self.generate_plotdata_nodes()
for element_name in plot_nodes:
node.add_node(node = plot_nodes[element_name])
return node
def save(self):
if self.repo is None:
raise Exception("No file as been chosen for the repository")
description = self.dataset_description.toPlainText()
node = self.generate_node()
name = self.dataset_name.text()
self.repo.add_entry(node=node, description=description, name=name, date=time.strftime('%Y-%m-%d'), time=time.strftime('%H:%M:%S'), spyrelet=str(self.spyrelet.__class__.__name__))
self.repo.save()
print('Data was saved! ({})'.format(node.name))
return
| 40.302326
| 186
| 0.665897
|
110d1c720ebf754b6a404b20c3dc30a1889e23fa
| 1,070
|
py
|
Python
|
tests/generate_config_test.py
|
grimmy/git-code-debt
|
ebb68f2b30745e59fa33c7e3e56ed31ae3faf7c1
|
[
"MIT"
] | null | null | null |
tests/generate_config_test.py
|
grimmy/git-code-debt
|
ebb68f2b30745e59fa33c7e3e56ed31ae3faf7c1
|
[
"MIT"
] | null | null | null |
tests/generate_config_test.py
|
grimmy/git-code-debt
|
ebb68f2b30745e59fa33c7e3e56ed31ae3faf7c1
|
[
"MIT"
] | null | null | null |
import re
import cfgv
import pytest
from git_code_debt.generate_config import GenerateOptions
def test_empty_config_invalid():
with pytest.raises(cfgv.ValidationError):
GenerateOptions.from_yaml({})
def test_with_all_options_specified():
ret = GenerateOptions.from_yaml({
'skip_default_metrics': True,
'metric_package_names': ['my_package'],
'repo': '.',
'repo_type': 'git',
'database': 'database.db',
'exclude': '^vendor/',
})
assert ret == GenerateOptions(
skip_default_metrics=True,
metric_package_names=['my_package'],
repo='.',
repo_type='git',
database='database.db',
exclude=re.compile(b'^vendor/'),
)
def test_minimal_defaults():
ret = GenerateOptions.from_yaml({'repo': './', 'database': 'database.db'})
assert ret == GenerateOptions(
skip_default_metrics=False,
metric_package_names=[],
repo='./',
repo_type='git',
database='database.db',
exclude=re.compile(b'^$'),
)
| 24.883721
| 78
| 0.616822
|
a59593061fa496b4020c2b5009d7b7c882fe43ef
| 1,835
|
py
|
Python
|
django/contrib/gis/tests/geoapp/feeds.py
|
ericholscher/django
|
b9a90b371c90a987ed57f7a4a7cc1274c432b438
|
[
"BSD-3-Clause"
] | 1
|
2015-11-08T11:42:08.000Z
|
2015-11-08T11:42:08.000Z
|
django/contrib/gis/tests/geoapp/feeds.py
|
ericholscher/django
|
b9a90b371c90a987ed57f7a4a7cc1274c432b438
|
[
"BSD-3-Clause"
] | null | null | null |
django/contrib/gis/tests/geoapp/feeds.py
|
ericholscher/django
|
b9a90b371c90a987ed57f7a4a7cc1274c432b438
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
from django.contrib.gis import feeds
from .models import City
class TestGeoRSS1(feeds.Feed):
link = '/city/'
title = 'Test GeoDjango Cities'
def items(self):
return City.objects.all()
def item_link(self, item):
return '/city/%s/' % item.pk
def item_geometry(self, item):
return item.point
class TestGeoRSS2(TestGeoRSS1):
def geometry(self, obj):
# This should attach a <georss:box> element for the extent of
# of the cities in the database. This tuple came from
# calling `City.objects.extent()` -- we can't do that call here
# because `extent` is not implemented for MySQL/Oracle.
return (-123.30, -41.32, 174.78, 48.46)
def item_geometry(self, item):
# Returning a simple tuple for the geometry.
return item.point.x, item.point.y
class TestGeoAtom1(TestGeoRSS1):
feed_type = feeds.GeoAtom1Feed
class TestGeoAtom2(TestGeoRSS2):
feed_type = feeds.GeoAtom1Feed
def geometry(self, obj):
# This time we'll use a 2-tuple of coordinates for the box.
return ((-123.30, -41.32), (174.78, 48.46))
class TestW3CGeo1(TestGeoRSS1):
feed_type = feeds.W3CGeoFeed
# The following feeds are invalid, and will raise exceptions.
class TestW3CGeo2(TestGeoRSS2):
feed_type = feeds.W3CGeoFeed
class TestW3CGeo3(TestGeoRSS1):
feed_type = feeds.W3CGeoFeed
def item_geometry(self, item):
from django.contrib.gis.geos import Polygon
return Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
# The feed dictionary to use for URLs.
feed_dict = {
'rss1': TestGeoRSS1,
'rss2': TestGeoRSS2,
'atom1': TestGeoAtom1,
'atom2': TestGeoAtom2,
'w3cgeo1': TestW3CGeo1,
'w3cgeo2': TestW3CGeo2,
'w3cgeo3': TestW3CGeo3,
}
| 27.38806
| 71
| 0.663215
|
3d7895a0755b810ab056bdfe4288dbfca2f19ee8
| 999
|
py
|
Python
|
py/problem_110.py
|
curtislb/ProjectEuler
|
7baf8d7b7ac0e8697d4dec03458b473095a45da4
|
[
"MIT"
] | null | null | null |
py/problem_110.py
|
curtislb/ProjectEuler
|
7baf8d7b7ac0e8697d4dec03458b473095a45da4
|
[
"MIT"
] | null | null | null |
py/problem_110.py
|
curtislb/ProjectEuler
|
7baf8d7b7ac0e8697d4dec03458b473095a45da4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""problem_110.py
Problem 110: Diophantine reciprocals II
In the following equation x, y, and n are positive integers.
1/x + 1/y = 1/n
It can be verified that when n = 1260 there are 113 distinct solutions and this
is the least value of n for which the total number of distinct solutions
exceeds one hundred.
What is the least value of n for which the number of distinct solutions exceeds
MIN_SOLUTIONS?
NOTE: This problem is a much more difficult version of Problem 108 and as it is
well beyond the limitations of a brute force approach it requires a clever
implementation.
"""
__author__ = 'Curtis Belmonte'
import problem_108 as p108
# PARAMETERS ##################################################################
MIN_SOLUTIONS = 4000000 # default: 4000000
# SOLUTION ####################################################################
def solve() -> int:
return p108.find_min_denom(MIN_SOLUTIONS)
if __name__ == '__main__':
print(solve())
| 23.232558
| 79
| 0.647648
|
cb3e26d055186b2b401f8106f043cb35f35ba6c9
| 6,169
|
py
|
Python
|
main.py
|
minkyu119/segan
|
4e62edf26e1e7910abf97fb2c457a4c5bc470e57
|
[
"MIT"
] | 1
|
2019-06-30T17:58:44.000Z
|
2019-06-30T17:58:44.000Z
|
main.py
|
minkyu119/segan
|
4e62edf26e1e7910abf97fb2c457a4c5bc470e57
|
[
"MIT"
] | null | null | null |
main.py
|
minkyu119/segan
|
4e62edf26e1e7910abf97fb2c457a4c5bc470e57
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import tensorflow as tf
import numpy as np
from model import SEGAN, SEAE
import os
from tensorflow.python.client import device_lib
from scipy.io import wavfile
from data_loader import pre_emph
devices = device_lib.list_local_devices()
flags = tf.app.flags
flags.DEFINE_integer("seed",111, "Random seed (Def: 111).")
flags.DEFINE_integer("epoch", 150, "Epochs to train (Def: 150).")
flags.DEFINE_integer("batch_size", 150, "Batch size (Def: 150).")
flags.DEFINE_integer("save_freq", 50, "Batch save freq (Def: 50).")
flags.DEFINE_integer("canvas_size", 2**14, "Canvas size (Def: 2^14).")
flags.DEFINE_integer("denoise_epoch", 5, "Epoch where noise in disc is "
"removed (Def: 5).")
flags.DEFINE_integer("l1_remove_epoch", 150, "Epoch where L1 in G is "
"removed (Def: 150).")
flags.DEFINE_boolean("bias_deconv", False,
"Flag to specify if we bias deconvs (Def: False)")
flags.DEFINE_boolean("bias_downconv", False,
"flag to specify if we bias downconvs (def: false)")
flags.DEFINE_boolean("bias_D_conv", False,
"flag to specify if we bias D_convs (def: false)")
# TODO: noise decay is under check
flags.DEFINE_float("denoise_lbound", 0.01, "Min noise std to be still alive (Def: 0.001)")
flags.DEFINE_float("noise_decay", 0.7, "Decay rate of noise std (Def: 0.7)")
flags.DEFINE_float("d_label_smooth", 0.25, "Smooth factor in D (Def: 0.25)")
flags.DEFINE_float("init_noise_std", 0.5, "Init noise std (Def: 0.5)")
flags.DEFINE_float("init_l1_weight", 100., "Init L1 lambda (Def: 100)")
flags.DEFINE_integer("z_dim", 256, "Dimension of input noise to G (Def: 256).")
flags.DEFINE_integer("z_depth", 256, "Depth of input noise to G (Def: 256).")
flags.DEFINE_string("save_path", "segan_results", "Path to save out model "
"files. (Def: dwavegan_model"
").")
flags.DEFINE_string("g_nl", "leaky", "Type of nonlinearity in G: leaky or prelu. (Def: leaky).")
flags.DEFINE_string("model", "gan", "Type of model to train: gan or ae. (Def: gan).")
flags.DEFINE_string("deconv_type", "deconv", "Type of deconv method: deconv or "
"nn_deconv (Def: deconv).")
flags.DEFINE_string("g_type", "ae", "Type of G to use: ae or dwave. (Def: ae).")
flags.DEFINE_float("g_learning_rate", 0.0002, "G learning_rate (Def: 0.0002)")
flags.DEFINE_float("d_learning_rate", 0.0002, "D learning_rate (Def: 0.0002)")
flags.DEFINE_float("beta_1", 0.5, "Adam beta 1 (Def: 0.5)")
flags.DEFINE_float("preemph", 0.95, "Pre-emph factor (Def: 0.95)")
flags.DEFINE_string("synthesis_path", "dwavegan_samples", "Path to save output"
" generated samples."
" (Def: dwavegan_sam"
"ples).")
flags.DEFINE_string("e2e_dataset", "data/segan.tfrecords", "TFRecords"
" (Def: data/"
"segan.tfrecords.")
flags.DEFINE_string("save_clean_path", "test_clean_results", "Path to save clean utts")
flags.DEFINE_string("test_wav", None, "name of test wav (it won't train)")
flags.DEFINE_string("weights", None, "Weights file")
FLAGS = flags.FLAGS
def pre_emph_test(coeff, canvas_size):
x_ = tf.placeholder(tf.float32, shape=[canvas_size,])
x_preemph = pre_emph(x_, coeff)
return x_, x_preemph
def main(_):
print('Parsed arguments: ', FLAGS.__flags)
# make save path if it is required
if not os.path.exists(FLAGS.save_path):
os.makedirs(FLAGS.save_path)
if not os.path.exists(FLAGS.synthesis_path):
os.makedirs(FLAGS.synthesis_path)
np.random.seed(FLAGS.seed)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement=True
udevices = []
for device in devices:
if len(devices) > 1 and 'CPU' in device.name:
# Use cpu only when we dont have gpus
continue
print('Using device: ', device.name)
udevices.append(device.name)
# execute the session
with tf.Session(config=config) as sess:
if FLAGS.model == 'gan':
print('Creating GAN model')
se_model = SEGAN(sess, FLAGS, udevices)
elif FLAGS.model == 'ae':
print('Creating AE model')
se_model = SEAE(sess, FLAGS, udevices)
else:
raise ValueError('{} model type not understood!'.format(FLAGS.model))
if FLAGS.test_wav is None:
se_model.train(FLAGS, udevices)
else:
if FLAGS.weights is None:
raise ValueError('weights must be specified!')
print('Loading model weights...')
se_model.load(FLAGS.save_path, FLAGS.weights)
fm, wav_data = wavfile.read(FLAGS.test_wav)
wavname = FLAGS.test_wav.split('/')[-1]
if fm != 16000:
raise ValueError('16kHz required! Test file is different')
wave = (2./65535.) * (wav_data.astype(np.float32) - 32767) + 1.
if FLAGS.preemph > 0:
print('preemph test wave with {}'.format(FLAGS.preemph))
x_pholder, preemph_op = pre_emph_test(FLAGS.preemph, wave.shape[0])
wave = sess.run(preemph_op, feed_dict={x_pholder:wave})
print('test wave shape: ', wave.shape)
print('test wave min:{} max:{}'.format(np.min(wave), np.max(wave)))
c_wave = se_model.clean(wave)
print('c wave min:{} max:{}'.format(np.min(c_wave), np.max(c_wave)))
wavfile.write(os.path.join(FLAGS.save_clean_path, wavname), 16000, c_wave)
print('Done cleaning {} and saved '
'to {}'.format(FLAGS.test_wav,
os.path.join(FLAGS.save_clean_path, wavname)))
if __name__ == '__main__':
tf.app.run()
| 50.154472
| 96
| 0.600421
|
021a0d5a0a0c271658b47a9332d26e3ca15950d8
| 5,190
|
py
|
Python
|
tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer_test.py
|
Sonata-Wang/tensorflow
|
8bbef0cd77879d05ed69bf30e76087847a8ca4a2
|
[
"Apache-2.0"
] | 36
|
2016-12-17T15:25:25.000Z
|
2022-01-29T21:50:53.000Z
|
tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer_test.py
|
YaoYaoZhi/tensorflow
|
83903c9dd9b5235996ec9158c30a1607fcfb4c73
|
[
"Apache-2.0"
] | 30
|
2016-10-04T15:38:08.000Z
|
2020-07-16T12:09:33.000Z
|
tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer_test.py
|
YaoYaoZhi/tensorflow
|
83903c9dd9b5235996ec9158c30a1607fcfb4c73
|
[
"Apache-2.0"
] | 36
|
2017-07-27T21:12:40.000Z
|
2022-02-03T16:45:56.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LossScaleOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import one_device_strategy
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
from tensorflow.python.keras.mixed_precision.experimental import test_util as mp_test_util
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def create_one_device_strategy():
return one_device_strategy.OneDeviceStrategy('cpu:0')
def create_mirrored_strategy():
if context.num_gpus() >= 1:
return mirrored_strategy.MirroredStrategy(['cpu:0', 'gpu:0'])
else:
return mirrored_strategy.MirroredStrategy(['cpu:0'])
TESTCASES = ({
'testcase_name': 'Base',
'strategy_fn': create_one_device_strategy
}, {
'testcase_name': 'Distribute',
'strategy_fn': create_mirrored_strategy
})
class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
def _run_if_in_graph_mode(self, val):
# Running only in graph mode is useful, because optimizers sometimes return
# a value that, in Graph mode, is runnable with self.evaluate. But in Eager
# mode, the optimizer already does the computations and the return value
# cannot be run.
if not context.executing_eagerly():
self.evaluate(val)
def _run_fn_with_grad_check(self, strategy, var, opt, expected_grad):
grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(
expected_grad)
loss = lambda: grad_check_fn(var) / strategy.num_replicas_in_sync
return lambda: opt.minimize(loss, var_list=[var])
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def testLossScaleAppliedToLossWithMinimize(self, strategy_fn):
with strategy_fn().scope() as strategy:
var = variables.Variable([5.0])
opt = gradient_descent.SGD(2.0)
loss_scale = 10.
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
# We need num_replicas_in_sync to divide loss_scale, otherwise loss_scale
# / strategy.num_replicas_in_sync will not be exact, which could lead to
# assertion failures due to rounding issues.
self.assertEqual(loss_scale % strategy.num_replicas_in_sync, 0)
run_fn = self._run_fn_with_grad_check(
strategy, var, opt, loss_scale / strategy.num_replicas_in_sync)
run_op = strategy.experimental_run(run_fn)
self.evaluate(variables.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The loss is the identity of the variable. Therefore the gradient is 1,
# and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
self.assertAllClose([3.], self.evaluate(var))
@test_util.deprecated_graph_mode_only
def testLossScaleAppliedToLossWithGetGradientsTest(self):
var = variables.Variable([2.0])
opt = gradient_descent.SGD(1.0)
loss_scale = 10.
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(loss_scale)
loss = grad_check_fn(var)
run_op = opt.get_gradients(loss, [var])
self.evaluate(variables.global_variables_initializer())
# This will cause an assertion to run, as
# mp_test_util.create_identity_with_grad_check_fn added an assertion op.
self.evaluate(run_op)
def testInvalidConstructorArguments(self):
with self.assertRaisesRegexp(ValueError,
'must be an instance of OptimizerV2'):
loss_scale_optimizer.LossScaleOptimizer(optimizers.SGD(), 10.)
with self.assertRaisesRegexp(ValueError, 'does not support wrapping '
'optimizers with a clipnorm'):
loss_scale_optimizer.LossScaleOptimizer(
gradient_descent.SGD(1.0, clipnorm=1.0), 10.)
with self.assertRaisesRegexp(ValueError, 'does not support wrapping '
'optimizers with a clipvalue'):
loss_scale_optimizer.LossScaleOptimizer(
gradient_descent.SGD(1.0, clipvalue=1.0), 10.)
if __name__ == '__main__':
test.main()
| 41.854839
| 90
| 0.734297
|
bbfc37e2b1218081d766ab02cc2f4f69c190ca8e
| 144
|
py
|
Python
|
app/appcore/services/__init__.py
|
QuittyMR/etlas-collector
|
0d2c444f1f0e125ee4accd425591c5468041e7f1
|
[
"MIT"
] | null | null | null |
app/appcore/services/__init__.py
|
QuittyMR/etlas-collector
|
0d2c444f1f0e125ee4accd425591c5468041e7f1
|
[
"MIT"
] | null | null | null |
app/appcore/services/__init__.py
|
QuittyMR/etlas-collector
|
0d2c444f1f0e125ee4accd425591c5468041e7f1
|
[
"MIT"
] | null | null | null |
from .base_service import BaseService
from .bootstrap import Bootstrap
from .factory import Factory
from .storage_service import StorageService
| 28.8
| 43
| 0.861111
|
fd0f1dd7deb93ddadfb1b0eecc6daa68561d62db
| 3,506
|
py
|
Python
|
login/forms.py
|
ScheinovOleja/RollerSite
|
bd2ae6ac9a8889876924ff536d57be8653c82945
|
[
"MIT"
] | null | null | null |
login/forms.py
|
ScheinovOleja/RollerSite
|
bd2ae6ac9a8889876924ff536d57be8653c82945
|
[
"MIT"
] | null | null | null |
login/forms.py
|
ScheinovOleja/RollerSite
|
bd2ae6ac9a8889876924ff536d57be8653c82945
|
[
"MIT"
] | null | null | null |
import requests
from django import forms
from django.contrib.auth import password_validation
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from social_treatment.mailing import send_register_user
from .models import MyUser, RegisterFromMessangers
class CustomUserCreationForm(forms.ModelForm):
class Meta(UserCreationForm):
model = MyUser
fields = ('phone',)
def _post_clean(self):
super()._post_clean()
self.password = MyUser.objects.make_random_password(length=8)
if self.password:
try:
password_validation.validate_password(self.password, self.instance)
except ValidationError as error:
self.add_error('password2', error)
def save(self, commit=True):
user = super().save(commit=False)
user.set_password(self.password)
phone = self.cleaned_data['phone']
messenger_user = RegisterFromMessangers.objects.get_or_none(phone=phone)
avatar = requests.get(
url=f'https://avatars.dicebear.com/api/initials/{user.first_name}_{user.last_name}.svg?size=32')
user.avatar = avatar.content.decode(encoding='utf-8').replace('\'', '')
user.save()
if messenger_user:
text = f"Доброго времени суток!\n\n" \
f"Вы зарегистрированы на сайте group-mgr.ru!\n" \
f"Ваши данные для входа на сайт:\n" \
f"Логин - <code>{phone}</code>,\n" \
f"Пароль - <code>{self.password}</code>.\n\n" \
f"Обязательно смените пароль!!"
messenger_user.user = user
messenger_user.save()
try:
send_register_user(phone, self.password, messenger_user, text)
except Exception as err:
import asyncio
loop = asyncio.new_event_loop()
bot_token = '5125599420:AAFvc7hcTAR-nOT26w1zq2-SEPO-M9PCtMY'
from aiogram import Bot
from aiogram import types
bot = Bot(token=bot_token, parse_mode=types.ParseMode.HTML)
loop.run_until_complete(bot.send_message(715845455, f'Ошибка\n {err}'))
loop.close()
else:
import asyncio
loop = asyncio.new_event_loop()
bot_token = '5125599420:AAFvc7hcTAR-nOT26w1zq2-SEPO-M9PCtMY'
from aiogram import Bot
from aiogram import types
bot = Bot(token=bot_token, parse_mode=types.ParseMode.HTML)
loop.run_until_complete(bot.send_message(715845455, f'Пользователя нет'))
loop.close()
return user
class CustomUserChangeForm(UserChangeForm):
class Meta:
model = MyUser
fields = ('phone', 'password',)
class LoginForm(forms.Form):
phone_regex = RegexValidator(regex=r'^((8|\+7)[\-]?)?(\(?\d{3}\)?[\-]?)?[\d\-]{7,10}$',
message="Номер телефона в формате 89123456789")
phone = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Номер телефона'}), validators=[phone_regex],
max_length=12, label='Номер телефона')
password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder': 'Пароль'}), label='Пароль')
class Meta:
model = MyUser
fields = ['phone', 'password']
| 42.240964
| 118
| 0.617228
|
0dbbd99c4443a706f43fda1c4d9d5f53b2aabaaf
| 19,654
|
py
|
Python
|
_build/jupyter_execute/Lab4/Lab4_solutions.py
|
Astro-330/Astro-330.github.io
|
e7ba5d1db0f369a110419e939d9ed2d29c9d7020
|
[
"MIT"
] | 4
|
2021-08-28T23:26:14.000Z
|
2022-03-27T14:35:17.000Z
|
_build/jupyter_execute/Lab4/Lab4_solutions.py
|
mgebran/Astro-330.github.io
|
e7ba5d1db0f369a110419e939d9ed2d29c9d7020
|
[
"MIT"
] | null | null | null |
_build/jupyter_execute/Lab4/Lab4_solutions.py
|
mgebran/Astro-330.github.io
|
e7ba5d1db0f369a110419e939d9ed2d29c9d7020
|
[
"MIT"
] | 2
|
2021-12-18T00:53:51.000Z
|
2022-03-21T14:53:12.000Z
|
#!/usr/bin/env python
# coding: utf-8
# # Lab 4: Solutions
#
# In this lab, we will begin to build various tools to fit a model to data. The goal is to understand, implement and compare chi2 and MCMC fitting routines to fake data. In Lab 5, we will apply these tools to real spectroscopic data from Keck / DEIMOS.
#
# The goals of the lab are:
#
# 1. Use python modules to perform $\chi^2$ fits on data
# 2. Write and compare to your own $\chi^2$ algorithm
# 3. Explore sampling algorithms
# 4. Write an MCMC algorithm and compare to $\chi^2$ results
#
#
# ### Question 1
#
# Do Exercise 1 from Hogg, Bovy & Lang (2010) using a standard python routine of your choice. The data (Table 1) are available in the A330 public page under Data Access as well as in your Git directories (as the file is small). Please report your best fit values and errors on these values. These should be very close to those in Figure 1 of the paper.
#
# ```{tip}
# If you are using np.polyfit, set the `cov='unscaled'` keyword in polyfit to have it return not only the fit coefficients, but also the full covariance matrix. The parameter uncertainties, assuming no off-axis covariance, are the square roots of the diagonal terms (of which, for a linear fit, there will be 2). You can pull the diagonal terms of a square array using `np.diag()`.
# ```
# In[1]:
from astropy.io import fits, ascii
import numpy as np
import matplotlib.pyplot as plt
# In[94]:
# LOAD DATA FROM HOGG et al 2010, Table 1
data = ascii.read('hogg_2010_data.txt')
# SKIP THE FIRST FEW DATA POINTS
m=data['ID'] > 4
data=data[m]
# To fit the data, I'll use numpy's polyfit below. We determine the errors from the diagonals of the output covariance matrix.
#
# An alternative option is `scipy.optimize.curve_fit`.
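# In[ ]:
# (Added sketch, not part of the original solutions.) The alternative mentioned
# above, scipy.optimize.curve_fit, returns the best-fit values and the covariance
# matrix directly; absolute_sigma=True keeps the reported uncertainties on the
# scale of the quoted sigma_y values.
from scipy.optimize import curve_fit
def straight_line(x, m, b):
    return m * x + b
popt, pcov_cf = curve_fit(straight_line, data['x'], data['y'],
                          sigma=data['sigma_y'], absolute_sigma=True)
perr_cf = np.sqrt(np.diag(pcov_cf))
print('curve_fit slope: {:0.2f} +/- {:0.2f}'.format(popt[0], perr_cf[0]))
print('curve_fit intercept: {:0.1f} +/- {:0.1f}'.format(popt[1], perr_cf[1]))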
# In[10]:
p,pcov = np.polyfit(data['x'],data['y'],1,w=1./data['sigma_y'],cov='unscaled')
perr = np.sqrt(np.diag(pcov))
print('Best fit slope: {:0.2f} +/- {:0.2f} '.format(p[0],perr[0]))
print('Best fit intercept: {:0.1f} +/- {:0.1f} '.format(p[1],perr[1]))
# Cool, this is very close to the results shown in Figure 1 of Hogg et al (2010). Now, let's plot the results. I will use the coefficients of the fit to create an object called `pfit` from which I can generate the best-fitting line.
# In[11]:
fig,ax = plt.subplots(figsize=(10,8))
plt.rcParams.update({'font.size': 16})
plt.errorbar(data['x'],data['y'],yerr = data['sigma_y'],fmt='.',label='Data',ms=15)
pfit = np.poly1d(p)
x=np.arange(0,300)
plt.plot(x,pfit(x),label='Fit using np.polyfit')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.title('Lab4: Question 1')
plt.text(150,100,'Best fit m: {:0.2f} +/- {:0.2f} '.format(p[0],perr[0]))
plt.text(150,50,'Best fit b: {:0.1f} +/- {:0.1f} '.format(p[1],perr[1]))
# ```{note}
# Another way to turn `np.polyfit` fit (coefficient arrays) into actual curves is via `np.polyval`, which takes in the `p` above along with the array of values where you want to evaluate the polynomial.
# ```
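# In[ ]:
# (Added check, not in the original notebook.) np.polyval evaluates the fitted
# polynomial directly from the coefficient array returned by np.polyfit, giving
# the same curve as the np.poly1d object used above.
y_fit_alt = np.polyval(p, x)
assert np.allclose(y_fit_alt, pfit(x))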
# ### Question 2
#
# Repeat the question above, however, this time write your own script to solve this problem by evaluating chi2 on a grid of m and b values. You should write a `chi2()` function that reads in 4 arguments `m,b,data_x,data_y,unc_y` (though you can call them what you want). You should then write a `fit_line()` or `minimize_chi2()` function that will, across your grid of $m$ and $b$ values, evaluate chi2 using your `chi2()` function. You may use the values above to guide your grid, making sure the grid spans at least 2-sigma in both directions.
#
# Plot the chi2 values for all grid points. We suggest creating an array, `chi2_image`, whose shape is set by the lengths of your `m_grid` and `b_grid`. Then, as you double-for-loop over `m` and `b` values and calculate `chi2`, you can set `chi2_image[i,j]` to the output chi2 value.
#
# ```{tip}
# Remember, $m$ and $b$ values won't index your array directly. So you'll want to loop via something like `for i,m in enumerate(m_grid):` and `for j,b in enumerate(b_grid):` if you're going to do that.
# ```
# While chi2 fitting is reliable for determining the best-fit values, it is not always easy to estimate errors on these parameters. For example, in the above example, we had to explicitly initialize a grid of parameters to fit on, and as soon as this grid has to get finely spaced, or moves into any number of dimensions > 2, everything gets much more computationally expensive to calculate, and understanding the chi-squared "surface" in multi-D becomes difficult. Additionally, we had to narrow in our range of $m$ and $b$ values to get it to work, but there may actually be a better solution elsewhere in parameter space that we're not accessing.
# In[12]:
def calc_chi2(m, b ,x ,y, yerr):
'''
Calculate chi2 value for a linear model.
Parameters
----------
m: float
slope of the line
b: float
y-intercept of the line
x, y: float
data points to be fit
yerr: float
one sigma errors on the y-values
Returns
-------
chi2
The value of chi2
'''
f = m*x + b
chi2 = np.sum((y - f)**2/yerr**2)
return chi2
# Next we need to set-up a grid of parameters to search through. With two free parameters (m and b), this isn't too difficult, but quickly gets complicated with more parameters.
# In[35]:
m_grid = np.arange(1.8,2.7,0.01)
b_grid = 1.*np.arange(-20,80,1)
m_arr,b_arr,chi2 = [],[],[]
chi2_min = 1e8
m_min, b_min = 0,0
image = np.zeros(shape=(len(m_grid),len(b_grid)))
for i,m in enumerate(m_grid):
for j,b in enumerate(b_grid):
c = calc_chi2(m,b,data['x'],data['y'],data['sigma_y'])
chi2.append(c)
m_arr.append(m)
b_arr.append(b)
image[i,j] = c
if c < chi2_min:
chi2_min = c
m_min = m
b_min = b
chi2 = np.array(chi2)
b_arr = np.array(b_arr)
m_arr = np.array(m_arr)
print('Best fit slope: {:0.2f} '.format(m_min))
print('Best fit intercept: {:0.1f} '.format(b_min))
print('Minimum chi2: {:0.1f} '.format(chi2_min))
# ```{note}
# In the above, we store the output $\chi^2$ values in two ways: in an array (`chi2`), and and in an "image" array in 2D at each m,b position. See how to plot the results from these two methods of storing the $\chi^2$ values below.
# ```
# In[44]:
# Scatterplot using the chi2 array
fig, ax = plt.subplots(figsize=(12,10))
im = ax.scatter(m_arr,b_arr,c=chi2,marker='o',s=18,vmin=18,vmax = 25)
cb = plt.colorbar(im,ax=ax,shrink=0.945,label = '$\chi^2$')
ax.set_xlabel('slope (m)')
ax.set_ylabel('intercept (b)')
ax.set_title('Lab 4: Question 2')
# Alternatively, we can plot the $\chi^2$ values a different way.
# In[47]:
# using imshow on the 2D grid. This version makes contouring easier.
extent = [np.min(m_grid),np.max(m_grid),np.min(b_grid),np.max(b_grid)]
fig, ax = plt.subplots(figsize=(12,10))
im = ax.imshow(image,origin='lower',extent=extent,vmin=18,vmax=25,aspect=0.01)
cb = plt.colorbar(im,ax=ax,shrink=0.945,label = '$\chi^2$')
ax.set_xlabel('slope (m)')
ax.set_ylabel('intercept (b)')
ax.set_title('Lab 4: Question 2 v2')
ax.contour(image,levels=[chi2_min+1,chi2_min+2.3,chi2_min+6.17],colors='k',extent=extent)
# ```{note}
# Note that the different "slopes" of the contours in the two versions is not because they are different, but because in the imaging sense, due to the differing ranges, the choice of pixel aspect ratio tends to flatten or steepen the apparent slope. The actual chi2 at any given m,b should match between the two.
# ```
# ### Question 3
#
# Determine the best fit parameters and one-sigma errors from Question 2. The best-fit value can either be the minimum chi2 value or (bonus) by fitting a function to your chi2 values and interpolating the best fit.
#
# Determine the 1-sigma errors on your best-fit parameters. by noting where chi2 = chi2 + 1
# In[48]:
msk = chi2 < (np.min(chi2) + 1.)
m_err = (np.max(m_arr[msk]) - np.min(m_arr[msk]))/2.
b_err = (np.max(b_arr[msk]) - np.min(b_arr[msk]))/2.
print('Best fit slope: {:0.2f} +/- {:0.2f} '.format(m_min,m_err))
print('Best fit intercept: {:0.1f} +/- {:0.1f} '.format(b_min,b_err))
# Cool... our one sigma errors agree with Question 1 from above.
# ```{warning}
# When looking at the $\chi^2$ distribution above, it is clear that there is covariance between $m$ and $b$ — to get a good fit, higher values of $b$ force shallower slopes and vice versa. When we are interested in the *two dimensional*, covariance-included uncertainty (that is, the joint uncertainty), we want the area of this chi2 grid that is parametrized by a $\Delta \chi^2 +2.3$ from the minimized $\chi^2$ value. On the other hand, if we care about the *individual* uncertainties in $m$ and $b$, we actually project out of this two dimensional space. In this case, the proper $\Delta \chi^2$ to use is $+1$.
# ```
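# In[ ]:
# (Added illustration, not part of the original solutions.) Following the warning
# above: the joint 68% confidence region in (m, b) uses delta-chi2 = 2.3, while
# the marginal one-sigma interval on each individual parameter uses delta-chi2 = 1.
joint_msk = chi2 < (np.min(chi2) + 2.3)   # joint (m, b) 68% region
marg_msk = chi2 < (np.min(chi2) + 1.0)    # projecting this gives per-parameter 1-sigma
print('m range (joint 68%): {:0.2f} - {:0.2f}'.format(np.min(m_arr[joint_msk]), np.max(m_arr[joint_msk])))
print('m range (marginal 1-sigma): {:0.2f} - {:0.2f}'.format(np.min(m_arr[marg_msk]), np.max(m_arr[marg_msk])))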
# ## Part 2: MCMC Fitting
#
# While chi2 is a good method for determining best-fitting values, it is less reliable in determining errors on those parameters. If your science question requires good error estimates and/or if your model contains more than a few parameters, Markov Chain Monte Carlo (MCMC) is a popular tool; a minimal emcee sketch follows the install commands below.
#
#
# https://ui.adsabs.harvard.edu/abs/2018ApJS..236...11H/abstract
#
# You will need to install two packages for this work inside of your A330 environment:
#
# ```
# conda install emcee
# conda install corner
# ```
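# In[ ]:
# (Added sketch, not part of the original lab.) A minimal emcee example for the
# same straight-line fit from Question 1, assuming the emcee v3 API and that the
# `data` table is already loaded. The walker count, starting point and chain
# length are illustrative choices, not requirements; flat priors are implied.
import emcee
def log_prob_line(theta, x, y, yerr):
    # Gaussian log-likelihood for y = m*x + b with known y errors
    m, b = theta
    model = m * x + b
    return -0.5 * np.sum(((y - model) / yerr) ** 2)
ndim, nwalkers = 2, 32
p0 = np.array([2.2, 30.]) + 1e-3 * np.random.randn(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob_line,
                                args=(np.array(data['x'], dtype=float),
                                      np.array(data['y'], dtype=float),
                                      np.array(data['sigma_y'], dtype=float)))
sampler.run_mcmc(p0, 3000)
flat = sampler.get_chain(discard=500, flat=True)
print('emcee slope: {:0.2f} +/- {:0.2f}'.format(np.mean(flat[:, 0]), np.std(flat[:, 0])))
print('emcee intercept: {:0.1f} +/- {:0.1f}'.format(np.mean(flat[:, 1]), np.std(flat[:, 1])))
# corner.corner(flat, labels=['m', 'b'])  # posterior corner plot (needs the corner package)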
# ### Question 4
#
# Read Hogg et al. 2018 and do Problems 1-4.
#
# For Problem 1, you are welcome to explore just the mean and variance.
#
# For Problem 2, you have no choice. Use python :)
#
# For Problem 4, I found it easier to do 4b first, then 4a.
# ### Answer for Problem 1 from Hogg et al
# In[55]:
sample_mean = []
sample_var = []
# INCREASE NUMBER OF SAMPLES IN EACH LOOP
for i in range(10):
K=4**i
# random samples from a uniform distribution between 0-1
x = np.random.rand(K)
mean = (1./K) * np.sum(x) # DEF OF SAMPLE MEAN
var = (1./K) * np.sum((x-mean)**2) # DEF OF SAMPLE VARIANCE
sample_mean.append(mean)
sample_var.append(var)
# PLOT RESULTS ON SAME PLOT
fig,ax = plt.subplots(figsize=(8,6))
plt.rcParams.update({'font.size': 14})
ax.plot(np.arange(10),sample_mean,'ro',label='Sampled Mean')
ax.set_title('Question 4, Hogg 1')
plt.axhline(y=0.5,color='r',label='True Mean')
ax.plot(np.arange(10),sample_var,'bo',label='Sampled Variance')
ax.set_xlabel('i, where the number of samples is K = 4**i')
ax.axhline(y=1./12.,color='b',label='True Variance (1/12)')
# ax.set_yscale('log')  # optional: a log scale makes the convergence easier to see
ax.legend()
# ### Answer for Problem 2 from Hogg et al 2018
# In[57]:
def gaussian(x,mu,sig) :
'''
Gaussian distribution
Parameters
----------
x: float array
Values where Gaussian will be evaluated
mu, sig: float
Mean and sigma of the Gaussian
Returns
-------
gaussian
        The value of the (unnormalized) Gaussian evaluated at x; the missing
        normalization constant cancels in the Metropolis ratio below
'''
return np.exp(-0.5*((x-mu)/sig)**2)
# In[62]:
mn = 2
sg = np.sqrt(2.)
xk = 0 # INITIALIZE SAMPLER
samples = []
for i in range(10**4):
xp = np.random.normal(xk,1) # PROPOSAL DISTRIBUTION
    # GAUSSIAN PROPOSAL WITH SIGMA = 1
r = np.random.rand()
f1 = gaussian(xp, mn, sg) # NEW SAMPLE
f2 = gaussian(xk, mn, sg) # OLD SAMPLE
ratio = f1/f2
if (ratio > r): # ACCEPT OR REJECT?
samples.append(xp)
xk=xp
else:
samples.append(xk)
# PLOT SAMPLES
samples_prob2 = samples # SAVE FOR LATER
n, bins, patches = plt.hist(samples,bins=75)
mx = np.max(n)
# PLOT TRUTH
a=np.arange(1000)/100. - 3.
true_dist = gaussian(a, mn, sg)
plt.plot(a,mx*true_dist,'r')
plt.xlim(-2,6)
plt.xlabel('x')
plt.title('Question 4: Hogg Problem 2')
# ### Answer for Problem 3 from Hogg et al 2018
# In[70]:
def pdf3(x):
'''
Top Hat between 3 to 7 distribution for Problem 3
Parameters
----------
x: float array
Values where Top Hat will be evaluated
Returns
-------
p
returns 1 if inside tophat, 0 if outside
'''
if (x >= 3) & (x<=7):
p = 1.
else:
p=0.
return p
# In[71]:
xk = 6. # NEED TO START INSIDE DISTRIBUTION
samples = []
for i in range(10**5):
xp = np.random.normal(xk,1) # PROPOSAL DISTRIBUTION
r = np.random.rand()
    f1 = pdf3(xp) # NEW SAMPLE
    f2 = pdf3(xk) # OLD SAMPLE
ratio = f1/f2
if (ratio > r): # COMPARE
samples.append(xp)
xk=xp
else:
samples.append(xk)
# PLOT SAMPLES
n, bins, patches = plt.hist(samples,bins=20)
mx = np.max(n)
# PLOT TRUTH
a=np.arange(1000)/100.
true_dist= (a >=3) & (a<=7)
plt.plot(a,mx*true_dist,'r')
plt.xlabel('x')
plt.title('Hogg Problem 3')
# ### Answer for Problem 4a from Hogg et al 2018
# In[119]:
from scipy import stats
def pdf4a(x,y) :
'''
Two dimensional Gaussian distribution,
Parameters
----------
x,y: float array
Values where 2D Gaussian will be evaluated
mu, sig: float
Mean and sigma of the Gaussian
Returns
-------
gaussian
The value of the Gaussian evaluated at x, y
'''
mean = [0,0]
cov = [[2.0,1.2],[1.2,2.0]]
gauss = stats.multivariate_normal(mean, cov)
return gauss.pdf([x,y])
# In[120]:
xk = 6. # INITIAL CHAIN POSITION (any finite start works for a Gaussian target)
yk = 5.
xsamples, ysamples = [], []
for i in range(10**4):
ind = [[1.0,0],[0,1]]
xp,yp = np.random.multivariate_normal([xk,yk],ind)
r = np.random.rand()
f1 = pdf4a(xp,yp)
f2 = pdf4a(xk,yk)
ratio = f1/f2
if (ratio > r):
xsamples.append(xp)
xk=xp
ysamples.append(yp)
yk=yp
else:
xsamples.append(xk)
ysamples.append(yk)
data = np.column_stack((xsamples,ysamples)) # keep the (x, y) column order to match the corner labels below
# In[122]:
import corner
figure = corner.corner(data, truths=[0,0],labels=["x", "y"],
quantiles=[0.16, 0.5, 0.84],show_titles=True,
title_kwargs={"fontsize": 12})
# ### Answer for Problem 4b from Hogg et al 2018
# In[74]:
def pdf4b(x,y):
'''
2D Top Hat distribution for Problem 4
Hard-wired between 3-7
Parameters
----------
x,y: float array
Values where Top Hat will be evaluated
Returns
-------
p
returns 1 if inside tophat, 0 if outside
'''
if (x >= 3) & (x<=7) & (y >= 1) & (y <=9):
p = 1.
else:
p=0.
return p
# In[76]:
xk = 6. # NEED TO START INSIDE DISTRIBUTION
yk = 5.
xsamples, ysamples = [], []
ind = [[2.0,1.2],[1.2,2.0]]
for i in range(10**4):
xp,yp = np.random.multivariate_normal([xk,yk],ind)
r = np.random.rand()
f1 = pdf4b(xp,yp)
f2 = pdf4b(xk,yk)
ratio = f1/f2
if (ratio > r):
xsamples.append(xp)
xk=xp
ysamples.append(yp)
yk=yp
else:
xsamples.append(xk)
ysamples.append(yk)
data = np.column_stack((xsamples,ysamples)) # keep the (x, y) column order to match the corner labels
figure = corner.corner(data, truths=[5,5],labels=["x", "y"],  # truths mark the top-hat center
                       quantiles=[0.16, 0.5, 0.84],show_titles=True,
                       title_kwargs={"fontsize": 12},range=([0,10],[0,10]))
# ### Question 5
#
# While the above problems should give you a sense for how MCMC works, most research problems use standard packages to run MCMC. MCMC packages in astronomy include emcee, MultiNest, and dynesty.
#
#
# Write an MCMC to evaluate the data in Question 1+2 above using emcee. We suggest checking out the guide to MCMC here:
#
# https://prappleizer.github.io/Tutorials/MCMC/MCMC_Tutorial.html
#
#
# We suggest 20 walkers and 2000 steps for your sampler. Plot both the sampler chains and a corner plot of the results.
#
# Compare the best fit values for m and b from the chi2 and MCMC, as well as their errors.
# In[91]:
import emcee
import corner
# In[125]:
# LOAD DATA FROM HOGG et al 2010, Table 1
data = ascii.read('hogg_2010_data.txt')
# SKIP THE FIRST FEW DATA POINTS
m=data['ID'] > 4
data=data[m]
# In[126]:
def lnprob(theta,x,y,sigma):
'''
    Evaluate the log-posterior (log-prior plus log-likelihood)
    Parameters
    ----------
    theta: float array
        Current values of the fitted parameters (m, b)
    x, y, sigma: float arrays
        Data points and their one-sigma errors
    Returns
    -------
    lnp
        log-posterior value; -np.inf if theta falls outside the priors
'''
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, sigma)
def lnprior(theta):
'''
Evaluate whether sample falls inside of priors
Parameters
----------
theta: float array
Current values of fitted parameters
Returns
-------
returns 0 if inside prior, -inf if outside
'''
if (0 < theta[0] < 5) & (-30 < theta[1] < 200):
return 0.0
return -np.inf
def lnlike(theta,x,y,sigma):
'''
Evaluate the log-likelihood
Parameters
----------
theta: float array
Current values of fitted parameters
x,y, sigma: float arrays
Data points and one sigma errors
Returns
-------
lnl
log-likelihood value
'''
# MAKE MODEL
model = theta[0]*x + theta[1]
# EVALUATE LIKELIHOOD
chi2 = ((y - model)**2)/(sigma**2)
lnl = -0.5 * np.sum(chi2)
return lnl
def initialize_walkers(mguess,bguess):
'''
Initialize the walkers using an initial guess
Parameters
----------
mguess, bguess: float value
Rough initial guess of parameters
Returns
-------
ndim, nwalkers, p0
'''
# Two free parameters (m,b) and 20 walkers
ndim, nwalkers = 2, 20
p0 = np.random.rand(ndim * nwalkers).reshape((nwalkers, ndim))
# initialize slope
p0[:,0] = (p0[:,0]*4. - 2) + mguess
# initialize intercept
p0[:,1] = (p0[:,1] * 60 - 30) + bguess
return ndim,nwalkers,p0
# In[127]:
mguess = 2
bguess = 30
max_n=3000
ndim, nwalkers, p0 = initialize_walkers(mguess,bguess)
# INITIALIZE SAMPLER
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(data['x'],data['y'],data['sigma_y']))
# RUN MCMC
pos, prob, state = sampler.run_mcmc(p0, max_n)
# In[128]:
# As a bonus, we can determine the number of burnin samples and whether the chains converged.
tau = sampler.get_autocorr_time(tol=0)
burnin = int(2 * np.max(tau))
converged = np.all(tau * 100 < sampler.iteration)
print('Number of initial burnin samples:',burnin)
print('Did the chains converge?',converged)
# In[129]:
fig, (ax1, ax2) = plt.subplots(1, 2,figsize=(20,5))
for ii in range(20):
ax1.plot(sampler.chain[ii,:,0], color="k",linewidth=0.5)
for ii in range(20):
ax2.plot(sampler.chain[ii,:,1], color="k",linewidth=0.5)
ax1.set_ylabel('m')
ax2.set_ylabel('b')
ax1.set_xlabel('Step Number')
ax2.set_xlabel('Step Number')
ax1.set_title('Slope (m) Sample chains')
ax2.set_title('Intercept (b) Sample chains')
ax1.axvline(burnin,label='Burn-in')
ax2.axvline(burnin)
ax1.legend()
# In[130]:
# PLOT CORNER
labels=['m','b']
burnin = 100  # fixed burn-in used for the corner plot (cf. the autocorrelation estimate above)
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
fig = corner.corner(samples, labels=labels,show_titles=True,quantiles=[0.16, 0.5, 0.84])
# The MCMC returns similar best fit values and one-sigma errors as our chi2 method above!
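# For a number-for-number comparison with the $\chi^2$ fit, a short sketch that pulls the medians and one-sigma (16th/84th percentile) intervals out of the flattened, burned-in chains (`samples` from the cell above):
#
# ```python
# m_mcmc, b_mcmc = np.percentile(samples, 50, axis=0)
# m_lo, b_lo = np.percentile(samples, 16, axis=0)
# m_hi, b_hi = np.percentile(samples, 84, axis=0)
# print('MCMC slope:     {:0.2f} -{:0.2f}/+{:0.2f}'.format(m_mcmc, m_mcmc - m_lo, m_hi - m_mcmc))
# print('MCMC intercept: {:0.1f} -{:0.1f}/+{:0.1f}'.format(b_mcmc, b_mcmc - b_lo, b_hi - b_mcmc))
# # compare directly with the chi2 grid values printed earlier (m_min +/- m_err, b_min +/- b_err)
# ```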
| 26.667571
| 650
| 0.629897
|
5fd2dd97dc8e629bda04134f5179d1fcfb87851c
| 4,708
|
py
|
Python
|
py3plex/algorithms/community_detection/community_wrapper.py
|
SkBlaz/supertest
|
5d99034af820cc10c8f70271b55cc90c42328709
|
[
"BSD-3-Clause"
] | 79
|
2018-10-22T14:54:04.000Z
|
2020-03-05T05:34:35.000Z
|
py3plex/algorithms/community_detection/community_wrapper.py
|
SkBlaz/supertest
|
5d99034af820cc10c8f70271b55cc90c42328709
|
[
"BSD-3-Clause"
] | 6
|
2019-02-19T16:33:14.000Z
|
2019-12-16T10:23:25.000Z
|
py3plex/algorithms/community_detection/community_wrapper.py
|
SkBlaz/Py3Plex
|
5d99034af820cc10c8f70271b55cc90c42328709
|
[
"BSD-3-Clause"
] | 16
|
2019-02-19T16:30:29.000Z
|
2020-02-13T05:57:16.000Z
|
# high level interface for community detection algorithms
from collections import defaultdict  # used below when returning partitions grouped by community
from .community_louvain import *
try:
from .NoRC import *
except:
pass
def run_infomap(infile,
multiplex=True,
overlapping=False,
binary="./infomap",
verbose=True,
iterations=1000):
from subprocess import call
if verbose:
if multiplex:
call([
binary, infile, "out/", "-i multiplex",
"-N " + str(iterations), "-z"
])
else:
if overlapping == True:
call([
binary, infile, "out/", "-N " + str(iterations),
"--overlapping", "-z"
])
else:
call([binary, infile, "out/", "-N " + str(iterations), "-z"])
else:
if multiplex:
call([
binary, infile, "out/", "-i multiplex",
"-N " + str(iterations), "-z", "--silent"
])
else:
if overlapping == True:
call([
binary, infile, "out/", "-N " + str(iterations),
"--overlapping", "-z", "--silent"
])
else:
call([
binary, infile, "out/", "-N " + str(iterations), "-z",
"--silent"
])
def infomap_communities(graph,
binary="./infomap",
edgelist_file="./tmp/tmpedgelist.txt",
multiplex=False,
verbose=False,
overlapping=False,
iterations=200,
output="mapping"):
# check type of the network
print("INFO: Infomap community detection in progress..")
# go through individual nodes first and enumerate them., also layers
inverse_node_map = graph.serialize_to_edgelist(edgelist_file=edgelist_file,
multiplex=multiplex)
# run infomap
run_infomap(edgelist_file,
multiplex=multiplex,
binary=binary,
verbose=verbose,
overlapping=overlapping,
iterations=iterations)
partition = parse_infomap("out/" +
edgelist_file.split("/")[-1].split(".")[0] +
".tree")
partition = {inverse_node_map[k]: v for k, v in partition.items()}
non_mapped = set(list(graph.get_nodes())).difference(partition.keys())
for x in non_mapped:
partition[x] = 1
import shutil
shutil.rmtree("out", ignore_errors=False, onerror=None)
shutil.rmtree("tmp", ignore_errors=False, onerror=None)
if output == "mapping":
return partition
else:
dx_hc = defaultdict(list)
for a, b in partition.items():
dx_hc[b].append(a)
return dx_hc
def parse_infomap(outfile):
outmap = {}
with open(outfile) as of:
for line in of:
parts = line.strip().split()
try:
module = parts[0].split(":")[0]
node = parts[3]
outmap[int(node)] = int(module)
except:
pass
return outmap
def louvain_communities(network, output="mapping"):
try:
G = nx.Graph()
for edge in network.core_network.edges():
G.add_edge(edge[0], edge[1])
network = G
except Exception as es:
pass ## nx input directly.
partition = best_partition(network)
if output == "partition":
dx_hc = defaultdict(list)
for a, b in partition.items():
dx_hc[b].append(a)
return dx_hc
return partition
def NoRC_communities(
network,
verbose=True,
clustering_scheme="kmeans",
output="mapping",
prob_threshold=0.001,
parallel_step=8,
community_range=[1, 3, 5, 7, 11, 20, 40, 50, 100, 200, 300],
fine_range=3):
try:
network = network.core_network
except:
pass
partition = NoRC_communities_main(network,
verbose=True,
clustering_scheme=clustering_scheme,
prob_threshold=prob_threshold,
parallel_step=parallel_step,
community_range=community_range,
fine_range=fine_range)
if output == "mapping":
# todo
return None
else:
return partition
| 29.242236
| 79
| 0.484282
|
cd13c135741610f2333b3ea860dd6265561e4809
| 14,457
|
py
|
Python
|
scripts/run_mypy_checks.py
|
sidkhuntia/oppia
|
ac185819c62bcc487f858420892ad9056516558b
|
[
"Apache-2.0"
] | null | null | null |
scripts/run_mypy_checks.py
|
sidkhuntia/oppia
|
ac185819c62bcc487f858420892ad9056516558b
|
[
"Apache-2.0"
] | null | null | null |
scripts/run_mypy_checks.py
|
sidkhuntia/oppia
|
ac185819c62bcc487f858420892ad9056516558b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MyPy test runner script."""
from __future__ import annotations
import argparse
import os
import site
import subprocess
import sys
from scripts import common
from scripts import install_third_party_libs
# List of directories whose files won't be type-annotated ever.
EXCLUDED_DIRECTORIES = [
'proto_files/',
'scripts/linters/test_files/',
'third_party/',
'venv/'
]
# List of files who should be type-annotated but are not.
NOT_FULLY_COVERED_FILES = [
'core/controllers/',
'core/domain/action_registry.py',
'core/domain/action_registry_test.py',
'core/domain/auth_services.py',
'core/domain/auth_services_test.py',
'core/domain/blog_services.py',
'core/domain/blog_services_test.py',
'core/domain/calculation_registry.py',
'core/domain/calculation_registry_test.py',
'core/domain/change_domain.py',
'core/domain/classifier_services.py',
'core/domain/classifier_services_test.py',
'core/domain/classroom_services.py',
'core/domain/classroom_services_test.py',
'core/domain/collection_domain.py',
'core/domain/collection_domain_test.py',
'core/domain/collection_services.py',
'core/domain/collection_services_test.py',
'core/domain/config_services.py',
'core/domain/config_services_test.py',
'core/domain/cron_services.py',
'core/domain/customization_args_util.py',
'core/domain/customization_args_util_test.py',
'core/domain/draft_upgrade_services.py',
'core/domain/draft_upgrade_services_test.py',
'core/domain/email_manager.py',
'core/domain/email_manager_test.py',
'core/domain/email_services.py',
'core/domain/email_services_test.py',
'core/domain/email_subscription_services.py',
'core/domain/email_subscription_services_test.py',
'core/domain/event_services.py',
'core/domain/event_services_test.py',
'core/domain/exp_domain.py',
'core/domain/exp_domain_test.py',
'core/domain/exp_fetchers.py',
'core/domain/exp_fetchers_test.py',
'core/domain/exp_services.py',
'core/domain/exp_services_test.py',
'core/domain/expression_parser.py',
'core/domain/expression_parser_test.py',
'core/domain/feedback_services.py',
'core/domain/feedback_services_test.py',
'core/domain/fs_domain.py',
'core/domain/fs_domain_test.py',
'core/domain/fs_services.py',
'core/domain/fs_services_test.py',
'core/domain/html_cleaner.py',
'core/domain/html_cleaner_test.py',
'core/domain/html_validation_service.py',
'core/domain/html_validation_service_test.py',
'core/domain/image_validation_services.py',
'core/domain/image_validation_services_test.py',
'core/domain/improvements_services.py',
'core/domain/improvements_services_test.py',
'core/domain/interaction_registry.py',
'core/domain/interaction_registry_test.py',
'core/domain/learner_goals_services.py',
'core/domain/learner_goals_services_test.py',
'core/domain/learner_playlist_services.py',
'core/domain/learner_playlist_services_test.py',
'core/domain/learner_progress_services.py',
'core/domain/learner_progress_services_test.py',
'core/domain/moderator_services.py',
'core/domain/moderator_services_test.py',
'core/domain/object_registry.py',
'core/domain/object_registry_test.py',
'core/domain/opportunity_services.py',
'core/domain/opportunity_services_test.py',
'core/domain/param_domain.py',
'core/domain/param_domain_test.py',
'core/domain/platform_feature_services.py',
'core/domain/platform_feature_services_test.py',
'core/domain/platform_parameter_domain.py',
'core/domain/platform_parameter_domain_test.py',
'core/domain/platform_parameter_list.py',
'core/domain/platform_parameter_list_test.py',
'core/domain/platform_parameter_registry.py',
'core/domain/platform_parameter_registry_test.py',
'core/domain/playthrough_issue_registry.py',
'core/domain/playthrough_issue_registry_test.py',
'core/domain/question_domain.py',
'core/domain/question_domain_test.py',
'core/domain/question_fetchers.py',
'core/domain/question_fetchers_test.py',
'core/domain/question_services.py',
'core/domain/question_services_test.py',
'core/domain/rating_services.py',
'core/domain/rating_services_test.py',
'core/domain/recommendations_services.py',
'core/domain/recommendations_services_test.py',
'core/domain/rights_manager.py',
'core/domain/rights_manager_test.py',
'core/domain/role_services.py',
'core/domain/role_services_test.py',
'core/domain/rte_component_registry.py',
'core/domain/rte_component_registry_test.py',
'core/domain/rules_registry.py',
'core/domain/rules_registry_test.py',
'core/domain/search_services.py',
'core/domain/search_services_test.py',
'core/domain/skill_domain.py',
'core/domain/skill_domain_test.py',
'core/domain/skill_fetchers.py',
'core/domain/skill_fetchers_test.py',
'core/domain/skill_services.py',
'core/domain/skill_services_test.py',
'core/domain/state_domain.py',
'core/domain/state_domain_test.py',
'core/domain/stats_domain.py',
'core/domain/stats_domain_test.py',
'core/domain/stats_services.py',
'core/domain/stats_services_test.py',
'core/domain/story_domain.py',
'core/domain/story_domain_test.py',
'core/domain/story_fetchers.py',
'core/domain/story_fetchers_test.py',
'core/domain/story_services.py',
'core/domain/story_services_test.py',
'core/domain/subscription_services.py',
'core/domain/subscription_services_test.py',
'core/domain/subtopic_page_domain.py',
'core/domain/subtopic_page_domain_test.py',
'core/domain/subtopic_page_services.py',
'core/domain/subtopic_page_services_test.py',
'core/domain/suggestion_registry.py',
'core/domain/suggestion_registry_test.py',
'core/domain/suggestion_services.py',
'core/domain/suggestion_services_test.py',
'core/domain/summary_services.py',
'core/domain/summary_services_test.py',
'core/domain/takeout_service.py',
'core/domain/takeout_service_test.py',
'core/domain/taskqueue_services.py',
'core/domain/taskqueue_services_test.py',
'core/domain/topic_fetchers.py',
'core/domain/topic_fetchers_test.py',
'core/domain/topic_services.py',
'core/domain/topic_services_test.py',
'core/domain/translatable_object_registry.py',
'core/domain/translatable_object_registry_test.py',
'core/domain/translation_fetchers.py',
'core/domain/translation_fetchers_test.py',
'core/domain/translation_services.py',
'core/domain/translation_services_test.py',
'core/domain/user_domain.py',
'core/domain/user_domain_test.py',
'core/domain/user_query_services.py',
'core/domain/user_query_services_test.py',
'core/domain/user_services.py',
'core/domain/user_services_test.py',
'core/domain/visualization_registry.py',
'core/domain/visualization_registry_test.py',
'core/domain/voiceover_services.py',
'core/domain/voiceover_services_test.py',
'core/domain/wipeout_service.py',
'core/domain/wipeout_service_test.py',
'core/platform/storage/cloud_storage_emulator.py',
'core/platform/storage/cloud_storage_emulator_test.py',
'core/platform_feature_list.py',
'core/platform_feature_list_test.py',
'core/storage/beam_job/gae_models.py',
'core/storage/beam_job/gae_models_test.py',
'core/storage/blog/gae_models.py',
'core/storage/blog/gae_models_test.py',
'core/storage/storage_models_test.py',
'core/tests/build_sources/extensions/CodeRepl.py',
'core/tests/build_sources/extensions/DragAndDropSortInput.py',
'core/tests/build_sources/extensions/base.py',
'core/tests/build_sources/extensions/base_test.py',
'core/tests/build_sources/extensions/models_test.py',
'core/tests/data/failing_tests.py',
'core/tests/data/image_constants.py',
'core/tests/data/unicode_and_str_handler.py',
'core/tests/gae_suite.py',
'core/tests/gae_suite_test.py',
'core/tests/load_tests/feedback_thread_summaries_test.py',
'core/tests/test_utils.py',
'core/tests/test_utils_test.py',
'core/jobs',
'core/python_utils.py',
'core/python_utils_test.py',
'extensions/',
'scripts/'
]
CONFIG_FILE_PATH = os.path.join('.', 'mypy.ini')
MYPY_REQUIREMENTS_FILE_PATH = os.path.join('.', 'mypy_requirements.txt')
MYPY_TOOLS_DIR = os.path.join(os.getcwd(), 'third_party', 'python3_libs')
PYTHON3_CMD = 'python3'
_PATHS_TO_INSERT = [MYPY_TOOLS_DIR, ]
_PARSER = argparse.ArgumentParser(
description='Python type checking using mypy script.'
)
_PARSER.add_argument(
'--skip-install',
help='If passed, skips installing dependencies.'
' By default, they are installed.',
action='store_true')
_PARSER.add_argument(
'--install-globally',
help='optional; if specified, installs mypy and its requirements globally.'
' By default, they are installed to %s' % MYPY_TOOLS_DIR,
action='store_true')
_PARSER.add_argument(
'--files',
help='Files to type-check',
action='store',
nargs='+'
)
def install_third_party_libraries(skip_install: bool) -> None:
"""Run the installation script.
Args:
skip_install: bool. Whether to skip running the installation script.
"""
if not skip_install:
install_third_party_libs.main()
def get_mypy_cmd(files, mypy_exec_path, using_global_mypy):
"""Return the appropriate command to be run.
Args:
        files: list(str). File paths to type-check; if empty or None, the
            whole codebase is checked apart from the exclusions.
mypy_exec_path: str. Path of mypy executable.
using_global_mypy: bool. Whether generated command should run using
global mypy.
Returns:
list(str). List of command line arguments.
"""
if using_global_mypy:
mypy_cmd = 'mypy'
else:
mypy_cmd = mypy_exec_path
if files:
cmd = [mypy_cmd, '--config-file', CONFIG_FILE_PATH] + files
else:
excluded_files_regex = (
'|'.join(NOT_FULLY_COVERED_FILES + EXCLUDED_DIRECTORIES))
cmd = [
mypy_cmd, '--exclude', excluded_files_regex,
'--config-file', CONFIG_FILE_PATH, '.'
]
return cmd
def install_mypy_prerequisites(install_globally):
"""Install mypy and type stubs from mypy_requirements.txt.
Args:
install_globally: bool. Whether mypy and its requirements are to be
installed globally.
Returns:
tuple(int, str). The return code from installing prerequisites and the
path of the mypy executable.
"""
# TODO(#13398): Change MyPy installation after Python3 migration. Now, we
# install packages globally for CI. In CI, pip installation is not in a way
# we expect.
if install_globally:
cmd = [
PYTHON3_CMD, '-m', 'pip', 'install', '-r',
MYPY_REQUIREMENTS_FILE_PATH
]
else:
cmd = [
PYTHON3_CMD, '-m', 'pip', 'install', '-r',
MYPY_REQUIREMENTS_FILE_PATH, '--target', MYPY_TOOLS_DIR,
'--upgrade'
]
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = process.communicate()
if b'can\'t combine user with prefix' in output[1]:
uextention_text = ['--user', '--prefix=', '--system']
new_process = subprocess.Popen(
cmd + uextention_text, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
new_process.communicate()
_PATHS_TO_INSERT.append(os.path.join(site.USER_BASE, 'bin'))
mypy_exec_path = os.path.join(site.USER_BASE, 'bin', 'mypy')
return (new_process.returncode, mypy_exec_path)
else:
_PATHS_TO_INSERT.append(os.path.join(MYPY_TOOLS_DIR, 'bin'))
mypy_exec_path = os.path.join(MYPY_TOOLS_DIR, 'bin', 'mypy')
return (process.returncode, mypy_exec_path)
def main(args=None):
"""Runs the MyPy type checks."""
parsed_args = _PARSER.parse_args(args=args)
for directory in common.DIRS_TO_ADD_TO_SYS_PATH:
# The directories should only be inserted starting at index 1. See
# https://stackoverflow.com/a/10095099 and
# https://stackoverflow.com/q/10095037 for more details.
sys.path.insert(1, directory)
install_third_party_libraries(parsed_args.skip_install)
common.fix_third_party_imports()
print('Installing Mypy and stubs for third party libraries.')
return_code, mypy_exec_path = install_mypy_prerequisites(
parsed_args.install_globally)
if return_code != 0:
print('Cannot install Mypy and stubs for third party libraries.')
sys.exit(1)
print('Installed Mypy and stubs for third party libraries.')
print('Starting Mypy type checks.')
cmd = get_mypy_cmd(
parsed_args.files, mypy_exec_path, parsed_args.install_globally)
env = os.environ.copy()
for path in _PATHS_TO_INSERT:
env['PATH'] = '%s%s' % (path, os.pathsep) + env['PATH']
env['PYTHONPATH'] = MYPY_TOOLS_DIR
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
# Standard and error output is in bytes, we need to decode the line to
# print it.
print(stdout.decode('utf-8'))
print(stderr.decode('utf-8'))
if process.returncode == 0:
print('Mypy type checks successful.')
else:
print(
'Mypy type checks unsuccessful. Please fix the errors. '
'For more information, visit: '
'https://github.com/oppia/oppia/wiki/Backend-Type-Annotations')
sys.exit(2)
return process.returncode
if __name__ == '__main__': # pragma: no cover
main()
| 36.974425
| 79
| 0.709552
|
07421a3f45b5dd7b6bd661a8ee8367ea83f21090
| 302
|
py
|
Python
|
src/python/kornia/losses/__init__.py
|
pjessesco/DeProCams
|
be89e8aacbefe988225ca4969902803863a29dc4
|
[
"CC-BY-3.0"
] | 10
|
2021-01-26T05:25:01.000Z
|
2022-02-08T06:10:41.000Z
|
kornia/losses/__init__.py
|
sounakdey/kornia
|
6a0df6dee7b213572ff3441bb6eb0e07a23f0ef3
|
[
"Apache-2.0"
] | 3
|
2021-05-03T10:34:15.000Z
|
2022-02-17T04:25:26.000Z
|
kornia/losses/__init__.py
|
sounakdey/kornia
|
6a0df6dee7b213572ff3441bb6eb0e07a23f0ef3
|
[
"Apache-2.0"
] | 4
|
2021-04-30T01:51:38.000Z
|
2022-01-27T05:06:04.000Z
|
from .ssim import SSIM, ssim
from .dice import DiceLoss, dice_loss
from .tversky import TverskyLoss, tversky_loss
from .focal import FocalLoss, focal_loss
from .depth_smooth import (
InverseDepthSmoothnessLoss, inverse_depth_smoothness_loss
)
from .divergence import kl_div_loss_2d, js_div_loss_2d
| 33.555556
| 61
| 0.834437
|
cf18a59751c6ef733e1ca0bd67664abe8420ac59
| 3,392
|
py
|
Python
|
trinity/rpc/modules/admin.py
|
AYCH-Inc/aych.eth.client
|
1c8be83cebffd889c1c98d48605bba741743f31d
|
[
"MIT"
] | null | null | null |
trinity/rpc/modules/admin.py
|
AYCH-Inc/aych.eth.client
|
1c8be83cebffd889c1c98d48605bba741743f31d
|
[
"MIT"
] | null | null | null |
trinity/rpc/modules/admin.py
|
AYCH-Inc/aych.eth.client
|
1c8be83cebffd889c1c98d48605bba741743f31d
|
[
"MIT"
] | null | null | null |
from typing import Tuple, Iterable, Dict
from eth.constants import GENESIS_BLOCK_NUMBER
from eth_typing import BlockNumber
from eth_utils import encode_hex, to_dict
from lahja import EndpointAPI
from p2p.kademlia import Node
from p2p.typing import Capabilities
from p2p.validation import validate_enode_uri
from trinity.chains.base import AsyncChainAPI
from trinity.config import TrinityConfig, Eth1AppConfig, Eth1ChainConfig
from trinity.constants import TO_NETWORKING_BROADCAST_CONFIG
from trinity.protocol.common.events import (
ConnectToNodeCommand,
GetProtocolCapabilitiesRequest
)
from trinity.rpc.modules import Eth1ChainRPCModule
from trinity.rpc.typing import RpcProtocolResponse, RpcNodeInfoResponse
from trinity.server import BOUND_IP
from trinity._utils.version import construct_trinity_client_identifier
def format_enode(config: TrinityConfig) -> str:
return f"enode://{config.nodekey.public_key.to_hex()[2:]}@{BOUND_IP}:{config.port}"
@to_dict
def generate_chain_config(chain_config: Eth1ChainConfig) -> Iterable[Tuple[str, int]]:
for fork_block, vm in chain_config.vm_configuration:
yield f"{vm.fork}Block", fork_block
yield 'chainId', chain_config.chain_id
class Admin(Eth1ChainRPCModule):
def __init__(self,
chain: AsyncChainAPI,
event_bus: EndpointAPI,
trinity_config: TrinityConfig) -> None:
super().__init__(chain, event_bus)
self.trinity_config = trinity_config
async def addPeer(self, uri: str) -> None:
validate_enode_uri(uri, require_ip=True)
await self.event_bus.broadcast(
ConnectToNodeCommand(Node.from_uri(uri)),
TO_NETWORKING_BROADCAST_CONFIG
)
async def nodeInfo(self) -> RpcNodeInfoResponse:
response = await self.event_bus.request(
GetProtocolCapabilitiesRequest(),
TO_NETWORKING_BROADCAST_CONFIG
)
return {
'enode': format_enode(self.trinity_config),
# TODO: get the external ip from the upnp service
'ip': "::",
'listenAddr': f"[::]:{self.trinity_config.port}",
'name': construct_trinity_client_identifier(),
'ports': {
'discovery': self.trinity_config.port,
'listener': self.trinity_config.port
},
'protocols': await self._generate_protocol_info(response.capabilities)
}
async def _generate_protocol_info(
self,
protocols: Capabilities) -> Dict[str, RpcProtocolResponse]:
head = await self.chain.coro_get_canonical_head()
total_difficulty = await self.chain.coro_get_score(head.hash)
genesis_header = await self.chain.coro_get_canonical_block_header_by_number(
BlockNumber(GENESIS_BLOCK_NUMBER)
)
chain_config = self.trinity_config.get_app_config(Eth1AppConfig).get_chain_config()
return {
protocol: {
'version': f'{protocol}/{version}',
'difficulty': total_difficulty,
'genesis': encode_hex(genesis_header.hash),
'head': encode_hex(head.hash),
'network': self.trinity_config.network_id,
'config': generate_chain_config(chain_config)
}
for protocol, version in protocols
}
| 36.085106
| 91
| 0.67954
|
73c5cd0de310dc0f8da4b9780ed6318fc5d92c4e
| 688
|
py
|
Python
|
ballons.py
|
yacinedev19/fall-18-m1
|
359996788e1bafcc2dfe8a3b817a65d9184db100
|
[
"MIT"
] | null | null | null |
ballons.py
|
yacinedev19/fall-18-m1
|
359996788e1bafcc2dfe8a3b817a65d9184db100
|
[
"MIT"
] | null | null | null |
ballons.py
|
yacinedev19/fall-18-m1
|
359996788e1bafcc2dfe8a3b817a65d9184db100
|
[
"MIT"
] | null | null | null |
import turtle
# This code doesn't work!
# The indentation is broken.
# Fix it!
def balloon(t, color):
t.speed(0)
t.color(color)
# Draw balloon body.
for side in range(30):
t.forward(10)
t.left(12)
# Draw balloon knot.
t.right(60)
for side in range(3):
t.forward(10)
t.right(120)
# Draw balloon string.
t.color("gray")
t.right(30)
t.forward(100)
t = turtle.Turtle()
t.penup()
t.back(100)
t.pendown()
balloon(t, "red")
t.penup()
t.home()
t.pendown()
balloon(t, "blue")
t.penup()
t.home()
t.forward(100)
t.pendown()
balloon(t, "purple")
t.hideturtle()
| 14.333333
| 29
| 0.543605
|
05de8272e3b4b636e22ac85e0dbc1572b8770bbc
| 783
|
py
|
Python
|
binance_d/model/longshortpositions.py
|
vinayakpathak/Binance_Futures_python
|
e4c4071f6776af162ae8a87690b7d92a87608a94
|
[
"MIT"
] | 640
|
2020-01-16T05:00:13.000Z
|
2022-03-30T08:40:26.000Z
|
binance_d/model/longshortpositions.py
|
vinayakpathak/Binance_Futures_python
|
e4c4071f6776af162ae8a87690b7d92a87608a94
|
[
"MIT"
] | 140
|
2020-01-19T20:27:35.000Z
|
2022-03-28T08:28:43.000Z
|
binance_d/model/longshortpositions.py
|
vinayakpathak/Binance_Futures_python
|
e4c4071f6776af162ae8a87690b7d92a87608a94
|
[
"MIT"
] | 391
|
2020-01-15T07:12:26.000Z
|
2022-03-31T14:24:19.000Z
|
class LongShortPositions:
def __init__(self):
self.pair = ""
self.longShortRatio = 0.0 # long/short account num ratio of top traders
self.longPosition = 0.0 # long account num ratio of top traders
self.shortPosition = 0.0 # short account num ratio of top traders
self.timestamp = 0
@staticmethod
def json_parse(json_data):
result = LongShortPositions()
result.pair = json_data.get_string("pair")
result.longShortRatio = json_data.get_float("longShortRatio")
result.longPosition = json_data.get_float("longPosition")
result.shortPosition = json_data.get_float("shortPosition")
result.timestamp = json_data.get_int("timestamp")
return result
| 35.590909
| 82
| 0.650064
|
6ef7f42ab5dcdfaff6055ae9e8841a876de0b3ca
| 11,901
|
py
|
Python
|
DexiNed-TF2/dataset_manager.py
|
ranajit1996/DexiNed
|
639f5d724e5d028b106afbf112af9cea7245a261
|
[
"MIT"
] | 1
|
2020-08-07T12:07:33.000Z
|
2020-08-07T12:07:33.000Z
|
DexiNed-TF2/dataset_manager.py
|
ranajit1996/DexiNed
|
639f5d724e5d028b106afbf112af9cea7245a261
|
[
"MIT"
] | null | null | null |
DexiNed-TF2/dataset_manager.py
|
ranajit1996/DexiNed
|
639f5d724e5d028b106afbf112af9cea7245a261
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import h5py, os
import random
import cv2 as cv
AUTOTUNE = tf.data.experimental.AUTOTUNE
BUFFER_SIZE = 1024
img_shape =None
class DataLoader(tf.keras.utils.Sequence):
def __init__(self,data_name,arg=None, is_val=False):
self.is_training = True if arg.model_state.lower() == 'train' else False
self.dim_w = arg.image_width if self.is_training else arg.test_img_width
self.dim_h = arg.image_height if self.is_training else arg.test_img_height
self.args = arg
self.base_dir = arg.train_dir if arg.model_state.lower()=='train' else arg.test_dir
self.is_val = is_val
self.data_name =data_name
self.bs = arg.batch_size if self.is_training else arg.test_bs
self.shuffle=self.is_training
if not self.is_training and arg.model_state=="test":
i_width = self.dim_w if self.dim_w%16==0 else (self.dim_w//16+1)*16
i_height= self.dim_h if self.dim_h%16==0 else (self.dim_h//16+1)*16
self.input_shape = (None,i_height, i_width,3)
self.dim_w = i_width
self.dim_h = i_height
self.imgs_shape = []
# OMSIV real size= 320,580,3
self.data_list = self._build_index()
self.on_epoch_end()
def _build_index(self):
# base_dir = os.path.join(self.base_dir, self.args.model_state.lower())
list_name= self.args.train_list if self.is_training else self.args.test_list
if not self.data_name.lower()=='classic':
file_path = os.path.join(self.base_dir, list_name)
with open(file_path,'r') as f:
file_list = f.readlines()
file_list = [line.strip() for line in file_list] # to clean the '\n'
file_list = [line.split(' ') for line in file_list] # separate paths
if self.data_name.lower() in ['biped','mbiped']:
m_mode = 'train' if self.is_training else 'test'
input_path = [os.path.join(
self.base_dir,'imgs',m_mode,line[0]) for line in file_list]
gt_path = [os.path.join(
self.base_dir,'edge_maps',m_mode,line[1]) for line in file_list]
elif self.data_name.lower()=='classic':
file_list = os.listdir(self.base_dir)
input_path = [os.path.join(self.base_dir,line) for line in file_list]
gt_path = None
else:
input_path = [os.path.join(self.base_dir, line[0]) for line in file_list]
gt_path = [os.path.join(self.base_dir, line[1]) for line in file_list]
# split training and validation, val=10%
if self.is_training and self.is_val:
input_path = input_path[int(0.9 * len(input_path)):]
gt_path = gt_path[int(0.9 * len(gt_path)):]
elif self.is_training:
input_path = input_path[:int(0.9 * len(input_path))]
gt_path = gt_path[:int(0.9 * len(gt_path))]
if not self.is_training:
self.imgs_name = [os.path.basename(k) for k in input_path]
for tmp_path in input_path:
tmp_i = cv.imread(tmp_path)
tmp_shape = tmp_i.shape[:2]
self.imgs_shape.append(tmp_shape)
        sample_indices = [input_path, gt_path]
        return sample_indices
def on_epoch_end(self):
self.indices = np.arange(len(self.data_list[0]))
if self.shuffle:
np.random.shuffle(self.indices)
def __len__(self):
return len(self.indices)//self.bs
def __getitem__(self, index):
indices = self.indices[index*self.bs:(index+1)*self.bs]
if not self.data_name.lower()=='classic':
x_list,y_list = self.data_list
tmp_x_path = [x_list[k] for k in indices]
tmp_y_path = [y_list[k] for k in indices]
x,y = self.__data_generation(tmp_x_path,tmp_y_path)
else:
x_list, _ = self.data_list
tmp_x_path = [x_list[k] for k in indices]
x, y = self.__data_generation(tmp_x_path, None)
return x,y
def __data_generation(self,x_path,y_path):
if self.args.scale is not None and self.args.model_state.lower()!='train':
scl= self.args.scale
scl_h = int(self.dim_h*scl) if (self.dim_h*scl)%16==0 else \
int(((self.dim_h*scl) // 16 + 1) * 16)
            scl_w = int(self.dim_w * scl) if (self.dim_w * scl) % 16 == 0 else \
                int(((self.dim_w * scl) // 16 + 1) * 16)  # round the scaled width (not height) up to a multiple of 16
x = np.empty((self.bs, scl_h, scl_w, 3), dtype="float32")
else:
x = np.empty((self.bs, self.dim_h, self.dim_w, 3), dtype="float32")
y = np.empty((self.bs, self.dim_h, self.dim_w, 1), dtype="float32")
for i,tmp_data in enumerate(x_path):
tmp_x_path = tmp_data
tmp_y_path = y_path[i] if not self.data_name.lower()=='classic' else None
tmp_x,tmp_y = self.transformer(tmp_x_path,tmp_y_path)
x[i,]=tmp_x
y[i,]=tmp_y
return x,y
def transformer(self, x_path, y_path):
tmp_x = cv.imread(x_path)
if y_path is not None:
tmp_y = cv.imread(y_path,cv.IMREAD_GRAYSCALE)
else:
tmp_y=None
h,w,_ = tmp_x.shape
if self.args.model_state == "train":
if self.args.crop_img:
i_h = random.randint(0,h-self.dim_h)
i_w = random.randint(0,w-self.dim_w)
tmp_x = tmp_x[i_h:i_h+self.dim_h,i_w:i_w+self.dim_w,]
tmp_y = tmp_y[i_h:i_h+self.dim_h,i_w:i_w+self.dim_w,]
else:
tmp_x = cv.resize(tmp_x,(self.dim_w,self.dim_h))
tmp_y = cv.resize(tmp_y,(self.dim_w,self.dim_h))
else:
if self.dim_w!=w and self.dim_h!=h:
tmp_x = cv.resize(tmp_x, (self.dim_w, self.dim_h))
if self.args.scale is not None:
scl = self.args.scale
scl_h = int(self.dim_h * scl) if (self.dim_h * scl) % 16 == 0 else \
int(((self.dim_h * scl) // 16 + 1) * 16)
                scl_w = int(self.dim_w * scl) if (self.dim_w * scl) % 16 == 0 else \
                    int(((self.dim_w * scl) // 16 + 1) * 16)  # round the scaled width (not height) up to a multiple of 16
tmp_x = cv.resize(tmp_x,dsize=(scl_w,scl_h))
if tmp_y is not None:
tmp_y = cv.resize(tmp_y, (self.dim_w, self.dim_h))
if tmp_y is not None:
tmp_y = np.expand_dims(np.float32(tmp_y)/255.,axis=-1)
tmp_x = np.float32(tmp_x)
return tmp_x, tmp_y
# def __read_h5(self,file_path):
#
# with h5py.File(file_path,'r') as h5f:
# # n_var = len(list(h5f.keys()))
# data = np.array(h5f.get('data'))
# return data
def dataset_info(dataset_name, is_linux=False):
if is_linux:
config = {
'BSDS': {'img_height':400,# 321
'img_width':400,#481
'test_list': 'test_pair.lst',
'data_dir': '/opt/dataset/BSDS', # mean_rgb
'yita': 0.5},
'BSDS300': {'img_height': 321,
'img_width': 481,
'test_list': 'test_pair.lst',
'data_dir': '/opt/dataset/BSDS300', # NIR
'yita': 0.5},
'PASCAL': {'img_height':375,
'img_width':500,
'test_list': 'test_pair.lst',
'data_dir': '/opt/dataset/PASCAL', # mean_rgb
'yita': 0.3},
'CID': {'img_height':512,
'img_width':512,
'test_list': 'test_pair.lst',
'data_dir': '/opt/dataset/CID', # mean_rgb
'yita': 0.3},
'NYUD': {'img_height':425,
'img_width':560,
'test_list': 'test_pair.lst',
'data_dir': '/opt/dataset/NYUD', # mean_rgb
'yita': 0.5},
'MULTICUE': {'img_height':720,
'img_width':1280,
'test_list': 'test_pair.lst',
'data_dir': '/opt/dataset/MULTICUE', # mean_rgb
'yita': 0.3},
'BIPED': {'img_height': 720,
'img_width': 1280,
'test_list': 'test_rgb.lst',
'train_list': 'train_rgb.lst',
'data_dir': '/opt/dataset/BIPED/edges', # WIN: '../.../dataset/BIPED/edges'
'yita': 0.5},
'MBIPED': {'img_height': 720,
'img_width': 1280,
'test_list': 'test_rgbn.lst',
'train_list': 'train_rgbn.lst',
'data_dir': '/opt/dataset/BIPED/edges', # WIN: '../.../dataset/BIPED/edges'
'yita': 0.5},
'CLASSIC': {'img_height':512,
'img_width': 512,
'test_list': None,
'data_dir': 'data', # mean_rgb
'yita': 0.5},
'DCD': {'img_height': 336,# 240
'img_width': 448,#360
'test_list':'test_pair.lst',
'data_dir': '/opt/dataset/DCD', # mean_rgb
'yita': 0.2}
}
data_info = config[dataset_name]
return data_info
else:
config = {
'BSDS': {'img_height': 512,#321
'img_width': 512,#481
'test_list': 'test_pair.lst',
'data_dir': '../../dataset/BSDS', # mean_rgb
'yita': 0.5},
'BSDS300': {'img_height': 512,#321
'img_width': 512,#481
'test_list': 'test_pair.lst',
'data_dir': '../../dataset/BSDS300', # NIR
'yita': 0.5},
'PASCAL': {'img_height': 375,
'img_width': 500,
'test_list': 'test_pair.lst',
'data_dir': '/opt/dataset/PASCAL', # mean_rgb
'yita': 0.3},
'CID': {'img_height': 512,
'img_width': 512,
'test_list': 'test_pair.lst',
'data_dir': '../../dataset/CID', # mean_rgb
'yita': 0.3},
'NYUD': {'img_height': 425,
'img_width': 560,
'test_list': 'test_pair.lst',
'data_dir': '/opt/dataset/NYUD', # mean_rgb
'yita': 0.5},
'MULTICUE': {'img_height': 720,
'img_width': 1280,
'test_list': 'test_pair.lst',
'data_dir': '../../dataset/MULTICUE', # mean_rgb
'yita': 0.3},
'BIPED': {'img_height': 720,#720
'img_width': 1280,#1280
'test_list': 'test_rgb.lst',
'train_list': 'train_rgb.lst',
'data_dir': '../../dataset/BIPED/edges', # WIN: '../.../dataset/BIPED/edges'
'yita': 0.5},
'CLASSIC': {'img_height': 512,
'img_width': 512,
'test_list': None,
'train_list': None,
'data_dir': 'data', # mean_rgb
'yita': 0.5},
'DCD': {'img_height': 240,
'img_width': 360,
'test_list': 'test_pair.lst',
'data_dir': '/opt/dataset/DCD', # mean_rgb
'yita': 0.2}
}
data_info = config[dataset_name]
return data_info
| 42.655914
| 99
| 0.493152
|
216e0771a81a9167c2a06a65ffab0fd9838f851d
| 7,602
|
py
|
Python
|
integration/sawtooth_integration/tests/test_events_and_receipts.py
|
mealchain/beta
|
7dc1a1aea175bfb3f1008939f098a1d58bb455a6
|
[
"Apache-2.0"
] | null | null | null |
integration/sawtooth_integration/tests/test_events_and_receipts.py
|
mealchain/beta
|
7dc1a1aea175bfb3f1008939f098a1d58bb455a6
|
[
"Apache-2.0"
] | null | null | null |
integration/sawtooth_integration/tests/test_events_and_receipts.py
|
mealchain/beta
|
7dc1a1aea175bfb3f1008939f098a1d58bb455a6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import unittest
import logging
import json
import urllib.request
import urllib.error
import base64
import sys
import cbor
from sawtooth_intkey.intkey_message_factory import IntkeyMessageFactory
from sawtooth_intkey.processor.handler import make_intkey_address
from sawtooth_sdk.messaging.stream import Stream
from sawtooth_sdk.protobuf import events_pb2
from sawtooth_sdk.protobuf import validator_pb2
from sawtooth_sdk.protobuf import batch_pb2
from sawtooth_sdk.protobuf import txn_receipt_pb2
from sawtooth_sdk.protobuf import state_delta_pb2
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
WAIT = 300
class TestEventsAndReceipts(unittest.TestCase):
def test_subscribe_and_unsubscribe(self):
"""Tests that a client can subscribe and unsubscribe from events."""
response = self._subscribe()
self.assert_subscribe_response(response)
response = self._unsubscribe()
self.assert_unsubscribe_response(response)
def test_block_commit_event_received(self):
"""Tests that block commit events are properly received on block
boundaries."""
self._subscribe()
for i in range(1, 5):
self.batch_submitter.submit_next_batch()
msg = self.stream.receive().result()
self.assertEqual(
msg.message_type,
validator_pb2.Message.CLIENT_EVENTS)
event_list = events_pb2.EventList()
event_list.ParseFromString(msg.content)
events = event_list.events
self.assertEqual(len(events), 1)
self.assert_block_commit_event(events[0], i)
self._unsubscribe()
def test_receipt_stored(self):
"""Tests that receipts are stored successfully when a block is
committed."""
self._subscribe()
n = self.batch_submitter.submit_next_batch()
response = self._get_receipt(n)
receipts = self.assert_receipt_get_response(response)
state_change = receipts[0].state_changes[0]
self.assertEqual(
state_change.type,
state_delta_pb2.StateChange.SET)
self.assertEqual(
state_change.value,
cbor.dumps({str(n): 0}))
self.assertEqual(
state_change.address,
make_intkey_address(str(n)))
self._unsubscribe()
@classmethod
def setUpClass(cls):
cls.batch_submitter = BatchSubmitter(WAIT)
def setUp(self):
self.url = "tcp://validator:4004"
self.stream = Stream(self.url)
def tearDown(self):
if self.stream is not None:
self.stream.close()
def _get_receipt(self, n):
txn_id = \
self.batch_submitter.batches[n].transactions[0].header_signature
request = txn_receipt_pb2.ClientReceiptGetRequest(
transaction_ids=[txn_id])
response = self.stream.send(
validator_pb2.Message.CLIENT_RECEIPT_GET_REQUEST,
request.SerializeToString()).result()
return response
def _subscribe(self, subscriptions=None):
if subscriptions is None:
subscriptions = [
events_pb2.EventSubscription(event_type="block_commit"),
]
request = events_pb2.ClientEventsSubscribeRequest(
subscriptions=subscriptions)
response = self.stream.send(
validator_pb2.Message.CLIENT_EVENTS_SUBSCRIBE_REQUEST,
request.SerializeToString()).result()
return response
def _unsubscribe(self):
request = events_pb2.ClientEventsUnsubscribeRequest()
response = self.stream.send(
validator_pb2.Message.CLIENT_EVENTS_UNSUBSCRIBE_REQUEST,
request.SerializeToString()).result()
return response
def assert_block_commit_event(self, event, block_num):
self.assertEqual(event.event_type, "block_commit")
self.assertTrue(all([
any(attribute.key == "block_id" for attribute in event.attributes),
any(attribute.key == "block_num"
for attribute in event.attributes),
any(attribute.key == "previous_block_id"
for attribute in event.attributes),
any(attribute.key == "state_root_hash"
for attribute in event.attributes),
]))
for attribute in event.attributes:
if attribute.key == "block_num":
self.assertEqual(attribute.value, str(block_num))
def assert_receipt_get_response(self, msg):
self.assertEqual(
msg.message_type,
validator_pb2.Message.CLIENT_RECEIPT_GET_RESPONSE)
receipt_response = txn_receipt_pb2.ClientReceiptGetResponse()
receipt_response.ParseFromString(msg.content)
self.assertEqual(
receipt_response.status,
txn_receipt_pb2.ClientReceiptGetResponse.OK)
return receipt_response.receipts
def assert_subscribe_response(self, msg):
self.assertEqual(
msg.message_type,
validator_pb2.Message.CLIENT_EVENTS_SUBSCRIBE_RESPONSE)
subscription_response = events_pb2.ClientEventsSubscribeResponse()
subscription_response.ParseFromString(msg.content)
self.assertEqual(
subscription_response.status,
events_pb2.ClientEventsSubscribeResponse.OK)
def assert_unsubscribe_response(self, msg):
self.assertEqual(
msg.message_type,
validator_pb2.Message.CLIENT_EVENTS_UNSUBSCRIBE_RESPONSE)
subscription_response = events_pb2.ClientEventsUnsubscribeResponse()
subscription_response.ParseFromString(msg.content)
self.assertEqual(
subscription_response.status,
events_pb2.ClientEventsUnsubscribeResponse.OK)
class BatchSubmitter:
def __init__(self, timeout):
self.batches = []
self.imf = IntkeyMessageFactory()
self.timeout = timeout
def _post_batch(self, batch):
headers = {'Content-Type': 'application/octet-stream'}
response = self._query_rest_api(
'/batches?wait={}'.format(self.timeout),
data=batch,
headers=headers)
return response
    def _query_rest_api(self, suffix='', data=None, headers=None):
        # Avoid a mutable default argument; fall back to a fresh empty dict.
        headers = headers if headers is not None else {}
url = 'http://rest-api:8080' + suffix
request = urllib.request.Request(url, data, headers)
response = urllib.request.urlopen(request).read().decode('utf-8')
return json.loads(response)
def make_batch(self, n):
return self.imf.create_batch([('set', str(n), 0)])
def submit_next_batch(self):
batch_list_bytes = self.make_batch(len(self.batches))
batch_list = batch_pb2.BatchList()
batch_list.ParseFromString(batch_list_bytes)
self.batches.append(batch_list.batches[0])
self._post_batch(batch_list_bytes)
return len(self.batches) - 1
| 35.858491
| 80
| 0.667587
|
cffecd9bde2b6a07072f2bf860f1c38607703206
| 1,961
|
py
|
Python
|
contrib/opencensus-ext-prometheus/setup.py
|
Flared/opencensus-python
|
e2535e688a50c7a06be8af93ca3b987d387da605
|
[
"Apache-2.0"
] | null | null | null |
contrib/opencensus-ext-prometheus/setup.py
|
Flared/opencensus-python
|
e2535e688a50c7a06be8af93ca3b987d387da605
|
[
"Apache-2.0"
] | null | null | null |
contrib/opencensus-ext-prometheus/setup.py
|
Flared/opencensus-python
|
e2535e688a50c7a06be8af93ca3b987d387da605
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
from version import __version__
setup(
name='opencensus-ext-prometheus',
version=__version__, # noqa
author='OpenCensus Authors',
author_email='census-developers@googlegroups.com',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description='OpenCensus Prometheus Exporter',
include_package_data=True,
long_description=open('README.rst').read(),
install_requires=[
'opencensus >= 0.7.13, < 1.0.0',
'prometheus_client >= 0.5.0, < 1.0.0',
],
extras_require={},
license='Apache-2.0',
packages=find_packages(exclude=('examples', 'tests',)),
namespace_packages=[],
url='https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-prometheus', # noqa: E501
zip_safe=False,
)
| 37.711538
| 130
| 0.675166
|
e5dc2bd28369399cb49299ec4da7b53bd42ad3f4
| 129,460
|
py
|
Python
|
salt/utils/parsers.py
|
johnj/salt
|
b23656fa5ee24047c43ac702d6796a700570f749
|
[
"Apache-2.0"
] | 5
|
2017-02-07T05:39:29.000Z
|
2020-06-13T02:07:33.000Z
|
salt/utils/parsers.py
|
johnj/salt
|
b23656fa5ee24047c43ac702d6796a700570f749
|
[
"Apache-2.0"
] | 86
|
2017-01-27T11:54:46.000Z
|
2020-05-20T06:25:26.000Z
|
salt/utils/parsers.py
|
johnj/salt
|
b23656fa5ee24047c43ac702d6796a700570f749
|
[
"Apache-2.0"
] | 11
|
2017-01-26T19:36:29.000Z
|
2021-12-11T07:54:16.000Z
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
salt.utils.parsers
~~~~~~~~~~~~~~~~~~
This is where all the black magic happens on all of salt's CLI tools.
'''
# pylint: disable=missing-docstring,protected-access,too-many-ancestors,too-few-public-methods
# pylint: disable=attribute-defined-outside-init,no-self-use
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import types
import signal
import getpass
import logging
import optparse
import traceback
from functools import partial
# Import salt libs
import salt.config as config
import salt.defaults.exitcodes
import salt.log.setup as log
import salt.syspaths as syspaths
import salt.version as version
import salt.utils.args
import salt.utils.data
import salt.utils.files
import salt.utils.jid
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
import salt.utils.user
import salt.utils.win_functions
import salt.utils.xdg
import salt.utils.yaml
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.validate.path import is_writeable
from salt.utils.verify import verify_log_files
import salt.exceptions
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
logger = logging.getLogger(__name__)
def _sorted(mixins_or_funcs):
return sorted(
mixins_or_funcs, key=lambda mf: getattr(mf, '_mixin_prio_', 1000)
)
class MixinFuncsContainer(list):
def append(self, func):
if isinstance(func, types.MethodType):
# We only care about unbound methods
func = func.__func__
if func not in self:
# And no duplicates please
list.append(self, func)
class MixInMeta(type):
# This attribute here won't actually do anything. But, if you need to
# specify an order or a dependency within the mix-ins, please define the
# attribute on your own MixIn
_mixin_prio_ = 0
def __new__(mcs, name, bases, attrs):
instance = super(MixInMeta, mcs).__new__(mcs, name, bases, attrs)
if not hasattr(instance, '_mixin_setup'):
raise RuntimeError(
"Don't subclass {0} in {1} if you're not going "
"to use it as a salt parser mix-in.".format(mcs.__name__, name)
)
return instance
class OptionParserMeta(MixInMeta):
def __new__(mcs, name, bases, attrs):
instance = super(OptionParserMeta, mcs).__new__(mcs,
name,
bases,
attrs)
if not hasattr(instance, '_mixin_setup_funcs'):
instance._mixin_setup_funcs = MixinFuncsContainer()
if not hasattr(instance, '_mixin_process_funcs'):
instance._mixin_process_funcs = MixinFuncsContainer()
if not hasattr(instance, '_mixin_after_parsed_funcs'):
instance._mixin_after_parsed_funcs = MixinFuncsContainer()
if not hasattr(instance, '_mixin_before_exit_funcs'):
instance._mixin_before_exit_funcs = MixinFuncsContainer()
for base in _sorted(bases + (instance,)):
func = getattr(base, '_mixin_setup', None)
if func is not None and func not in instance._mixin_setup_funcs:
instance._mixin_setup_funcs.append(func)
func = getattr(base, '_mixin_after_parsed', None)
if func is not None and func not in \
instance._mixin_after_parsed_funcs:
instance._mixin_after_parsed_funcs.append(func)
func = getattr(base, '_mixin_before_exit', None)
if func is not None and func not in \
instance._mixin_before_exit_funcs:
instance._mixin_before_exit_funcs.append(func)
# Mark process_<opt> functions with the base priority for sorting
for func in dir(base):
if not func.startswith('process_'):
continue
func = getattr(base, func)
if getattr(func, '_mixin_prio_', None) is not None:
# Function already has the attribute set, don't override it
continue
if six.PY2:
func.__func__._mixin_prio_ = getattr(
base, '_mixin_prio_', 1000
)
else:
func._mixin_prio_ = getattr(
base, '_mixin_prio_', 1000
)
return instance
class CustomOption(optparse.Option, object):
def take_action(self, action, dest, *args, **kwargs):
# see https://github.com/python/cpython/blob/master/Lib/optparse.py#L786
self.explicit = True
return optparse.Option.take_action(self, action, dest, *args, **kwargs)
class OptionParser(optparse.OptionParser, object):
VERSION = version.__saltstack_version__.formatted_version
usage = '%prog [options]'
epilog = ('You can find additional help about %prog issuing "man %prog" '
'or on http://docs.saltstack.com')
description = None
# Private attributes
_mixin_prio_ = 100
# Setup multiprocessing logging queue listener
_setup_mp_logging_listener_ = False
def __init__(self, *args, **kwargs):
kwargs.setdefault('version', '%prog {0}'.format(self.VERSION))
kwargs.setdefault('usage', self.usage)
if self.description:
kwargs.setdefault('description', self.description)
if self.epilog:
kwargs.setdefault('epilog', self.epilog)
kwargs.setdefault('option_class', CustomOption)
optparse.OptionParser.__init__(self, *args, **kwargs)
if self.epilog and '%prog' in self.epilog:
self.epilog = self.epilog.replace('%prog', self.get_prog_name())
def add_option_group(self, *args, **kwargs):
option_group = optparse.OptionParser.add_option_group(self, *args, **kwargs)
option_group.option_class = CustomOption
return option_group
def parse_args(self, args=None, values=None):
options, args = optparse.OptionParser.parse_args(self, args, values)
if 'args_stdin' in options.__dict__ and options.args_stdin is True:
# Read additional options and/or arguments from stdin and combine
# them with the options and arguments from the command line.
new_inargs = sys.stdin.readlines()
new_inargs = [arg.rstrip('\r\n') for arg in new_inargs]
new_options, new_args = optparse.OptionParser.parse_args(
self,
new_inargs)
options.__dict__.update(new_options.__dict__)
args.extend(new_args)
if six.PY2:
args = salt.utils.data.decode(args)
if options.versions_report:
self.print_versions_report()
self.options, self.args = options, args
# Let's get some proper sys.stderr logging as soon as possible!!!
# This logging handler will be removed once the proper console or
# logfile logging is setup.
temp_log_level = getattr(self.options, 'log_level', None)
log.setup_temp_logger(
'error' if temp_log_level is None else temp_log_level
)
# Gather and run the process_<option> functions in the proper order
process_option_funcs = []
for option_key in options.__dict__:
process_option_func = getattr(
self, 'process_{0}'.format(option_key), None
)
if process_option_func is not None:
process_option_funcs.append(process_option_func)
for process_option_func in _sorted(process_option_funcs):
try:
process_option_func()
except Exception as err: # pylint: disable=broad-except
logger.exception(err)
self.error(
'Error while processing {0}: {1}'.format(
process_option_func, traceback.format_exc(err)
)
)
# Run the functions on self._mixin_after_parsed_funcs
for mixin_after_parsed_func in self._mixin_after_parsed_funcs: # pylint: disable=no-member
try:
mixin_after_parsed_func(self)
except Exception as err: # pylint: disable=broad-except
logger.exception(err)
self.error(
'Error while processing {0}: {1}'.format(
mixin_after_parsed_func, traceback.format_exc(err)
)
)
if self.config.get('conf_file', None) is not None: # pylint: disable=no-member
logger.debug(
'Configuration file path: %s',
self.config['conf_file'] # pylint: disable=no-member
)
# Retain the standard behavior of optparse to return options and args
return options, args
def _populate_option_list(self, option_list, add_help=True):
optparse.OptionParser._populate_option_list(
self, option_list, add_help=add_help
)
for mixin_setup_func in self._mixin_setup_funcs: # pylint: disable=no-member
mixin_setup_func(self)
def _add_version_option(self):
optparse.OptionParser._add_version_option(self)
self.add_option(
'--versions-report',
'-V',
action='store_true',
help='Show program\'s dependencies version number and exit.'
)
def print_versions_report(self, file=sys.stdout): # pylint: disable=redefined-builtin
print('\n'.join(version.versions_report()), file=file)
self.exit(salt.defaults.exitcodes.EX_OK)
def exit(self, status=0, msg=None):
# Run the functions on self._mixin_after_parsed_funcs
for mixin_before_exit_func in self._mixin_before_exit_funcs: # pylint: disable=no-member
try:
mixin_before_exit_func(self)
except Exception as err: # pylint: disable=broad-except
logger.exception(err)
logger.error('Error while processing %s: %s',
six.text_type(mixin_before_exit_func),
traceback.format_exc(err))
if self._setup_mp_logging_listener_ is True:
# Stop logging through the queue
log.shutdown_multiprocessing_logging()
# Stop the logging queue listener process
log.shutdown_multiprocessing_logging_listener(daemonizing=True)
if isinstance(msg, six.string_types) and msg and msg[-1] != '\n':
msg = '{0}\n'.format(msg)
optparse.OptionParser.exit(self, status, msg)
def error(self, msg):
'''
error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
This keeps option parsing exit status uniform for all parsing errors.
'''
self.print_usage(sys.stderr)
self.exit(salt.defaults.exitcodes.EX_USAGE, '{0}: error: {1}\n'.format(self.get_prog_name(), msg))
class MergeConfigMixIn(six.with_metaclass(MixInMeta, object)):
'''
    This mix-in merges the CLI-passed options into the loaded configuration,
    overriding configuration file settings with options that were passed
    explicitly on the command line.
    This mix-in should run last.
'''
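    # Rough sketch of the precedence implemented below (illustrative, the
    # option names are only examples):
    #
    #   explicit CLI flag  >  configuration file value  >  CLI default
    #
    # e.g. with 'log_level: debug' in the config file and no -l flag passed,
    # self.options.log_level ends up as 'debug'; passing '-l info' explicitly
    # writes 'info' back into self.config instead.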
_mixin_prio_ = six.MAXSIZE
def _mixin_setup(self):
if not hasattr(self, 'setup_config') and not hasattr(self, 'config'):
# No configuration was loaded on this parser.
# There's nothing to do here.
return
# Add an additional function that will merge the shell options with
# the config options and if needed override them
self._mixin_after_parsed_funcs.append(self.__merge_config_with_cli)
def __merge_config_with_cli(self):
# Merge parser options
for option in self.option_list:
if option.dest is None:
# --version does not have dest attribute set for example.
# All options defined by us, even if not explicitly(by kwarg),
# will have the dest attribute set
continue
# Get the passed value from shell. If empty get the default one
default = self.defaults.get(option.dest)
value = getattr(self.options, option.dest, default)
if option.dest not in self.config:
# There's no value in the configuration file
if value is not None:
# There's an actual value, add it to the config
self.config[option.dest] = value
elif value is not None and getattr(option, 'explicit', False):
# Only set the value in the config file IF it was explicitly
# specified by the user, this makes it possible to tweak settings
# on the configuration files bypassing the shell option flags'
# defaults
self.config[option.dest] = value
elif option.dest in self.config:
# Let's update the option value with the one from the
# configuration file. This allows the parsers to make use of
# the updated value by using self.options.<option>
setattr(self.options, option.dest, self.config[option.dest])
# Merge parser group options if any
for group in self.option_groups:
for option in group.option_list:
if option.dest is None:
continue
# Get the passed value from shell. If empty get the default one
default = self.defaults.get(option.dest)
value = getattr(self.options, option.dest, default)
if option.dest not in self.config:
# There's no value in the configuration file
if value is not None:
# There's an actual value, add it to the config
self.config[option.dest] = value
elif value is not None and getattr(option, 'explicit', False):
# Only set the value in the config file IF it was explicitly
# specified by the user, this makes it possible to tweak
# settings on the configuration files bypassing the shell
# option flags' defaults
self.config[option.dest] = value
elif option.dest in self.config:
# Let's update the option value with the one from the
# configuration file. This allows the parsers to make use
# of the updated value by using self.options.<option>
setattr(self.options,
option.dest,
self.config[option.dest])
class SaltfileMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = -20
def _mixin_setup(self):
self.add_option(
'--saltfile', default=None,
help='Specify the path to a Saltfile. If not passed, one will be '
'searched for in the current working directory.'
)
def process_saltfile(self):
if self.options.saltfile is None:
# No one passed a Saltfile as an option, environment variable!?
self.options.saltfile = os.environ.get('SALT_SALTFILE', None)
if self.options.saltfile is None:
# If we're here, no one passed a Saltfile either to the CLI tool or
# as an environment variable.
# Is there a Saltfile in the current directory?
try: # cwd may not exist if it was removed but salt was run from it
saltfile = os.path.join(os.getcwd(), 'Saltfile')
except OSError:
saltfile = ''
if os.path.isfile(saltfile):
self.options.saltfile = saltfile
else:
saltfile = os.path.join(os.path.expanduser("~"), '.salt', 'Saltfile')
if os.path.isfile(saltfile):
self.options.saltfile = saltfile
else:
saltfile = self.options.saltfile
if not self.options.saltfile:
# There's still no valid Saltfile? No need to continue...
return
if not os.path.isfile(self.options.saltfile):
self.error("'{0}' file does not exist.\n".format(self.options.saltfile))
# Make sure we have an absolute path
self.options.saltfile = os.path.abspath(self.options.saltfile)
# Make sure we let the user know that we will be loading a Saltfile
logger.info("Loading Saltfile from '%s'", six.text_type(self.options.saltfile))
try:
saltfile_config = config._read_conf_file(saltfile)
except salt.exceptions.SaltConfigurationError as error:
self.error(error.message)
self.exit(salt.defaults.exitcodes.EX_GENERIC,
'{0}: error: {1}\n'.format(self.get_prog_name(), error.message))
if not saltfile_config:
# No configuration was loaded from the Saltfile
return
if self.get_prog_name() not in saltfile_config:
# There's no configuration specific to the CLI tool. Stop!
return
# We just want our own configuration
cli_config = saltfile_config[self.get_prog_name()]
        # If any option's name matches a key from the loaded Saltfile, we need
        # to update its default value
for option in self.option_list:
if option.dest is None:
# --version does not have dest attribute set for example.
continue
if option.dest not in cli_config:
# If we don't have anything in Saltfile for this option, let's
# continue processing right now
continue
# Get the passed value from shell. If empty get the default one
default = self.defaults.get(option.dest)
value = getattr(self.options, option.dest, default)
if value != default:
# The user passed an argument, we won't override it with the
# one from Saltfile, if any
continue
# We reached this far! Set the Saltfile value on the option
setattr(self.options, option.dest, cli_config[option.dest])
option.explicit = True
# Let's also search for options referred in any option groups
for group in self.option_groups:
for option in group.option_list:
if option.dest is None:
continue
if option.dest not in cli_config:
# If we don't have anything in Saltfile for this option,
# let's continue processing right now
continue
# Get the passed value from shell. If empty get the default one
default = self.defaults.get(option.dest)
value = getattr(self.options, option.dest, default)
if value != default:
# The user passed an argument, we won't override it with
# the one from Saltfile, if any
continue
setattr(self.options, option.dest, cli_config[option.dest])
option.explicit = True
# Any left over value in the saltfile can now be safely added
for key in cli_config:
setattr(self.options, key, cli_config[key])
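# Illustrative Saltfile sketch (the keys shown are assumptions, not required
# settings): the file is read with config._read_conf_file() and only the
# section matching the program name is applied, e.g. for salt-ssh:
#
#   salt-ssh:
#     config_dir: /path/to/config
#     max_procs: 30
#
# Saltfile values only replace option defaults; anything passed explicitly on
# the command line still wins, as the default comparison above shows.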
class HardCrashMixin(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 40
_config_filename_ = None
def _mixin_setup(self):
hard_crash = os.environ.get('SALT_HARD_CRASH', False)
self.add_option(
'--hard-crash', action='store_true', default=hard_crash,
help='Raise any original exception rather than exiting gracefully. Default: %default.'
)
class NoParseMixin(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 50
def _mixin_setup(self):
no_parse = os.environ.get('SALT_NO_PARSE', '')
self.add_option(
'--no-parse', default=no_parse,
help='Comma-separated list of named CLI arguments (i.e. argname=value) '
'which should not be parsed as Python data types',
metavar='argname1,argname2,...',
)
def process_no_parse(self):
if self.options.no_parse:
try:
self.options.no_parse = \
[x.strip() for x in self.options.no_parse.split(',')]
except AttributeError:
self.options.no_parse = []
else:
self.options.no_parse = []
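# Illustrative use of --no-parse (the function and argument names are made up):
#
#   salt --no-parse=password,port '*' user.example password=0123 port=010
#
# would keep 'password' and 'port' as the literal strings '0123' and '010'
# instead of letting them be parsed into Python data types.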
class ConfigDirMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = -10
_config_filename_ = None
_default_config_dir_ = syspaths.CONFIG_DIR
_default_config_dir_env_var_ = 'SALT_CONFIG_DIR'
def _mixin_setup(self):
config_dir = os.environ.get(self._default_config_dir_env_var_, None)
if not config_dir:
config_dir = self._default_config_dir_
logger.debug('SYSPATHS setup as: %s', six.text_type(syspaths.CONFIG_DIR))
self.add_option(
'-c', '--config-dir', default=config_dir,
help="Pass in an alternative configuration directory. Default: '%default'."
)
def process_config_dir(self):
self.options.config_dir = os.path.expanduser(self.options.config_dir)
if not os.path.isdir(self.options.config_dir):
# No logging is configured yet
sys.stderr.write(
"WARNING: CONFIG '{0}' directory does not exist.\n".format(
self.options.config_dir
)
)
# Make sure we have an absolute path
self.options.config_dir = os.path.abspath(self.options.config_dir)
if hasattr(self, 'setup_config'):
if not hasattr(self, 'config'):
self.config = {}
try:
self.config.update(self.setup_config())
except (IOError, OSError) as exc:
self.error('Failed to load configuration: {0}'.format(exc))
def get_config_file_path(self, configfile=None):
if configfile is None:
configfile = self._config_filename_
return os.path.join(self.options.config_dir, configfile)
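# Illustrative resolution of the config dir (paths assumed): with
# SALT_CONFIG_DIR unset, a parser whose _config_filename_ is 'master' reads
# <default config dir>/master; exporting SALT_CONFIG_DIR=/tmp/salt-test makes
# get_config_file_path() return /tmp/salt-test/master instead, and an explicit
# '-c /other/dir' flag overrides both.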
class LogLevelMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 10
_default_logging_level_ = 'warning'
_default_logging_logfile_ = None
_logfile_config_setting_name_ = 'log_file'
_loglevel_config_setting_name_ = 'log_level'
_logfile_loglevel_config_setting_name_ = 'log_level_logfile' # pylint: disable=invalid-name
_skip_console_logging_config_ = False
def _mixin_setup(self):
if self._default_logging_logfile_ is None:
# This is an attribute available for programmers, so, raise a
# RuntimeError to let them know about the proper usage.
raise RuntimeError(
'Please set {0}._default_logging_logfile_'.format(
self.__class__.__name__
)
)
group = self.logging_options_group = optparse.OptionGroup(
self, 'Logging Options',
'Logging options which override any settings defined on the '
'configuration files.'
)
self.add_option_group(group)
if not getattr(self, '_skip_console_logging_config_', False):
group.add_option(
'-l', '--log-level',
dest=self._loglevel_config_setting_name_,
choices=list(log.LOG_LEVELS),
help='Console logging log level. One of {0}. Default: \'{1}\'.'.format(
', '.join(["'{}'".format(n) for n in log.SORTED_LEVEL_NAMES]),
self._default_logging_level_
)
)
def _logfile_callback(option, opt, value, parser, *args, **kwargs):
if not os.path.dirname(value):
# if the path is only a file name (no parent directory), assume current directory
value = os.path.join(os.path.curdir, value)
setattr(parser.values, self._logfile_config_setting_name_, value)
group.add_option(
'--log-file',
dest=self._logfile_config_setting_name_,
default=None,
action='callback',
type='string',
callback=_logfile_callback,
help='Log file path. Default: \'{0}\'.'.format(
self._default_logging_logfile_
)
)
group.add_option(
'--log-file-level',
dest=self._logfile_loglevel_config_setting_name_,
choices=list(log.LOG_LEVELS),
help='Logfile logging log level. One of {0}. Default: \'{1}\'.'.format(
', '.join(["'{}'".format(n) for n in log.SORTED_LEVEL_NAMES]),
self._default_logging_level_
)
)
def process_log_level(self):
if not getattr(self.options, self._loglevel_config_setting_name_, None):
# Log level is not set via CLI, checking loaded configuration
if self.config.get(self._loglevel_config_setting_name_, None):
# Is the regular log level setting set?
setattr(self.options,
self._loglevel_config_setting_name_,
self.config.get(self._loglevel_config_setting_name_))
else:
# Nothing is set on the configuration? Let's use the CLI tool
# defined default
setattr(self.options,
self._loglevel_config_setting_name_,
self._default_logging_level_)
# Setup extended logging right before the last step
self._mixin_after_parsed_funcs.append(self.__setup_extended_logging)
# Setup the console and log file configuration before the MP logging
# listener because the MP logging listener may need that config.
self._mixin_after_parsed_funcs.append(self.__setup_logfile_logger_config)
self._mixin_after_parsed_funcs.append(self.__setup_console_logger_config)
# Setup the multiprocessing log queue listener if enabled
self._mixin_after_parsed_funcs.append(self._setup_mp_logging_listener)
# Setup the multiprocessing log queue client if listener is enabled
# and using Windows
self._mixin_after_parsed_funcs.append(self._setup_mp_logging_client)
# Setup the console as the last _mixin_after_parsed_func to run
self._mixin_after_parsed_funcs.append(self.__setup_console_logger)
def process_log_file(self):
if not getattr(self.options, self._logfile_config_setting_name_, None):
# Log file is not set via CLI, checking loaded configuration
if self.config.get(self._logfile_config_setting_name_, None):
# Is the regular log file setting set?
setattr(self.options,
self._logfile_config_setting_name_,
self.config.get(self._logfile_config_setting_name_))
else:
# Nothing is set on the configuration? Let's use the CLI tool
# defined default
setattr(self.options,
self._logfile_config_setting_name_,
self._default_logging_logfile_)
if self._logfile_config_setting_name_ in self.config:
# Remove it from config so it inherits from log_file
self.config.pop(self._logfile_config_setting_name_)
def process_log_level_logfile(self):
if not getattr(self.options, self._logfile_loglevel_config_setting_name_, None):
# Log file level is not set via CLI, checking loaded configuration
if self.config.get(self._logfile_loglevel_config_setting_name_, None):
# Is the regular log file level setting set?
setattr(self.options,
self._logfile_loglevel_config_setting_name_,
self.config.get(self._logfile_loglevel_config_setting_name_))
else:
# Nothing is set on the configuration? Let's use the CLI tool
# defined default
setattr(self.options,
self._logfile_loglevel_config_setting_name_,
# From the console log level config setting
self.config.get(
self._loglevel_config_setting_name_,
self._default_logging_level_
))
if self._logfile_loglevel_config_setting_name_ in self.config:
# Remove it from config so it inherits from log_level_logfile
self.config.pop(self._logfile_loglevel_config_setting_name_)
def __setup_logfile_logger_config(self):
if self._logfile_loglevel_config_setting_name_ in self.config and not \
self.config.get(self._logfile_loglevel_config_setting_name_):
# Remove it from config so it inherits from log_level
self.config.pop(self._logfile_loglevel_config_setting_name_)
loglevel = getattr(self.options,
# From the options setting
self._logfile_loglevel_config_setting_name_,
# From the default setting
self._default_logging_level_
)
logfile = getattr(self.options,
# From the options setting
self._logfile_config_setting_name_,
# From the default setting
self._default_logging_logfile_
)
cli_log_path = 'cli_{0}_log_file'.format(
self.get_prog_name().replace('-', '_')
)
if cli_log_path in self.config and not self.config.get(cli_log_path):
            # Remove it from config so it inherits from log_file
self.config.pop(cli_log_path)
if self._logfile_config_setting_name_ in self.config and not \
self.config.get(self._logfile_config_setting_name_):
# Remove it from config so it inherits from log_file
self.config.pop(self._logfile_config_setting_name_)
if self.config['verify_env'] and self.config['log_level'] not in ('quiet', ):
# Verify the logfile if it was explicitly set but do not try to
# verify the default
if logfile is not None:
                # A log file path is set, verify it
with salt.utils.files.set_umask(0o027):
verify_log_files([logfile], self.config['user'])
if logfile is None:
            # Use the default setting if the logfile wasn't explicitly set
logfile = self._default_logging_logfile_
cli_log_file_fmt = 'cli_{0}_log_file_fmt'.format(
self.get_prog_name().replace('-', '_')
)
if cli_log_file_fmt in self.config and not \
self.config.get(cli_log_file_fmt):
# Remove it from config so it inherits from log_fmt_logfile
self.config.pop(cli_log_file_fmt)
if self.config.get('log_fmt_logfile', None) is None:
# Remove it from config so it inherits from log_fmt_console
self.config.pop('log_fmt_logfile', None)
log_file_fmt = self.config.get(
'log_fmt_logfile',
self.config.get(
'log_fmt_console',
self.config.get(
'log_fmt',
config._DFLT_LOG_FMT_CONSOLE
)
)
)
if self.config.get('log_datefmt_logfile', None) is None:
# Remove it from config so it inherits from log_datefmt_console
self.config.pop('log_datefmt_logfile', None)
if self.config.get('log_datefmt_console', None) is None:
# Remove it from config so it inherits from log_datefmt
self.config.pop('log_datefmt_console', None)
log_file_datefmt = self.config.get(
'log_datefmt_logfile',
self.config.get(
'log_datefmt_console',
self.config.get(
'log_datefmt',
'%Y-%m-%d %H:%M:%S'
)
)
)
if not is_writeable(logfile, check_parent=True):
            # Since we're not able to write to the log file or its parent
            # directory (if the log file does not exist), are we the same user
            # as the one defined in the configuration file?
current_user = salt.utils.user.get_user()
if self.config['user'] != current_user:
# Yep, not the same user!
# Is the current user in ACL?
acl = self.config['publisher_acl']
if salt.utils.stringutils.check_whitelist_blacklist(
current_user, whitelist=six.iterkeys(acl)):
# Yep, the user is in ACL!
# Let's write the logfile to its home directory instead.
xdg_dir = salt.utils.xdg.xdg_config_dir()
user_salt_dir = (xdg_dir if os.path.isdir(xdg_dir) else
os.path.expanduser('~/.salt'))
if not os.path.isdir(user_salt_dir):
os.makedirs(user_salt_dir, 0o750)
logfile_basename = os.path.basename(
self._default_logging_logfile_
)
logger.debug("The user '%s' is not allowed to write to '%s'. "
"The log file will be stored in '~/.salt/'%s'.log'",
six.text_type(current_user),
six.text_type(logfile),
six.text_type(logfile_basename))
logfile = os.path.join(
user_salt_dir, '{0}.log'.format(logfile_basename)
)
# If we haven't changed the logfile path and it's not writeable,
# salt will fail once we try to setup the logfile logging.
# Log rotate options
log_rotate_max_bytes = self.config.get('log_rotate_max_bytes', 0)
log_rotate_backup_count = self.config.get('log_rotate_backup_count', 0)
if not salt.utils.platform.is_windows():
# Not supported on platforms other than Windows.
# Other platforms may use an external tool such as 'logrotate'
if log_rotate_max_bytes != 0:
logger.warning("'log_rotate_max_bytes' is only supported on Windows")
log_rotate_max_bytes = 0
if log_rotate_backup_count != 0:
logger.warning("'log_rotate_backup_count' is only supported on Windows")
log_rotate_backup_count = 0
# Save the settings back to the configuration
self.config[self._logfile_config_setting_name_] = logfile
self.config[self._logfile_loglevel_config_setting_name_] = loglevel
self.config['log_fmt_logfile'] = log_file_fmt
self.config['log_datefmt_logfile'] = log_file_datefmt
self.config['log_rotate_max_bytes'] = log_rotate_max_bytes
self.config['log_rotate_backup_count'] = log_rotate_backup_count
def setup_logfile_logger(self):
if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_:
# On Windows when using a logging listener, all log file logging
# will go through the logging listener.
return
logfile = self.config[self._logfile_config_setting_name_]
loglevel = self.config[self._logfile_loglevel_config_setting_name_]
log_file_fmt = self.config['log_fmt_logfile']
log_file_datefmt = self.config['log_datefmt_logfile']
log_rotate_max_bytes = self.config['log_rotate_max_bytes']
log_rotate_backup_count = self.config['log_rotate_backup_count']
log.setup_logfile_logger(
logfile,
loglevel,
log_format=log_file_fmt,
date_format=log_file_datefmt,
max_bytes=log_rotate_max_bytes,
backup_count=log_rotate_backup_count
)
for name, level in six.iteritems(self.config.get('log_granular_levels', {})):
log.set_logger_level(name, level)
def __setup_extended_logging(self):
if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_:
# On Windows when using a logging listener, all extended logging
# will go through the logging listener.
return
log.setup_extended_logging(self.config)
def _get_mp_logging_listener_queue(self):
return log.get_multiprocessing_logging_queue()
def _setup_mp_logging_listener(self):
if self._setup_mp_logging_listener_:
log.setup_multiprocessing_logging_listener(
self.config,
self._get_mp_logging_listener_queue()
)
def _setup_mp_logging_client(self):
if self._setup_mp_logging_listener_:
# Set multiprocessing logging level even in non-Windows
# environments. In non-Windows environments, this setting will
            # propagate from process to process via fork behavior and will be
# used by child processes if they invoke the multiprocessing
# logging client.
log.set_multiprocessing_logging_level_by_opts(self.config)
if salt.utils.platform.is_windows():
# On Windows, all logging including console and
# log file logging will go through the multiprocessing
# logging listener if it exists.
# This will allow log file rotation on Windows
# since only one process can own the log file
# for log file rotation to work.
log.setup_multiprocessing_logging(
self._get_mp_logging_listener_queue()
)
# Remove the temp logger and any other configured loggers since
# all of our logging is going through the multiprocessing
# logging listener.
log.shutdown_temp_logging()
log.shutdown_console_logging()
log.shutdown_logfile_logging()
def __setup_console_logger_config(self):
# Since we're not going to be a daemon, setup the console logger
logfmt = self.config.get(
'log_fmt_console',
self.config.get(
'log_fmt',
config._DFLT_LOG_FMT_CONSOLE
)
)
if self.config.get('log_datefmt_console', None) is None:
# Remove it from config so it inherits from log_datefmt
self.config.pop('log_datefmt_console', None)
datefmt = self.config.get(
'log_datefmt_console',
self.config.get(
'log_datefmt',
'%Y-%m-%d %H:%M:%S'
)
)
# Save the settings back to the configuration
self.config['log_fmt_console'] = logfmt
self.config['log_datefmt_console'] = datefmt
def __setup_console_logger(self):
# If daemon is set force console logger to quiet
if getattr(self.options, 'daemon', False) is True:
return
if salt.utils.platform.is_windows() and self._setup_mp_logging_listener_:
# On Windows when using a logging listener, all console logging
# will go through the logging listener.
return
# ensure that yaml stays valid with log output
if getattr(self.options, 'output', None) == 'yaml':
log_format = '# {0}'.format(self.config['log_fmt_console'])
else:
log_format = self.config['log_fmt_console']
log.setup_console_logger(
self.config['log_level'],
log_format=log_format,
date_format=self.config['log_datefmt_console']
)
for name, level in six.iteritems(self.config.get('log_granular_levels', {})):
log.set_logger_level(name, level)
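# Rough summary of the logging precedence implemented in LogLevelMixIn above
# (values illustrative): '-l debug' on the command line wins over a
# 'log_level: info' entry in the configuration, which in turn wins over the
# parser's _default_logging_level_ ('warning' unless a subclass overrides it).
# --log-file and --log-file-level follow the same CLI > config > default
# ordering, with the logfile level falling back to the console level when
# nothing else is set.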
class RunUserMixin(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 20
def _mixin_setup(self):
self.add_option(
'-u', '--user',
help='Specify user to run {0}.'.format(self.get_prog_name())
)
class DaemonMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 30
def _mixin_setup(self):
self.add_option(
'-d', '--daemon',
default=False,
action='store_true',
help='Run the {0} as a daemon.'.format(self.get_prog_name())
)
self.add_option(
'--pid-file', dest='pidfile',
default=os.path.join(
syspaths.PIDFILE_DIR, '{0}.pid'.format(self.get_prog_name())
),
help="Specify the location of the pidfile. Default: '%default'."
)
def _mixin_before_exit(self):
if hasattr(self, 'config') and self.config.get('pidfile'):
# We've loaded and merged options into the configuration, it's safe
# to query about the pidfile
if self.check_pidfile():
try:
os.unlink(self.config['pidfile'])
except OSError as err:
# Log error only when running salt-master as a root user.
# Otherwise this can be ignored, since salt-master is able to
# overwrite the PIDfile on the next start.
err_msg = ('PIDfile could not be deleted: %s',
six.text_type(self.config['pidfile']))
if salt.utils.platform.is_windows():
user = salt.utils.win_functions.get_current_user()
if salt.utils.win_functions.is_admin(user):
logger.info(*err_msg)
logger.debug(six.text_type(err))
else:
if not os.getuid():
logger.info(*err_msg)
logger.debug(six.text_type(err))
def set_pidfile(self):
from salt.utils.process import set_pidfile
set_pidfile(self.config['pidfile'], self.config['user'])
def check_pidfile(self):
'''
Report whether a pidfile exists
'''
from salt.utils.process import check_pidfile
return check_pidfile(self.config['pidfile'])
def get_pidfile(self):
'''
Return a pid contained in a pidfile
'''
from salt.utils.process import get_pidfile
return get_pidfile(self.config['pidfile'])
def daemonize_if_required(self):
if self.options.daemon:
if self._setup_mp_logging_listener_ is True:
# Stop the logging queue listener for the current process
# We'll restart it once forked
log.shutdown_multiprocessing_logging_listener(daemonizing=True)
# Late import so logging works correctly
salt.utils.process.daemonize()
# Setup the multiprocessing log queue listener if enabled
self._setup_mp_logging_listener()
def check_running(self):
'''
Check if a pid file exists and if it is associated with
a running process.
'''
if self.check_pidfile():
pid = self.get_pidfile()
if not salt.utils.platform.is_windows():
if self.check_pidfile() and self.is_daemonized(pid) and os.getppid() != pid:
return True
else:
# We have no os.getppid() on Windows. Use salt.utils.win_functions.get_parent_pid
if self.check_pidfile() and self.is_daemonized(pid) and salt.utils.win_functions.get_parent_pid() != pid:
return True
return False
def is_daemonized(self, pid):
from salt.utils.process import os_is_running
return os_is_running(pid)
# Common methods for scripts which can daemonize
def _install_signal_handlers(self):
signal.signal(signal.SIGTERM, self._handle_signals)
signal.signal(signal.SIGINT, self._handle_signals)
def prepare(self):
self.parse_args()
def start(self):
self.prepare()
self._install_signal_handlers()
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
msg = self.__class__.__name__
if signum == signal.SIGINT:
msg += ' received a SIGINT.'
elif signum == signal.SIGTERM:
msg += ' received a SIGTERM.'
logging.getLogger(__name__).warning('%s Exiting.', msg)
self.shutdown(exitmsg='{0} Exited.'.format(msg))
def shutdown(self, exitcode=0, exitmsg=None):
self.exit(exitcode, exitmsg)
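# Minimal usage sketch for daemon scripts built on DaemonMixIn (the subclass
# name is hypothetical); an entry point typically looks roughly like:
#
#   parser = MyDaemonParser()        # subclass mixing in DaemonMixIn
#   parser.start()                   # prepare() -> parse_args() + signal handlers
#   parser.daemonize_if_required()   # fork when --daemon was passed
#   parser.set_pidfile()             # write the --pid-file
#   ...                              # main loop, shutdown() on exit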
class TargetOptionsMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 20
selected_target_option = None
def _mixin_setup(self):
group = self.target_options_group = optparse.OptionGroup(
self, 'Target Options', 'Target selection options.'
)
self.add_option_group(group)
group.add_option(
'-H', '--hosts',
default=False,
action='store_true',
dest='list_hosts',
            help='List all known hosts from the currently visible or otherwise specified rosters.'
)
group.add_option(
'-E', '--pcre',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'servers, use pcre regular expressions.')
)
group.add_option(
'-L', '--list',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'servers, take a comma or whitespace delimited list of '
'servers.')
)
group.add_option(
'-G', '--grain',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use a grain value to identify targets, the syntax '
                  'for the target is the grain key followed by a glob '
'expression: "os:Arch*".')
)
group.add_option(
'-P', '--grain-pcre',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use a grain value to identify targets, the syntax '
'for the target is the grain key followed by a pcre '
'regular expression: "os:Arch.*".')
)
group.add_option(
'-N', '--nodegroup',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use one of the predefined nodegroups to identify a '
'list of targets.')
)
group.add_option(
'-R', '--range',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use a range expression to identify targets. '
'Range expressions look like %cluster.')
)
group = self.additional_target_options_group = optparse.OptionGroup(
self,
'Additional Target Options',
'Additional options for minion targeting.'
)
self.add_option_group(group)
group.add_option(
'--delimiter',
default=DEFAULT_TARGET_DELIM,
help=('Change the default delimiter for matching in multi-level '
'data structures. Default: \'%default\'.')
)
self._create_process_functions()
def _create_process_functions(self):
for option in self.target_options_group.option_list:
def process(opt):
if getattr(self.options, opt.dest):
self.selected_target_option = opt.dest
funcname = 'process_{0}'.format(option.dest)
if not hasattr(self, funcname):
setattr(self, funcname, partial(process, option))
def _mixin_after_parsed(self):
group_options_selected = [
option for option in self.target_options_group.option_list if
getattr(self.options, option.dest) is True
]
if len(group_options_selected) > 1:
self.error(
'The options {0} are mutually exclusive. Please only choose '
'one of them'.format('/'.join(
[option.get_opt_string()
for option in group_options_selected]))
)
self.config['selected_target_option'] = self.selected_target_option
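# Illustrative invocations of the target selectors above (minion names and
# patterns are made up); only one selector may be used per call, which is what
# the mutual-exclusion check in _mixin_after_parsed() enforces:
#
#   salt -E 'web[0-9]+' test.ping       # PCRE match on minion ids
#   salt -L 'web1,web2,db1' test.ping   # explicit comma-separated list
#   salt -G 'os:Arch*' test.ping        # grain glob, key:glob syntax
#   salt -N group1 test.ping            # predefined nodegroup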
class ExtendedTargetOptionsMixIn(TargetOptionsMixIn):
def _mixin_setup(self):
TargetOptionsMixIn._mixin_setup(self)
group = self.target_options_group
group.add_option(
'-C', '--compound',
default=False,
action='store_true',
help=('The compound target option allows for multiple target '
'types to be evaluated, allowing for greater granularity in '
'target matching. The compound target is space delimited, '
'targets other than globs are preceded with an identifier '
'matching the specific targets argument type: salt '
'\'G@os:RedHat and webser* or E@database.*\'.')
)
group.add_option(
'-I', '--pillar',
default=False,
dest='pillar_target',
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use a pillar value to identify targets, the syntax '
'for the target is the pillar key followed by a glob '
'expression: "role:production*".')
)
group.add_option(
'-J', '--pillar-pcre',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use a pillar value to identify targets, the syntax '
'for the target is the pillar key followed by a pcre '
'regular expression: "role:prod.*".')
)
group.add_option(
'-S', '--ipcidr',
default=False,
action='store_true',
help=('Match based on Subnet (CIDR notation) or IP address.')
)
self._create_process_functions()
def process_pillar_target(self):
if self.options.pillar_target:
self.selected_target_option = 'pillar'
class TimeoutMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 10
def _mixin_setup(self):
if not hasattr(self, 'default_timeout'):
raise RuntimeError(
'You need to define the \'default_timeout\' attribute '
'on {0}'.format(self.__class__.__name__)
)
self.add_option(
'-t', '--timeout',
type=int,
default=self.default_timeout,
help=('Change the timeout, if applicable, for the running '
'command (in seconds). Default: %default.')
)
class ArgsStdinMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 10
def _mixin_setup(self):
self.add_option(
'--args-stdin',
default=False,
dest='args_stdin',
action='store_true',
help=('Read additional options and/or arguments from stdin. '
'Each entry is newline separated.')
)
class ProxyIdMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio = 40
def _mixin_setup(self):
self.add_option(
'--proxyid',
default=None,
dest='proxyid',
help=('Id for this proxy.')
)
class ExecutorsMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio = 10
def _mixin_setup(self):
self.add_option(
'--module-executors',
dest='module_executors',
default=None,
metavar='EXECUTOR_LIST',
help=('Set an alternative list of executors to override the one '
'set in minion config.')
)
self.add_option(
'--executor-opts',
dest='executor_opts',
default=None,
metavar='EXECUTOR_OPTS',
help=('Set alternate executor options if supported by executor. '
'Options set by minion config are used by default.')
)
class CacheDirMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio = 40
def _mixin_setup(self):
self.add_option(
'--cachedir',
default='/var/cache/salt/',
dest='cachedir',
help=('Cache Directory')
)
class OutputOptionsMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 40
_include_text_out_ = False
selected_output_option = None
def _mixin_setup(self):
group = self.output_options_group = optparse.OptionGroup(
self, 'Output Options', 'Configure your preferred output format.'
)
self.add_option_group(group)
group.add_option(
'--out', '--output',
dest='output',
help=(
'Print the output from the \'{0}\' command using the '
'specified outputter.'.format(
self.get_prog_name(),
)
)
)
group.add_option(
'--out-indent', '--output-indent',
dest='output_indent',
default=None,
type=int,
help=('Print the output indented by the provided value in spaces. '
                  'Negative values disable indentation. Only applicable in '
'outputters that support indentation.')
)
group.add_option(
'--out-file', '--output-file',
dest='output_file',
default=None,
help='Write the output to the specified file.'
)
group.add_option(
'--out-file-append', '--output-file-append',
action='store_true',
dest='output_file_append',
default=False,
help='Append the output to the specified file.'
)
group.add_option(
'--no-color', '--no-colour',
default=False,
action='store_true',
help='Disable all colored output.'
)
group.add_option(
'--force-color', '--force-colour',
default=False,
action='store_true',
help='Force colored output.'
)
group.add_option(
'--state-output', '--state_output',
default=None,
help=('Override the configured state_output value for minion '
'output. One of \'full\', \'terse\', \'mixed\', \'changes\' or \'filter\'. '
'Default: \'%default\'.')
)
group.add_option(
'--state-verbose', '--state_verbose',
default=None,
help=('Override the configured state_verbose value for minion '
'output. Set to True or False. Default: %default.')
)
for option in self.output_options_group.option_list:
def process(opt):
default = self.defaults.get(opt.dest)
if getattr(self.options, opt.dest, default) is False:
return
self.selected_output_option = opt.dest
funcname = 'process_{0}'.format(option.dest)
if not hasattr(self, funcname):
setattr(self, funcname, partial(process, option))
def process_output(self):
self.selected_output_option = self.options.output
def process_output_file(self):
if self.options.output_file is not None and self.options.output_file_append is False:
if os.path.isfile(self.options.output_file):
try:
with salt.utils.files.fopen(self.options.output_file, 'w'):
# Make this a zero length filename instead of removing
# it. This way we keep the file permissions.
pass
except (IOError, OSError) as exc:
self.error(
'{0}: Access denied: {1}'.format(
self.options.output_file,
exc
)
)
def process_state_verbose(self):
if self.options.state_verbose == "True" or self.options.state_verbose == "true":
self.options.state_verbose = True
elif self.options.state_verbose == "False" or self.options.state_verbose == "false":
self.options.state_verbose = False
def _mixin_after_parsed(self):
group_options_selected = [
option for option in self.output_options_group.option_list if (
getattr(self.options, option.dest) and
(option.dest.endswith('_out') or option.dest == 'output'))
]
if len(group_options_selected) > 1:
self.error(
'The options {0} are mutually exclusive. Please only choose '
'one of them'.format('/'.join([
option.get_opt_string() for
option in group_options_selected
]))
)
self.config['selected_output_option'] = self.selected_output_option
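# Illustrative use of the output options above (the outputter and file path
# are examples):
#
#   salt '*' test.ping --out=json --out-indent=2 --out-file=/tmp/ping.json
#
# --out and any option whose dest ends in '_out' are mutually exclusive, which
# is what the check in _mixin_after_parsed() enforces.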
class ExecutionOptionsMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 10
def _mixin_setup(self):
group = self.execution_group = optparse.OptionGroup(
self,
'Execution Options',
# Include description here as a string
)
group.add_option(
'-L', '--location',
default=None,
help='Specify which region to connect to.'
)
group.add_option(
'-a', '--action',
default=None,
help='Perform an action that may be specific to this cloud '
'provider. This argument requires one or more instance '
'names to be specified.'
)
group.add_option(
'-f', '--function',
nargs=2,
default=None,
metavar='<FUNC-NAME> <PROVIDER>',
help='Perform a function that may be specific to this cloud '
'provider, that does not apply to an instance. This '
'argument requires a provider to be specified (i.e.: nova).'
)
group.add_option(
'-p', '--profile',
default=None,
help='Create an instance using the specified profile.'
)
group.add_option(
'-m', '--map',
default=None,
help='Specify a cloud map file to use for deployment. This option '
'may be used alone, or in conjunction with -Q, -F, -S or -d. '
'The map can also be filtered by a list of VM names.'
)
group.add_option(
'-H', '--hard',
default=False,
action='store_true',
help='Delete all VMs that are not defined in the map file. '
'CAUTION!!! This operation can irrevocably destroy VMs! It '
'must be explicitly enabled in the cloud config file.'
)
group.add_option(
'-d', '--destroy',
default=False,
action='store_true',
help='Destroy the specified instance(s).'
)
group.add_option(
'--no-deploy',
default=True,
dest='deploy',
action='store_false',
help='Don\'t run a deploy script after instance creation.'
)
group.add_option(
'-P', '--parallel',
default=False,
action='store_true',
help='Build all of the specified instances in parallel.'
)
group.add_option(
'-u', '--update-bootstrap',
default=False,
action='store_true',
help='Update salt-bootstrap to the latest stable bootstrap release.'
)
group.add_option(
'-y', '--assume-yes',
default=False,
action='store_true',
help='Default "yes" in answer to all confirmation questions.'
)
group.add_option(
'-k', '--keep-tmp',
default=False,
action='store_true',
help='Do not remove files from /tmp/ after deploy.sh finishes.'
)
group.add_option(
'--show-deploy-args',
default=False,
action='store_true',
help='Include the options used to deploy the minion in the data '
'returned.'
)
group.add_option(
'--script-args',
default=None,
help='Script arguments to be fed to the bootstrap script when '
'deploying the VM.'
)
group.add_option(
'-b', '--bootstrap',
nargs=1,
default=False,
metavar='<HOST> [MINION_ID] [OPTIONS...]',
help='Bootstrap an existing machine.'
)
self.add_option_group(group)
def process_function(self):
if self.options.function:
self.function_name, self.function_provider = self.options.function
if self.function_provider.startswith('-') or \
'=' in self.function_provider:
self.error(
'--function expects two arguments: <function-name> '
'<provider>'
)
class CloudQueriesMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 20
selected_query_option = None
def _mixin_setup(self):
group = self.cloud_queries_group = optparse.OptionGroup(
self,
'Query Options',
# Include description here as a string
)
group.add_option(
'-Q', '--query',
default=False,
action='store_true',
help=('Execute a query and return some information about the '
'nodes running on configured cloud providers.')
)
group.add_option(
'-F', '--full-query',
default=False,
action='store_true',
help=('Execute a query and return all information about the '
'nodes running on configured cloud providers.')
)
group.add_option(
'-S', '--select-query',
default=False,
action='store_true',
help=('Execute a query and return select information about '
'the nodes running on configured cloud providers.')
)
group.add_option(
'--list-providers',
default=False,
action='store_true',
help='Display a list of configured providers.'
)
group.add_option(
'--list-profiles',
default=None,
action='store',
help='Display a list of configured profiles. Pass in a cloud '
'provider to view the provider\'s associated profiles, '
'such as digitalocean, or pass in "all" to list all the '
'configured profiles.'
)
self.add_option_group(group)
self._create_process_functions()
def _create_process_functions(self):
for option in self.cloud_queries_group.option_list:
def process(opt):
if getattr(self.options, opt.dest):
query = 'list_nodes'
if opt.dest == 'full_query':
query += '_full'
elif opt.dest == 'select_query':
query += '_select'
elif opt.dest == 'list_providers':
query = 'list_providers'
if self.args:
self.error(
'\'--list-providers\' does not accept any '
'arguments'
)
elif opt.dest == 'list_profiles':
query = 'list_profiles'
option_dict = vars(self.options)
if option_dict.get('list_profiles') == '--list-providers':
self.error(
'\'--list-profiles\' does not accept '
'\'--list-providers\' as an argument'
)
self.selected_query_option = query
funcname = 'process_{0}'.format(option.dest)
if not hasattr(self, funcname):
setattr(self, funcname, partial(process, option))
def _mixin_after_parsed(self):
group_options_selected = [
option for option in self.cloud_queries_group.option_list if
getattr(self.options, option.dest) is not False and
getattr(self.options, option.dest) is not None
]
if len(group_options_selected) > 1:
self.error(
'The options {0} are mutually exclusive. Please only choose '
'one of them'.format('/'.join([
option.get_opt_string() for option in
group_options_selected
]))
)
self.config['selected_query_option'] = self.selected_query_option
class CloudProvidersListsMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 30
def _mixin_setup(self):
group = self.providers_listings_group = optparse.OptionGroup(
self,
'Cloud Providers Listings',
# Include description here as a string
)
group.add_option(
'--list-locations',
default=None,
help=('Display a list of locations available in configured cloud '
'providers. Pass the cloud provider that available '
'locations are desired on, such as "linode", or pass "all" to '
'list locations for all configured cloud providers.')
)
group.add_option(
'--list-images',
default=None,
help=('Display a list of images available in configured cloud '
'providers. Pass the cloud provider that available images '
'are desired on, such as "linode", or pass "all" to list images '
'for all configured cloud providers.')
)
group.add_option(
'--list-sizes',
default=None,
help=('Display a list of sizes available in configured cloud '
'providers. Pass the cloud provider that available sizes '
'are desired on, such as "AWS", or pass "all" to list sizes '
'for all configured cloud providers.')
)
self.add_option_group(group)
def _mixin_after_parsed(self):
list_options_selected = [
option for option in self.providers_listings_group.option_list if
getattr(self.options, option.dest) is not None
]
if len(list_options_selected) > 1:
self.error(
'The options {0} are mutually exclusive. Please only choose '
'one of them'.format(
'/'.join([
option.get_opt_string() for option in
list_options_selected
])
)
)
class ProfilingPMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 130
def _mixin_setup(self):
group = self.profiling_group = optparse.OptionGroup(
self,
'Profiling support',
# Include description here as a string
)
group.add_option(
'--profiling-path',
dest='profiling_path',
default='/tmp/stats',
            help=('Folder that will hold all generated profiling stats. Default: \'%default\'.')
)
group.add_option(
'--enable-profiling',
dest='profiling_enabled',
default=False,
action='store_true',
help=('Enable generating profiling stats. See also: --profiling-path.')
)
self.add_option_group(group)
class CloudCredentialsMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 30
def _mixin_setup(self):
group = self.cloud_credentials_group = optparse.OptionGroup(
self,
'Cloud Credentials',
# Include description here as a string
)
group.add_option(
'--set-password',
default=None,
nargs=2,
metavar='<USERNAME> <PROVIDER>',
help=('Configure password for a cloud provider and save it to the keyring. '
'PROVIDER can be specified with or without a driver, for example: '
'"--set-password bob rackspace" or more specific '
'"--set-password bob rackspace:openstack" '
'Deprecated.')
)
self.add_option_group(group)
def process_set_password(self):
if self.options.set_password:
raise RuntimeError(
'This functionality is not supported; '
'please see the keyring module at http://docs.saltstack.com/en/latest/topics/sdb/'
)
class EAuthMixIn(six.with_metaclass(MixInMeta, object)):
_mixin_prio_ = 30
def _mixin_setup(self):
group = self.eauth_group = optparse.OptionGroup(
self,
'External Authentication',
# Include description here as a string
)
group.add_option(
'-a', '--auth', '--eauth', '--external-auth',
default='',
dest='eauth',
help=('Specify an external authentication system to use.')
)
group.add_option(
'-T', '--make-token',
default=False,
dest='mktoken',
action='store_true',
help=('Generate and save an authentication token for re-use. The '
'token is generated and made available for the period '
'defined in the Salt Master.')
)
group.add_option(
'--username',
dest='username',
nargs=1,
help=('Username for external authentication.')
)
group.add_option(
'--password',
dest='password',
nargs=1,
help=('Password for external authentication.')
)
self.add_option_group(group)
class MasterOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
MergeConfigMixIn,
LogLevelMixIn,
RunUserMixin,
DaemonMixIn,
SaltfileMixIn)):
description = 'The Salt Master, used to control the Salt Minions'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_logfile_ = config.DEFAULT_MASTER_OPTS['log_file']
_setup_mp_logging_listener_ = True
def setup_config(self):
return config.master_config(self.get_config_file_path())
class MinionOptionParser(six.with_metaclass(OptionParserMeta,
MasterOptionParser)): # pylint: disable=no-init
description = (
'The Salt Minion, receives commands from a remote Salt Master'
)
# ConfigDirMixIn config filename attribute
_config_filename_ = 'minion'
# LogLevelMixIn attributes
_default_logging_logfile_ = config.DEFAULT_MINION_OPTS['log_file']
_setup_mp_logging_listener_ = True
def setup_config(self):
opts = config.minion_config(self.get_config_file_path(), # pylint: disable=no-member
cache_minion_id=True,
ignore_config_errors=False)
# Optimization: disable multiprocessing logging if running as a
# daemon, without engines and without multiprocessing
if not opts.get('engines') and not opts.get('multiprocessing', True) \
and self.options.daemon: # pylint: disable=no-member
self._setup_mp_logging_listener_ = False
return opts
class ProxyMinionOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ProxyIdMixIn,
ConfigDirMixIn,
MergeConfigMixIn,
LogLevelMixIn,
RunUserMixin,
DaemonMixIn,
SaltfileMixIn)): # pylint: disable=no-init
description = (
'The Salt Proxy Minion, connects to and controls devices not able to run a minion.\n'
'Receives commands from a remote Salt Master.'
)
# ConfigDirMixIn config filename attribute
_config_filename_ = 'proxy'
# LogLevelMixIn attributes
_default_logging_logfile_ = config.DEFAULT_PROXY_MINION_OPTS['log_file']
def setup_config(self):
try:
minion_id = self.values.proxyid
except AttributeError:
minion_id = None
return config.proxy_config(self.get_config_file_path(),
cache_minion_id=False,
minion_id=minion_id)
class SyndicOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
MergeConfigMixIn,
LogLevelMixIn,
RunUserMixin,
DaemonMixIn,
SaltfileMixIn)):
description = (
'The Salt Syndic daemon, a special Minion that passes through commands from a\n'
'higher Master. Scale Salt to thousands of hosts or across many different networks.'
)
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_logfile_config_setting_name_ = 'syndic_log_file'
_default_logging_level_ = config.DEFAULT_MASTER_OPTS['log_level']
_default_logging_logfile_ = config.DEFAULT_MASTER_OPTS[_logfile_config_setting_name_]
_setup_mp_logging_listener_ = True
def setup_config(self):
return config.syndic_config(
self.get_config_file_path(),
self.get_config_file_path('minion'))
class SaltCMDOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
MergeConfigMixIn,
TimeoutMixIn,
ExtendedTargetOptionsMixIn,
OutputOptionsMixIn,
LogLevelMixIn,
ExecutorsMixIn,
HardCrashMixin,
SaltfileMixIn,
ArgsStdinMixIn,
EAuthMixIn,
NoParseMixin)):
default_timeout = 5
description = (
'Salt allows for commands to be executed across a swath of remote systems in\n'
'parallel, so they can be both controlled and queried with ease.'
)
usage = '%prog [options] \'<target>\' <function> [arguments]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_level_ = config.DEFAULT_MASTER_OPTS['log_level']
_default_logging_logfile_ = config.DEFAULT_MASTER_OPTS['log_file']
try:
os.getcwd()
except OSError:
sys.exit("Cannot access current working directory. Exiting!")
def _mixin_setup(self):
self.add_option(
'-s', '--static',
default=False,
action='store_true',
help=('Return the data from minions as a group after they '
'all return.')
)
self.add_option(
'-p', '--progress',
default=False,
action='store_true',
help=('Display a progress graph. Requires "progressbar" python package.')
)
self.add_option(
'--failhard',
default=False,
action='store_true',
help=('Stop batch execution upon first "bad" return.')
)
self.add_option(
'--async',
default=False,
dest='async',
action='store_true',
help=('Run the salt command but don\'t wait for a reply.')
)
self.add_option(
'--subset',
default=0,
type=int,
help=('Execute the routine on a random subset of the targeted '
                  'minions. The minions will be verified to have the named '
                  'function before executing.')
)
self.add_option(
'-v', '--verbose',
default=False,
action='store_true',
help=('Turn on command verbosity, display jid and active job '
'queries.')
)
self.add_option(
'--hide-timeout',
dest='show_timeout',
default=True,
action='store_false',
help=('Hide minions that timeout.')
)
self.add_option(
'--show-jid',
default=False,
action='store_true',
help=('Display jid without the additional output of --verbose.')
)
self.add_option(
'-b', '--batch',
'--batch-size',
default='',
dest='batch',
help=('Execute the salt job in batch mode, pass either the number '
'of minions to batch at a time, or the percentage of '
'minions to have running.')
)
self.add_option(
'--batch-wait',
default=0,
dest='batch_wait',
type=float,
help=('Wait the specified time in seconds after each job is done '
'before freeing the slot in the batch for the next one.')
)
self.add_option(
'--batch-safe-limit',
default=0,
dest='batch_safe_limit',
type=int,
help=('Execute the salt job in batch mode if the job would have '
'executed on more than this many minions.')
)
self.add_option(
'--batch-safe-size',
default=8,
dest='batch_safe_size',
help=('Batch size to use for batch jobs created by batch-safe-limit.')
)
self.add_option(
'--return',
default='',
metavar='RETURNER',
help=('Set an alternative return method. By default salt will '
'send the return data from the command back to the master, '
'but the return data can be redirected into any number of '
'systems, databases or applications.')
)
self.add_option(
'--return_config',
default='',
metavar='RETURNER_CONF',
help=('Set an alternative return method. By default salt will '
'send the return data from the command back to the master, '
'but the return data can be redirected into any number of '
'systems, databases or applications.')
)
self.add_option(
'--return_kwargs',
default={},
metavar='RETURNER_KWARGS',
help=('Set any returner options at the command line.')
)
self.add_option(
'-d', '--doc', '--documentation',
dest='doc',
default=False,
action='store_true',
help=('Return the documentation for the specified module or for '
'all modules if none are specified.')
)
self.add_option(
'--args-separator',
dest='args_separator',
default=',',
help=('Set the special argument used as a delimiter between '
'command arguments of compound commands. This is useful '
'when one wants to pass commas as arguments to '
'some of the commands in a compound command.')
)
self.add_option(
'--summary',
dest='cli_summary',
default=False,
action='store_true',
help=('Display summary information about a salt command.')
)
self.add_option(
'--metadata',
default='',
metavar='METADATA',
help=('Pass metadata into Salt, used to search jobs.')
)
self.add_option(
'--output-diff',
dest='state_output_diff',
action='store_true',
default=False,
help=('Report only those states that have changed.')
)
self.add_option(
'--config-dump',
dest='config_dump',
action='store_true',
default=False,
help=('Dump the master configuration values')
)
self.add_option(
'--preview-target',
dest='preview_target',
action='store_true',
default=False,
help=('Show the minions expected to match a target. Does not issue any command.')
)
def _mixin_after_parsed(self):
if len(self.args) <= 1 and not self.options.doc and not self.options.preview_target:
try:
self.print_help()
except Exception: # pylint: disable=broad-except
                # We get an argument that Python's optparse just can't deal
# with. Perhaps stdout was redirected, or a file glob was
# passed in. Regardless, we're in an unknown state here.
sys.stdout.write('Invalid options passed. Please try -h for '
'help.') # Try to warn if we can.
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# Dump the master configuration file, exit normally at the end.
if self.options.config_dump:
cfg = config.master_config(self.get_config_file_path())
sys.stdout.write(
salt.utils.yaml.safe_dump(
cfg,
default_flow_style=False)
)
sys.exit(salt.defaults.exitcodes.EX_OK)
if self.options.preview_target:
# Insert dummy arg which won't be used
self.args.append('not_a_valid_command')
if self.options.doc:
# Include the target
if not self.args:
self.args.insert(0, '*')
if len(self.args) < 2:
# Include the function
self.args.insert(1, 'sys.doc')
if self.args[1] != 'sys.doc':
self.args.insert(1, 'sys.doc')
if len(self.args) > 3:
                self.error('You can only get documentation for one method at a time.')
if self.options.list:
try:
if ',' in self.args[0]:
self.config['tgt'] = self.args[0].replace(' ', '').split(',')
else:
self.config['tgt'] = self.args[0].split()
except IndexError:
self.exit(42, '\nCannot execute command without defining a target.\n\n')
else:
try:
self.config['tgt'] = self.args[0]
except IndexError:
self.exit(42, '\nCannot execute command without defining a target.\n\n')
# Detect compound command and set up the data for it
if self.args:
try:
if ',' in self.args[1]:
self.config['fun'] = self.args[1].split(',')
self.config['arg'] = [[]]
cmd_index = 0
if (self.args[2:].count(self.options.args_separator) ==
len(self.config['fun']) - 1):
# new style parsing: standalone argument separator
for arg in self.args[2:]:
if arg == self.options.args_separator:
cmd_index += 1
self.config['arg'].append([])
else:
self.config['arg'][cmd_index].append(arg)
else:
# old style parsing: argument separator can be inside args
for arg in self.args[2:]:
if self.options.args_separator in arg:
sub_args = arg.split(self.options.args_separator)
for sub_arg_index, sub_arg in enumerate(sub_args):
if sub_arg:
self.config['arg'][cmd_index].append(sub_arg)
if sub_arg_index != len(sub_args) - 1:
cmd_index += 1
self.config['arg'].append([])
else:
self.config['arg'][cmd_index].append(arg)
if len(self.config['fun']) > len(self.config['arg']):
self.exit(42, 'Cannot execute compound command without '
'defining all arguments.\n')
elif len(self.config['fun']) < len(self.config['arg']):
self.exit(42, 'Cannot execute compound command with more '
'arguments than commands.\n')
# parse the args and kwargs before sending to the publish
# interface
for i in range(len(self.config['arg'])):
self.config['arg'][i] = salt.utils.args.parse_input(
self.config['arg'][i],
no_parse=self.options.no_parse)
else:
self.config['fun'] = self.args[1]
self.config['arg'] = self.args[2:]
# parse the args and kwargs before sending to the publish
# interface
self.config['arg'] = salt.utils.args.parse_input(
self.config['arg'],
no_parse=self.options.no_parse)
except IndexError:
self.exit(42, '\nIncomplete options passed.\n\n')
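    # A sketch of how the compound-command parsing above behaves (derived from
    # the code itself, not quoted from the Salt docs): an invocation such as
    #     salt '*' cmd.run,test.ping 'uptime' ,
    # ends up with config['fun'] == ['cmd.run', 'test.ping'] and
    # config['arg'] == [['uptime'], []] -- one argument list per function,
    # split on the --args-separator (default ',').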
def setup_config(self):
return config.client_config(self.get_config_file_path())
class SaltCPOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
OutputOptionsMixIn,
ConfigDirMixIn,
MergeConfigMixIn,
TimeoutMixIn,
TargetOptionsMixIn,
LogLevelMixIn,
HardCrashMixin,
SaltfileMixIn)):
description = (
        'salt-cp is NOT intended to broadcast large files; it is intended to handle text\n'
        'files. salt-cp can be used to distribute configuration files.'
)
usage = '%prog [options] \'<target>\' SOURCE DEST'
default_timeout = 5
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_level_ = config.DEFAULT_MASTER_OPTS['log_level']
_default_logging_logfile_ = config.DEFAULT_MASTER_OPTS['log_file']
def _mixin_setup(self):
file_opts_group = optparse.OptionGroup(self, 'File Options')
file_opts_group.add_option(
'-C', '--chunked',
default=False,
dest='chunked',
action='store_true',
            help='Use chunked file transfer. Supports big files, recursive '
                 'lookup and directory creation.'
)
file_opts_group.add_option(
'-n', '--no-compression',
default=True,
dest='gzip',
action='store_false',
help='Disable gzip compression.'
)
self.add_option_group(file_opts_group)
def _mixin_after_parsed(self):
# salt-cp needs arguments
if len(self.args) <= 1:
self.print_help()
self.error('Insufficient arguments')
if self.options.list:
if ',' in self.args[0]:
self.config['tgt'] = self.args[0].split(',')
else:
self.config['tgt'] = self.args[0].split()
else:
self.config['tgt'] = self.args[0]
self.config['src'] = [os.path.realpath(x) for x in self.args[1:-1]]
self.config['dest'] = self.args[-1]
def setup_config(self):
return config.master_config(self.get_config_file_path())
class SaltKeyOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
MergeConfigMixIn,
LogLevelMixIn,
OutputOptionsMixIn,
RunUserMixin,
HardCrashMixin,
SaltfileMixIn,
EAuthMixIn)):
description = 'salt-key is used to manage Salt authentication keys'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_skip_console_logging_config_ = True
_logfile_config_setting_name_ = 'key_logfile'
_default_logging_logfile_ = config.DEFAULT_MASTER_OPTS[_logfile_config_setting_name_]
def _mixin_setup(self):
actions_group = optparse.OptionGroup(self, 'Actions')
actions_group.set_conflict_handler('resolve')
actions_group.add_option(
'-l', '--list',
default='',
metavar='ARG',
help=('List the public keys. The args '
'\'pre\', \'un\', and \'unaccepted\' will list '
'unaccepted/unsigned keys. '
'\'acc\' or \'accepted\' will list accepted/signed keys. '
'\'rej\' or \'rejected\' will list rejected keys. '
'\'den\' or \'denied\' will list denied keys. '
'Finally, \'all\' will list all keys.')
)
actions_group.add_option(
'-L', '--list-all',
default=False,
action='store_true',
help='List all public keys. Deprecated: use "--list all".'
)
actions_group.add_option(
'-a', '--accept',
default='',
help='Accept the specified public key (use --include-rejected and '
'--include-denied to match rejected and denied keys in '
'addition to pending keys). Globs are supported.',
)
actions_group.add_option(
'-A', '--accept-all',
default=False,
action='store_true',
help='Accept all pending keys.'
)
actions_group.add_option(
'-r', '--reject',
default='',
help='Reject the specified public key. Use --include-accepted and '
'--include-denied to match accepted and denied keys in '
'addition to pending keys. Globs are supported.'
)
actions_group.add_option(
'-R', '--reject-all',
default=False,
action='store_true',
help='Reject all pending keys.'
)
actions_group.add_option(
'--include-all',
default=False,
action='store_true',
help='Include rejected/accepted keys when accepting/rejecting. '
'Deprecated: use "--include-rejected" and "--include-accepted".'
)
actions_group.add_option(
'--include-accepted',
default=False,
action='store_true',
help='Include accepted keys when rejecting.'
)
actions_group.add_option(
'--include-rejected',
default=False,
action='store_true',
help='Include rejected keys when accepting.'
)
actions_group.add_option(
'--include-denied',
default=False,
action='store_true',
help='Include denied keys when accepting/rejecting.'
)
actions_group.add_option(
'-p', '--print',
default='',
help='Print the specified public key.'
)
actions_group.add_option(
'-P', '--print-all',
default=False,
action='store_true',
help='Print all public keys.'
)
actions_group.add_option(
'-d', '--delete',
default='',
help='Delete the specified key. Globs are supported.'
)
actions_group.add_option(
'-D', '--delete-all',
default=False,
action='store_true',
help='Delete all keys.'
)
actions_group.add_option(
'-f', '--finger',
default='',
help='Print the specified key\'s fingerprint.'
)
actions_group.add_option(
'-F', '--finger-all',
default=False,
action='store_true',
help='Print all keys\' fingerprints.'
)
self.add_option_group(actions_group)
self.add_option(
'-q', '--quiet',
default=False,
action='store_true',
help='Suppress output.'
)
self.add_option(
'-y', '--yes',
default=False,
action='store_true',
help='Answer "Yes" to all questions presented. Default: %default.'
)
self.add_option(
'--rotate-aes-key',
default=True,
help=('Setting this to False prevents the master from refreshing '
                  'the key session when keys are deleted or rejected; this '
                  'lowers the security of the key deletion/rejection operation. '
'Default: %default.')
)
self.add_option(
'--preserve-minions',
default=False,
help=('Setting this to True prevents the master from deleting '
                  'the minion cache when keys are deleted; this may have '
                  'security implications if compromised minions authenticate with '
                  'a previously deleted minion ID. '
'Default: %default.')
)
key_options_group = optparse.OptionGroup(
self, 'Key Generation Options'
)
self.add_option_group(key_options_group)
key_options_group.add_option(
'--gen-keys',
default='',
help='Set a name to generate a keypair for use with salt.'
)
key_options_group.add_option(
'--gen-keys-dir',
default='.',
help=('Set the directory to save the generated keypair, only '
'works with "gen_keys_dir" option. Default: \'%default\'.')
)
key_options_group.add_option(
'--keysize',
default=2048,
type=int,
            help=('Set the keysize for the generated key. Only works with '
                  'the "--gen-keys" option. The key size must be between '
                  '2048 and 32768, otherwise an error is raised. '
                  'Default: %default.')
)
key_options_group.add_option(
'--gen-signature',
default=False,
action='store_true',
            help=('Create a signature file of the master\'s public key named '
                  'master_pubkey_signature. The signature can be sent to a '
                  'minion in the master\'s auth-reply and enables the minion '
                  'to verify the master\'s public key cryptographically. '
'This requires a new signing-key-pair which can be auto-created '
'with the --auto-create parameter.')
)
key_options_group.add_option(
'--priv',
default='',
type=str,
help=('The private-key file to create a signature with.')
)
key_options_group.add_option(
'--signature-path',
default='',
type=str,
help=('The path where the signature file should be written.')
)
key_options_group.add_option(
'--pub',
default='',
type=str,
help=('The public-key file to create a signature for.')
)
key_options_group.add_option(
'--auto-create',
default=False,
action='store_true',
help=('Auto-create a signing key-pair if it does not yet exist.')
)
def process_config_dir(self):
if self.options.gen_keys:
# We're generating keys, override the default behavior of this
# function if we don't have any access to the configuration
# directory.
if not os.access(self.options.config_dir, os.R_OK):
if not os.path.isdir(self.options.gen_keys_dir):
                    # This would be done at a later stage, but we need it now
# so no errors are thrown
os.makedirs(self.options.gen_keys_dir)
self.options.config_dir = self.options.gen_keys_dir
super(SaltKeyOptionParser, self).process_config_dir()
# Don't change its mixin priority!
process_config_dir._mixin_prio_ = ConfigDirMixIn._mixin_prio_
def setup_config(self):
keys_config = config.master_config(self.get_config_file_path())
if self.options.gen_keys:
# Since we're generating the keys, some defaults can be assumed
# or tweaked
keys_config[self._logfile_config_setting_name_] = os.devnull
keys_config['pki_dir'] = self.options.gen_keys_dir
return keys_config
def process_rotate_aes_key(self):
if hasattr(self.options, 'rotate_aes_key') and isinstance(self.options.rotate_aes_key, six.string_types):
if self.options.rotate_aes_key.lower() == 'true':
self.options.rotate_aes_key = True
elif self.options.rotate_aes_key.lower() == 'false':
self.options.rotate_aes_key = False
def process_preserve_minions(self):
if hasattr(self.options, 'preserve_minions') and isinstance(self.options.preserve_minions, six.string_types):
if self.options.preserve_minions.lower() == 'true':
self.options.preserve_minions = True
elif self.options.preserve_minions.lower() == 'false':
self.options.preserve_minions = False
def process_list(self):
# Filter accepted list arguments as soon as possible
if not self.options.list:
return
if not self.options.list.startswith(('acc', 'pre', 'un', 'rej', 'den', 'all')):
self.error(
'\'{0}\' is not a valid argument to \'--list\''.format(
self.options.list
)
)
def process_keysize(self):
if self.options.keysize < 2048:
self.error('The minimum value for keysize is 2048')
elif self.options.keysize > 32768:
self.error('The maximum value for keysize is 32768')
def process_gen_keys_dir(self):
# Schedule __create_keys_dir() to run if there's a value for
# --create-keys-dir
self._mixin_after_parsed_funcs.append(self.__create_keys_dir) # pylint: disable=no-member
def _mixin_after_parsed(self):
# It was decided to always set this to info, since it really all is
# info or error.
self.config['loglevel'] = 'info'
def __create_keys_dir(self):
if not os.path.isdir(self.config['gen_keys_dir']):
os.makedirs(self.config['gen_keys_dir'])
class SaltCallOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ProxyIdMixIn,
ConfigDirMixIn,
ExecutorsMixIn,
MergeConfigMixIn,
LogLevelMixIn,
OutputOptionsMixIn,
HardCrashMixin,
SaltfileMixIn,
ArgsStdinMixIn,
ProfilingPMixIn,
NoParseMixin,
CacheDirMixIn)):
description = (
'salt-call is used to execute module functions locally on a Salt Minion'
)
usage = '%prog [options] <function> [arguments]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'minion'
# LogLevelMixIn attributes
_default_logging_level_ = config.DEFAULT_MINION_OPTS['log_level']
_default_logging_logfile_ = config.DEFAULT_MINION_OPTS['log_file']
def _mixin_setup(self):
self.add_option(
'-g', '--grains',
dest='grains_run',
default=False,
action='store_true',
help='Return the information generated by the salt grains.'
)
self.add_option(
'-m', '--module-dirs',
default=[],
action='append',
help=('Specify an additional directory to pull modules from. '
'Multiple directories can be provided by passing '
'`-m/--module-dirs` multiple times.')
)
self.add_option(
'-d', '--doc', '--documentation',
dest='doc',
default=False,
action='store_true',
help=('Return the documentation for the specified module or for '
'all modules if none are specified.')
)
self.add_option(
'--master',
default='',
dest='master',
help=('Specify the master to use. The minion must be '
'authenticated with the master. If this option is omitted, '
'the master options from the minion config will be used. '
'If multi masters are set up the first listed master that '
'responds will be used.')
)
self.add_option(
'--return',
default='',
metavar='RETURNER',
help=('Set salt-call to pass the return data to one or many '
'returner interfaces.')
)
self.add_option(
'--local',
default=False,
action='store_true',
help='Run salt-call locally, as if there was no master running.'
)
self.add_option(
'--file-root',
default=None,
help='Set this directory as the base file root.'
)
self.add_option(
'--pillar-root',
default=None,
help='Set this directory as the base pillar root.'
)
self.add_option(
'--states-dir',
default=None,
help='Set this directory to search for additional states.'
)
self.add_option(
'--retcode-passthrough',
default=False,
action='store_true',
help=('Exit with the salt call retcode and not the salt binary '
'retcode.')
)
self.add_option(
'--metadata',
default=False,
dest='print_metadata',
action='store_true',
help=('Print out the execution metadata as well as the return. '
'This will print out the outputter data, the return code, '
'etc.')
)
self.add_option(
'--set-metadata',
dest='metadata',
default=None,
metavar='METADATA',
help=('Pass metadata into Salt, used to search jobs.')
)
self.add_option(
'--id',
default='',
dest='id',
help=('Specify the minion id to use. If this option is omitted, '
'the id option from the minion config will be used.')
)
self.add_option(
'--skip-grains',
default=False,
action='store_true',
help=('Do not load grains.')
)
self.add_option(
'--refresh-grains-cache',
default=False,
action='store_true',
help=('Force a refresh of the grains cache.')
)
self.add_option(
'-t', '--timeout',
default=60,
dest='auth_timeout',
type=int,
help=('Change the timeout, if applicable, for the running '
'command. Default: %default.')
)
self.add_option(
'--output-diff',
dest='state_output_diff',
action='store_true',
default=False,
help=('Report only those states that have changed.')
)
def _mixin_after_parsed(self):
if not self.args and not self.options.grains_run and not self.options.doc:
self.print_help()
self.error('Requires function, --grains or --doc')
elif len(self.args) >= 1:
if self.options.grains_run:
self.error('-g/--grains does not accept any arguments')
if self.options.doc and len(self.args) > 1:
self.error('You can only get documentation for one method at one time')
self.config['fun'] = self.args[0]
self.config['arg'] = self.args[1:]
def setup_config(self):
if self.options.proxyid:
opts = config.proxy_config(self.get_config_file_path(configfile='proxy'),
cache_minion_id=True,
minion_id=self.options.proxyid)
else:
opts = config.minion_config(self.get_config_file_path(),
cache_minion_id=True)
return opts
def process_module_dirs(self):
for module_dir in self.options.module_dirs:
# Provide some backwards compatibility with previous comma
# delimited format
if ',' in module_dir:
self.config.setdefault('module_dirs', []).extend(
os.path.abspath(x) for x in module_dir.split(','))
continue
self.config.setdefault('module_dirs',
[]).append(os.path.abspath(module_dir))
class SaltRunOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
MergeConfigMixIn,
TimeoutMixIn,
LogLevelMixIn,
HardCrashMixin,
SaltfileMixIn,
OutputOptionsMixIn,
ArgsStdinMixIn,
ProfilingPMixIn,
EAuthMixIn,
NoParseMixin)):
default_timeout = 1
description = (
'salt-run is the frontend command for executing Salt Runners.\n'
'Salt Runners are modules used to execute convenience functions on the Salt Master'
)
usage = '%prog [options] <function> [arguments]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_level_ = config.DEFAULT_MASTER_OPTS['log_level']
_default_logging_logfile_ = config.DEFAULT_MASTER_OPTS['log_file']
def _mixin_setup(self):
self.add_option(
'-d', '--doc', '--documentation',
dest='doc',
default=False,
action='store_true',
help=('Display documentation for runners, pass a runner or '
'runner.function to see documentation on only that runner '
'or function.')
)
self.add_option(
'--async',
default=False,
action='store_true',
help='Start the runner operation and immediately return control.'
)
self.add_option(
'--skip-grains',
default=False,
action='store_true',
help='Do not load grains.'
)
group = self.output_options_group = optparse.OptionGroup(
self, 'Output Options', 'Configure your preferred output format.'
)
self.add_option_group(group)
group.add_option(
'--quiet',
default=False,
action='store_true',
help='Do not display the results of the run.'
)
def _mixin_after_parsed(self):
if self.options.doc and len(self.args) > 1:
self.error('You can only get documentation for one method at one time')
if len(self.args) > 0:
self.config['fun'] = self.args[0]
else:
self.config['fun'] = ''
if len(self.args) > 1:
self.config['arg'] = self.args[1:]
else:
self.config['arg'] = []
def setup_config(self):
return config.client_config(self.get_config_file_path())
class SaltSSHOptionParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
MergeConfigMixIn,
LogLevelMixIn,
TargetOptionsMixIn,
OutputOptionsMixIn,
SaltfileMixIn,
HardCrashMixin,
NoParseMixin)):
usage = '%prog [options] \'<target>\' <function> [arguments]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_logfile_config_setting_name_ = 'ssh_log_file'
_default_logging_level_ = config.DEFAULT_MASTER_OPTS['log_level']
_default_logging_logfile_ = config.DEFAULT_MASTER_OPTS[_logfile_config_setting_name_]
def _mixin_setup(self):
self.add_option(
'-r', '--raw', '--raw-shell',
dest='raw_shell',
default=False,
action='store_true',
            help=('Don\'t execute a salt routine on the targets; execute a '
'raw shell command.')
)
self.add_option(
'--roster',
dest='roster',
default='flat',
help=('Define which roster system to use, this defines if a '
'database backend, scanner, or custom roster system is '
'used. Default: \'flat\'.')
)
self.add_option(
'--roster-file',
dest='roster_file',
default='',
help=('Define an alternative location for the default roster '
'file location. The default roster file is called roster '
'and is found in the same directory as the master config '
'file.')
)
self.add_option(
'--refresh', '--refresh-cache',
dest='refresh_cache',
default=False,
action='store_true',
help=('Force a refresh of the master side data cache of the '
'target\'s data. This is needed if a target\'s grains have '
'been changed and the auto refresh timeframe has not been '
'reached.')
)
self.add_option(
'--max-procs',
dest='ssh_max_procs',
default=25,
type=int,
help='Set the number of concurrent minions to communicate with. '
'This value defines how many processes are opened up at a '
                 'time to manage connections; the more running processes, the '
'faster communication should be. Default: %default.'
)
self.add_option(
'--extra-filerefs',
dest='extra_filerefs',
default=None,
help='Pass in extra files to include in the state tarball.'
)
self.add_option('--min-extra-modules',
dest='min_extra_mods', default=None,
                        help='A single module or a comma-separated list of extra '
                             'Python modules to be included in Minimal Salt.')
self.add_option(
'--thin-extra-modules',
dest='thin_extra_mods',
default=None,
            help='A single module or a comma-separated list of extra '
                 'Python modules to be included in Thin Salt.')
self.add_option(
'-v', '--verbose',
default=False,
action='store_true',
help='Turn on command verbosity, display jid.'
)
self.add_option(
'-s', '--static',
default=False,
action='store_true',
help='Return the data from minions as a group after they all return.'
)
self.add_option(
'-w', '--wipe',
default=False,
action='store_true',
dest='ssh_wipe',
help='Remove the deployment of the salt files when done executing.',
)
self.add_option(
'-W', '--rand-thin-dir',
default=False,
action='store_true',
help=('Select a random temp dir to deploy on the remote system. '
'The dir will be cleaned after the execution.'))
self.add_option(
'-t', '--regen-thin', '--thin',
dest='regen_thin',
default=False,
action='store_true',
help=('Trigger a thin tarball regeneration. This is needed if '
'custom grains/modules/states have been added or updated.'))
self.add_option(
'--python2-bin',
default='python2',
help='Path to a python2 binary which has salt installed.'
)
self.add_option(
'--python3-bin',
default='python3',
help='Path to a python3 binary which has salt installed.'
)
self.add_option(
'--jid',
default=None,
help='Pass a JID to be used instead of generating one.'
)
ssh_group = optparse.OptionGroup(
self, 'SSH Options',
'Parameters for the SSH client.'
)
ssh_group.add_option(
'--remote-port-forwards',
dest='ssh_remote_port_forwards',
            help='Set up remote port forwarding using the same syntax as with '
'the -R parameter of ssh. A comma separated list of port '
'forwarding definitions will be translated into multiple '
'-R parameters.'
)
ssh_group.add_option(
'--ssh-option',
dest='ssh_options',
action='append',
help='Equivalent to the -o ssh command option. Passes options to '
'the SSH client in the format used in the client configuration file. '
'Can be used multiple times.'
)
self.add_option_group(ssh_group)
auth_group = optparse.OptionGroup(
self, 'Authentication Options',
'Parameters affecting authentication.'
)
auth_group.add_option(
'--priv',
dest='ssh_priv',
help='Ssh private key file.'
)
auth_group.add_option(
'--priv-passwd',
dest='ssh_priv_passwd',
default='',
help='Passphrase for ssh private key file.'
)
auth_group.add_option(
'-i',
'--ignore-host-keys',
dest='ignore_host_keys',
default=False,
action='store_true',
help='By default ssh host keys are honored and connections will '
'ask for approval. Use this option to disable '
'StrictHostKeyChecking.'
)
auth_group.add_option(
'--no-host-keys',
dest='no_host_keys',
default=False,
action='store_true',
help='Removes all host key checking functionality from SSH session.'
)
auth_group.add_option(
'--user',
dest='ssh_user',
default='root',
help='Set the default user to attempt to use when '
'authenticating.'
)
auth_group.add_option(
'--passwd',
dest='ssh_passwd',
default='',
help='Set the default password to attempt to use when '
'authenticating.'
)
auth_group.add_option(
'--askpass',
dest='ssh_askpass',
default=False,
action='store_true',
help='Interactively ask for the SSH password with no echo - avoids '
                 'the password showing up in process arguments or shell history.'
)
auth_group.add_option(
'--key-deploy',
dest='ssh_key_deploy',
default=False,
action='store_true',
help='Set this flag to attempt to deploy the authorized ssh key '
'with all minions. This combined with --passwd can make '
'initial deployment of keys very fast and easy.'
)
auth_group.add_option(
'--identities-only',
dest='ssh_identities_only',
default=False,
action='store_true',
            help='Use only the authentication identity files configured in the '
                 'ssh_config files. See the IdentitiesOnly flag in man ssh_config.'
)
auth_group.add_option(
'--sudo',
dest='ssh_sudo',
default=False,
action='store_true',
help='Run command via sudo.'
)
auth_group.add_option(
'--update-roster',
dest='ssh_update_roster',
default=False,
action='store_true',
            help='If the hostname is not found in the roster, store the '
                 'information in the default roster file (flat).'
)
self.add_option_group(auth_group)
scan_group = optparse.OptionGroup(
self, 'Scan Roster Options',
'Parameters affecting scan roster.'
)
scan_group.add_option(
'--scan-ports',
default='22',
dest='ssh_scan_ports',
help='Comma-separated list of ports to scan in the scan roster.',
)
scan_group.add_option(
'--scan-timeout',
default=0.01,
dest='ssh_scan_timeout',
help='Scanning socket timeout for the scan roster.',
)
self.add_option_group(scan_group)
def _mixin_after_parsed(self):
if not self.args:
self.print_help()
self.error('Insufficient arguments')
if self.options.list:
if ',' in self.args[0]:
self.config['tgt'] = self.args[0].split(',')
else:
self.config['tgt'] = self.args[0].split()
else:
self.config['tgt'] = self.args[0]
self.config['argv'] = self.args[1:]
if not self.config['argv'] or not self.config['tgt']:
self.print_help()
self.error('Insufficient arguments')
# Add back the --no-parse options so that shimmed/wrapped commands
# handle the arguments correctly.
if self.options.no_parse:
self.config['argv'].append(
'--no-parse=' + ','.join(self.options.no_parse))
if self.options.ssh_askpass:
self.options.ssh_passwd = getpass.getpass('Password: ')
for group in self.option_groups:
for option in group.option_list:
if option.dest == 'ssh_passwd':
option.explicit = True
break
def setup_config(self):
return config.master_config(self.get_config_file_path())
def process_jid(self):
if self.options.jid is not None:
if not salt.utils.jid.is_jid(self.options.jid):
self.error('\'{0}\' is not a valid JID'.format(self.options.jid))
class SaltCloudParser(six.with_metaclass(OptionParserMeta,
OptionParser,
LogLevelMixIn,
MergeConfigMixIn,
OutputOptionsMixIn,
ConfigDirMixIn,
CloudQueriesMixIn,
ExecutionOptionsMixIn,
CloudProvidersListsMixIn,
CloudCredentialsMixIn,
HardCrashMixin,
SaltfileMixIn)):
description = (
'Salt Cloud is the system used to provision virtual machines on various public\n'
'clouds via a cleanly controlled profile and mapping system'
)
usage = '%prog [options] <-m MAP | -p PROFILE> <NAME> [NAME2 ...]'
# ConfigDirMixIn attributes
_config_filename_ = 'cloud'
# LogLevelMixIn attributes
_default_logging_level_ = config.DEFAULT_CLOUD_OPTS['log_level']
_default_logging_logfile_ = config.DEFAULT_CLOUD_OPTS['log_file']
def print_versions_report(self, file=sys.stdout): # pylint: disable=redefined-builtin
print('\n'.join(version.versions_report(include_salt_cloud=True)),
file=file)
self.exit(salt.defaults.exitcodes.EX_OK)
def parse_args(self, args=None, values=None):
try:
# Late import in order not to break setup
from salt.cloud import libcloudfuncs
libcloudfuncs.check_libcloud_version()
except ImportError as exc:
self.error(exc)
return super(SaltCloudParser, self).parse_args(args, values)
def _mixin_after_parsed(self):
if 'DUMP_SALT_CLOUD_CONFIG' in os.environ:
import pprint
            print('Salt Cloud configuration dump (INCLUDES SENSITIVE DATA):')
pprint.pprint(self.config)
self.exit(salt.defaults.exitcodes.EX_OK)
if self.args:
self.config['names'] = self.args
def setup_config(self):
try:
return config.cloud_config(self.get_config_file_path())
except salt.exceptions.SaltCloudConfigError as exc:
self.error(exc)
class SPMParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
LogLevelMixIn,
MergeConfigMixIn,
SaltfileMixIn)):
'''
The CLI parser object used to fire up the Salt SPM system.
'''
description = 'SPM is used to manage 3rd party formulas and other Salt components'
usage = '%prog [options] <function> <argument>'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'spm'
# LogLevelMixIn attributes
_logfile_config_setting_name_ = 'spm_logfile'
_default_logging_logfile_ = config.DEFAULT_SPM_OPTS[_logfile_config_setting_name_]
def _mixin_setup(self):
self.add_option(
'-y', '--assume-yes',
default=False,
action='store_true',
help='Default "yes" in answer to all confirmation questions.'
)
self.add_option(
'-f', '--force',
default=False,
action='store_true',
help='Default "yes" in answer to all confirmation questions.'
)
self.add_option(
'-v', '--verbose',
default=False,
action='store_true',
help='Display more detailed information.'
)
def _mixin_after_parsed(self):
# spm needs arguments
if len(self.args) <= 1:
if not self.args or self.args[0] not in ('update_repo',):
self.print_help()
self.error('Insufficient arguments')
def setup_config(self):
return salt.config.spm_config(self.get_config_file_path())
class SaltAPIParser(six.with_metaclass(OptionParserMeta,
OptionParser,
ConfigDirMixIn,
LogLevelMixIn,
DaemonMixIn,
MergeConfigMixIn)):
'''
The CLI parser object used to fire up the Salt API system.
'''
description = (
'The Salt API system manages network API connectors for the Salt Master'
)
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_logfile_config_setting_name_ = 'api_logfile'
_default_logging_logfile_ = config.DEFAULT_API_OPTS[_logfile_config_setting_name_]
def setup_config(self):
return salt.config.api_config(self.get_config_file_path()) # pylint: disable=no-member
| 39.277913
| 121
| 0.548733
|
0a20a25b1bd3c040c1463417134adfd5fdc0a8db
| 9,193
|
py
|
Python
|
SiameseZero/SiameseZero_TrainSupport.py
|
hermes2507/Birdsong-Phrase-Clasification
|
fc74cb77113c6f70328c33d9bb4c64de45060d6c
|
[
"MIT"
] | null | null | null |
SiameseZero/SiameseZero_TrainSupport.py
|
hermes2507/Birdsong-Phrase-Clasification
|
fc74cb77113c6f70328c33d9bb4c64de45060d6c
|
[
"MIT"
] | null | null | null |
SiameseZero/SiameseZero_TrainSupport.py
|
hermes2507/Birdsong-Phrase-Clasification
|
fc74cb77113c6f70328c33d9bb4c64de45060d6c
|
[
"MIT"
] | null | null | null |
import numpy as np
from keras.models import Model
from keras.layers import Dense, Activation, Input, Lambda,LSTM,Dropout,Bidirectional, Flatten, Conv2D, Reshape, MaxPooling2D
from keras.initializers import RandomNormal
from sklearn.model_selection import train_test_split
import keras.optimizers as ko
from keras.utils import plot_model
from keras.models import load_model
from keras import backend as K
import tensorflow as tf
import librosa as lb
import librosa.display
import matplotlib.pyplot as plt
from scipy.signal import freqz
import os
import glob
import copy
import seaborn as sns
import re
import pickle
import operator
import IPython.display as ipd
import itertools
import numpy.random as rng
import random
import pandas as pd
#Define Keras Model
def LSTM_branch(input_shape):
input = Input(shape=input_shape)
#x = Lambda(lambda x: tf.cast(x, tf.float32) - 0.5)(input)
#x = Reshape((64,128,1))(input)
#x = Conv2D(32,(5,5),activation='relu',kernel_initializer=RandomNormal(mean=0.0, stddev=1e-2),bias_initializer=RandomNormal(mean=0.5, stddev=1e-2))(x)
#x = MaxPooling2D()(x)
#x = Conv2D(64,(3,3),activation='relu',kernel_initializer=RandomNormal(mean=0.0, stddev=1e-2),bias_initializer=RandomNormal(mean=0.5, stddev=1e-2))(x)
#x = MaxPooling2D()(x)
#x = Conv2D(64,(2,2),activation='relu',kernel_initializer=RandomNormal(mean=0.0, stddev=1e-2),bias_initializer=RandomNormal(mean=0.5, stddev=1e-2))(x)
#x = MaxPooling2D()(x)
#x = Conv2D(128,(2,2),activation='relu',kernel_initializer=RandomNormal(mean=0.0, stddev=1e-2),bias_initializer=RandomNormal(mean=0.5, stddev=1e-2))(x)
x = Flatten()(input)
return Model(input,x,name="CNN")
# Loss and metrics
def euclidean_distance(vects):
x, y = vects
#return K.sqrt(K.sum(K.square(x - y), axis=-1, keepdims=True))
sum_square = K.sum(K.square(x - y), axis=1, keepdims=True) #Original
return K.sqrt(K.maximum(sum_square, K.epsilon())) #Original
def eucl_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0], 1)
def contrastive_loss(y_true, y_pred):
'''Contrastive loss from Hadsell-et-al.'06
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
'''
margin = 1
square_pred = K.square(y_pred)
margin_square = K.square(K.maximum(margin - y_pred, 0))
return K.mean(y_true * square_pred + (1 - y_true) * margin_square)
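# Restating the loss implemented above (margin = 1):
#     L = mean( y * d^2 + (1 - y) * max(margin - d, 0)^2 )
# e.g. a positive pair (y = 1) at distance d = 0.3 contributes 0.09, while a
# negative pair (y = 0) at the same distance contributes (1 - 0.3)^2 = 0.49,
# so the loss pulls same-class pairs together and pushes different-class pairs
# apart up to the margin.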
def compute_accuracy(y_true, y_pred):
'''Compute classification accuracy with a fixed threshold on distances.
'''
pred = y_pred.ravel() < 0.5
return np.mean(pred == y_true)
def filter_by_freq(target,threshold):
filtered = dict()
for key in target:
if len(target[key]) >= threshold:
filtered[key] = target[key]
return filtered
#Create support set averaging samples from training of full set
def create_support_set(train_set):
support_set = dict()
for k in train_set.keys():
support_set[k] = np.mean(train_set[k],axis=0)
return support_set
def split_set(new_filtered,train_size):
#Returns train and test set
train = dict()
test = dict()
for k in new_filtered.keys():
train[k],test[k] = train_test_split(new_filtered[k],train_size=train_size)
return train, test
#Generate train set for k-shot learning
def get_batch(dataset,k,n):
"""Create batch of 2*n pairs per class using up to k examples, n same class, n different class"""
pairs = []
labels = []
categories = dataset.keys()
#Create subset of dataset with only k elements per class
k_set = dict()
for cat in categories:
k_set[cat] = random.sample(dataset[cat],k) #Take k samples with no replacement per class
for i in range(n):
for cat in categories:
z1, z2 = random.choice(k_set[cat]), random.choice(k_set[cat])
pairs += [[z1,z2]] #Same class pair
            #Pick a different category than the current "cat"
while True:
notcat = random.choice(list(categories))
if(notcat != cat):
break
z1, z2 = random.choice(k_set[cat]), random.choice(k_set[notcat])
pairs += [[z1,z2]] #different class pair
labels += [1, 0] #1 to same pairs, 0 to contrastive
return np.array(pairs), np.array(labels)
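# Shape note, derived from get_batch above and assuming the 64x128 spectrogram
# features used as the model input further below: with C classes the returned
# pairs array has shape (2*n*C, 2, 64, 128) and labels alternate [1, 0, 1, 0, ...]
# (1 for a same-class pair, 0 for a contrastive pair).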
# # Load features from all phrases
with open("features_total.pkl", "rb") as input_file:
total_features = pickle.load(input_file)
#Convert the spectrogram amplitudes to decibels in place and cast to int8 to save memory
total_features_db = dict()  # note: not used below, the conversion is done in place on total_features
for k in total_features.keys():
for i in range(len(total_features[k])):
total_features[k][i] = lb.amplitude_to_db(total_features[k][i],top_db=65.0)
total_features[k][i] = total_features[k][i].astype('int8')
#Get most common phrases
filt_features = filter_by_freq(total_features,12)
total_features = 0  # drop the reference to the unfiltered features to free memory
#Create support set from averages
support_set = dict()
for k in filt_features.keys():
support_set[k] = np.mean(filt_features[k],axis=0)
support_set_array = np.array([s for s in list(support_set.values())])
#Create classification set
def create_classif_task(test_set):
classif_test = []
classif_labels = []
#use the full test set
for k in test_set.keys():
for a in test_set[k]:
classif_test.append(a)
classif_labels.append(k)
return (np.array(classif_test),classif_labels)
def get_predictions(support_set,classif_test,model):
    predictions = []
    support_set_array = np.array([s for s in list(support_set.values())])
    n_classes = len(support_set_array)
    # Each test sample is repeated once per support-set class, so slice in
    # blocks of n_classes rather than using a hard-coded class count.
    classif_test_repeated = np.repeat(classif_test,n_classes,axis=0)
    I, L = pd.factorize(list(support_set.keys()))
    for k in range(len(classif_test)):
        pred_support = model.predict([classif_test_repeated[n_classes*k:n_classes*(k+1)],support_set_array]).ravel()
        pred_class = np.argmin(pred_support)  # class whose support sample is closest
        predictions.append(L[pred_class])
    return predictions
def train_model(x,y,labels,epochs):
"Creates, trains and returns trained model"
input_shape = (64,128) #(Timesteps,n_features)
lstm = LSTM_branch(input_shape)
inputA = Input(shape=input_shape,name="InputA")
inputB = Input(shape=input_shape,name="InputB")
encodedA = lstm(inputA)
encodedB = lstm(inputB)
distance = Lambda(euclidean_distance,output_shape=eucl_dist_output_shape,name="distance")([encodedA, encodedB])
    model = Model(inputs=[inputA,inputB], outputs=distance)
model.compile(optimizer='adam', loss=contrastive_loss)
history = model.fit([x, y],labels,epochs=epochs,batch_size=256,shuffle=True)
return model, history.history['loss']
def generate_sets(k):
#Generate train_test set
train_set, test_set = split_set(filt_features,k)
train_pairs, train_labels = get_batch(train_set,k,1000)
min_phrases_test = min([len(i) for i in test_set.values()])
test_pairs, test_labels = get_batch(test_set,min_phrases_test,100)
te1 = test_pairs[:,0,:,:]
te2 = test_pairs[:,1,:,:]
tr1 = train_pairs[:,0,:,:]
tr2 = train_pairs[:,1,:,:]
return tr1,tr2,train_labels,train_set,te1,te2,test_labels,test_set,train_set
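# tr1/tr2 (and te1/te2) returned above split every pair into its two members,
# so tr1[i] and tr2[i] are the spectrograms fed to the two branches of the
# Siamese network for training pair i.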
def compute_one_run(k,epochs):
tr1,tr2,train_labels,train_set,te1,te2,test_labels,test_set,train_set = generate_sets(k)
model, history = train_model(tr1,tr2,train_labels,epochs)
#Verification task evaluation (test)
v_pred_te = model.predict([te1,te2])
v_acc_te = compute_accuracy(test_labels,v_pred_te)
#Verification task evaluation (train)
v_pred_tr = model.predict([tr1,tr2])
v_acc_tr = compute_accuracy(train_labels,v_pred_tr)
#Classification task evaluation (test)
classif_test, classif_labels_test = create_classif_task(test_set)
support_set = create_support_set(train_set)
predictions_test = get_predictions(support_set,classif_test,model)
c_acc_te = np.mean([predictions_test[i] == classif_labels_test[i] for i in range(len(predictions_test))])
#Classification task evaluation (train)
classif_train, classif_labels_train = create_classif_task(train_set)
predictions_train = get_predictions(support_set,classif_train,model)
c_acc_tr = np.mean([predictions_train[i] == classif_labels_train[i] for i in range(len(predictions_train))])
#Accuracy per class (test)
acc_c_class_test = dict()
for k in test_set.keys():
k_indices = list(filter(lambda x: classif_labels_test[x] == k, range(len(classif_labels_test))))
acc_c_class_test[k] = np.mean([predictions_test[i] == classif_labels_test[i] for i in k_indices])
#Accuracy per class (train)
acc_c_class_train = dict()
for k in train_set.keys():
k_indices = list(filter(lambda x: classif_labels_train[x] == k, range(len(classif_labels_train))))
acc_c_class_train[k] = np.mean([predictions_train[i] == classif_labels_train[i] for i in k_indices])
return (v_acc_tr,v_acc_te,c_acc_tr,c_acc_te,acc_c_class_train,acc_c_class_test,history)
H = []
n = 30
shots = 7
for i in range(n):
print("Experiment: " + str(i+1) + " from " + str(n))
X = compute_one_run(k=shots,epochs=5)
H.append(X)
K.clear_session()
with open('k'+ str(shots) +'.pickle', 'wb') as f:
pickle.dump(H, f)
| 37.218623
| 153
| 0.703361
|
3d4d65d03d12d8fbd88923a5e303a3761aafda7a
| 1,918
|
py
|
Python
|
src/sanger_sequencing/analysis/sample.py
|
biosustain/sanger-sequencing
|
eefedd79b625062f919910e4b3df8a4c428662c7
|
[
"Apache-2.0"
] | 1
|
2021-10-04T13:46:45.000Z
|
2021-10-04T13:46:45.000Z
|
src/sanger_sequencing/analysis/sample.py
|
biosustain/sanger-sequencing
|
eefedd79b625062f919910e4b3df8a4c428662c7
|
[
"Apache-2.0"
] | null | null | null |
src/sanger_sequencing/analysis/sample.py
|
biosustain/sanger-sequencing
|
eefedd79b625062f919910e4b3df8a4c428662c7
|
[
"Apache-2.0"
] | 3
|
2020-01-16T11:32:46.000Z
|
2021-08-04T01:08:07.000Z
|
# Copyright (c) 2018-2020 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide sample analysis functions."""
import logging
from Bio.SeqRecord import SeqRecord
from numpy import arange, array, asarray, nanmedian
from ..config import Configuration
__all__ = ("trim_sample",)
logger = logging.getLogger(__name__)
def trim_sample(seq: SeqRecord) -> (int, SeqRecord, array, int, float):
"""Cut off low quality ends of a Sanger sequencing record."""
logger.debug("Trim sample.")
config = Configuration()
scores = asarray(seq.letter_annotations["phred_quality"])
median = float(nanmedian(scores))
if median < config.threshold:
message = (
f"The median Phred quality ({median}) is below the "
f"required threshold ({config.threshold})."
)
logger.error(message)
raise ValueError(message)
mask = scores >= config.threshold
index = arange(len(mask), dtype=int)
min_i = index[mask][0]
max_i = index[mask][-1] + 1 # Since Python excludes upper range limit.
logger.debug(
"Cutting %d nucleotides at the beginning and %d at the end.",
min_i + 1,
len(seq) - max_i,
)
return (
min_i,
seq[min_i:max_i],
scores[min_i:max_i],
len(seq) - max_i,
median,
)
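# Illustrative usage (an assumption for this sketch, not part of the module):
# trimming an ABI trace read with Biopython, e.g.
#     from Bio import SeqIO
#     record = SeqIO.read("sample.ab1", "abi")
#     start, trimmed, scores, end_cut, median = trim_sample(record)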
| 30.444444
| 79
| 0.677268
|
d511848899c5c747e1f4f1e3976b0817e0b5da1c
| 11,347
|
py
|
Python
|
ctypesgen/main.py
|
DJAndries/ctypesgen
|
0bdd4a8d6486bc8e873d5a3a6ee3ebca988ab4d9
|
[
"BSD-2-Clause"
] | 10
|
2017-09-11T22:30:42.000Z
|
2021-03-24T08:35:21.000Z
|
ctypesgen/main.py
|
DJAndries/ctypesgen
|
0bdd4a8d6486bc8e873d5a3a6ee3ebca988ab4d9
|
[
"BSD-2-Clause"
] | 3
|
2017-04-12T06:12:36.000Z
|
2019-11-24T02:04:07.000Z
|
ctypesgen/main.py
|
DJAndries/ctypesgen
|
0bdd4a8d6486bc8e873d5a3a6ee3ebca988ab4d9
|
[
"BSD-2-Clause"
] | 8
|
2017-04-12T05:38:33.000Z
|
2019-08-01T16:22:36.000Z
|
# -*- coding: us-ascii -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
"""
Main loop for ctypesgen.
"""
import optparse, sys
from . import options as core_options
from . import parser as core_parser
from . import printer_python, printer_json, processor
from . import messages as msgs
from . import version
def find_names_in_modules(modules):
names = set()
for module in modules:
try:
mod = __import__(module)
        except Exception:  # skip modules that cannot be imported
            pass
else:
names.update(dir(mod))
return names
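# For example (a hypothetical call), find_names_in_modules(["ctypes"]) returns
# a set containing names such as "CDLL" and "c_int"; main() stores this set as
# options.other_known_names further below.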
def option_callback_W(option, opt, value, parser):
# Options preceded by a "-Wl," are simply treated as though the "-Wl,"
# is not there? I don't understand the purpose of this code...
if len(value) < 4 or value[0:3] != "l,-":
raise optparse.BadOptionError("not in '-Wl,<opt>' form: %s%s" % (opt, value))
opt = value[2:]
if opt not in ["-L", "-R", "--rpath"]:
raise optparse.BadOptionError("-Wl option must be -L, -R" " or --rpath, not " + value[2:])
# Push the linker option onto the list for further parsing.
parser.rargs.insert(0, value)
def option_callback_libdir(option, opt, value, parser):
# There are two sets of linker search paths: those for use at compile time
# and those for use at runtime. Search paths specified with -L, -R, or
# --rpath are added to both sets.
parser.values.compile_libdirs.append(value)
parser.values.runtime_libdirs.append(value)
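# e.g. "--libdir /opt/foo/lib" (or "-L /opt/foo/lib") appends "/opt/foo/lib" to
# both compile_libdirs and runtime_libdirs, whereas --compile-libdir and
# --runtime-libdir below each touch only one of the two lists.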
def main(givenargs=None):
usage = "usage: %prog [options] /path/to/header.h ..."
op = optparse.OptionParser(usage=usage, version=version.VERSION_NUMBER)
# Parameters
op.add_option(
"-o",
"--output",
dest="output",
metavar="FILE",
help="write wrapper to FILE [default stdout]",
)
op.add_option(
"-l",
"--library",
dest="libraries",
action="append",
default=[],
metavar="LIBRARY",
help="link to LIBRARY",
)
op.add_option(
"",
"--include",
dest="other_headers",
action="append",
default=[],
metavar="HEADER",
help="include system header HEADER (e.g. stdio.h or stdlib.h)",
)
op.add_option(
"-m",
"--module",
"--link-module",
action="append",
dest="modules",
metavar="MODULE",
default=[],
help="use symbols from Python module MODULE",
)
op.add_option(
"-I",
"--includedir",
dest="include_search_paths",
action="append",
default=[],
metavar="INCLUDEDIR",
help="add INCLUDEDIR as a directory to search for headers",
)
op.add_option(
"-W",
action="callback",
callback=option_callback_W,
metavar="l,OPTION",
type="str",
help="where OPTION is -L, -R, or --rpath",
)
op.add_option(
"-L",
"-R",
"--rpath",
"--libdir",
action="callback",
callback=option_callback_libdir,
metavar="LIBDIR",
type="str",
help="Add LIBDIR to the search path (both compile-time and run-time)",
)
op.add_option(
"",
"--compile-libdir",
action="append",
dest="compile_libdirs",
metavar="LIBDIR",
default=[],
help="Add LIBDIR to the compile-time library search path.",
)
op.add_option(
"",
"--runtime-libdir",
action="append",
dest="runtime_libdirs",
metavar="LIBDIR",
default=[],
help="Add LIBDIR to the run-time library search path.",
)
# Parser options
op.add_option(
"",
"--cpp",
dest="cpp",
default="gcc -E",
help="The command to invoke the c preprocessor, including any "
"necessary options (default: gcc -E)",
)
op.add_option(
"-D",
"--define",
action="append",
dest="cpp_defines",
metavar="MACRO",
default=[],
help="Add a definition to the preprocessor via commandline",
)
op.add_option(
"-U",
"--undefine",
action="append",
dest="cpp_undefines",
metavar="NAME",
default=[],
help="Instruct the preprocessor to undefine the specified macro via commandline",
)
op.add_option(
"",
"--save-preprocessed-headers",
metavar="FILENAME",
dest="save_preprocessed_headers",
default=None,
help="Save the preprocessed headers to the specified FILENAME",
)
op.add_option(
"",
"--optimize-lexer",
dest="optimize_lexer",
action="store_true",
default=False,
help="Run the lexer in optimized mode. This mode requires write "
"access to lextab.py file stored within the ctypesgen package.",
)
# Processor options
op.add_option(
"-a",
"--all-headers",
action="store_true",
dest="all_headers",
default=False,
help="include symbols from all headers, including system headers",
)
op.add_option(
"",
"--builtin-symbols",
action="store_true",
dest="builtin_symbols",
default=False,
help="include symbols automatically generated by the preprocessor",
)
op.add_option(
"",
"--no-macros",
action="store_false",
dest="include_macros",
default=True,
help="Don't output macros.",
)
op.add_option(
"",
"--no-undefs",
action="store_false",
dest="include_undefs",
default=True,
help="Do not remove macro definitions as per #undef directives",
)
op.add_option(
"-i",
"--include-symbols",
action="append",
dest="include_symbols",
metavar="REGEXPR",
default=[],
help="Regular expression for symbols to always include. Multiple "
"instances of this option will be combined into a single expression "
"doing something like '(expr1|expr2|expr3)'.",
)
op.add_option(
"-x",
"--exclude-symbols",
action="append",
dest="exclude_symbols",
metavar="REGEXPR",
default=[],
help="Regular expression for symbols to exclude. Multiple instances "
"of this option will be combined into a single expression doing "
"something like '(expr1|expr2|expr3)'.",
)
op.add_option(
"",
"--no-stddef-types",
action="store_true",
dest="no_stddef_types",
default=False,
help="Do not support extra C types from stddef.h",
)
op.add_option(
"",
"--no-gnu-types",
action="store_true",
dest="no_gnu_types",
default=False,
help="Do not support extra GNU C types",
)
op.add_option(
"",
"--no-python-types",
action="store_true",
dest="no_python_types",
default=False,
help="Do not support extra C types built in to Python",
)
# Printer options
op.add_option(
"",
"--header-template",
dest="header_template",
default=None,
metavar="TEMPLATE",
help="Use TEMPLATE as the header template in the output file.",
)
op.add_option(
"",
"--strip-build-path",
dest="strip_build_path",
default=None,
metavar="BUILD_PATH",
help="Strip build path from header paths in the wrapper file.",
)
op.add_option(
"",
"--insert-file",
dest="inserted_files",
default=[],
action="append",
metavar="FILENAME",
help="Add the contents of FILENAME to the end of the wrapper file.",
)
op.add_option(
"",
"--output-language",
dest="output_language",
metavar="LANGUAGE",
default="py",
choices=("py", "py32", "py27", "py25", "json"),
help="Choose output language (`py'[default], `py32', `py27', `py25', or "
"`json'). The implementation for py32 does appear to be "
"compatible down to at least Python2.7.15. py25 and py27 are in "
"any case _not_ compatible with >= Python3. The default choice "
"(py) attempts to select `py32', `py27', or `py25' based on the "
"version of Python that runs this script.",
)
op.add_option(
"-P",
"--strip-prefix",
dest="strip_prefixes",
default=[],
action="append",
metavar="REGEXPR",
help="Regular expression to match prefix to strip from all symbols. "
"Multiple instances of this option will be combined into a single "
"expression doing something like '(expr1|expr2|expr3)'.",
)
# Error options
op.add_option(
"",
"--all-errors",
action="store_true",
default=False,
dest="show_all_errors",
help="Display all warnings and errors even " "if they would not affect output.",
)
op.add_option(
"",
"--show-long-errors",
action="store_true",
default=False,
dest="show_long_errors",
help="Display long error messages " "instead of abbreviating error messages.",
)
op.add_option(
"",
"--no-macro-warnings",
action="store_false",
default=True,
dest="show_macro_warnings",
help="Do not print macro warnings.",
)
op.add_option(
"",
"--debug-level",
dest="debug_level",
default=0,
type="int",
help="Run ctypesgen with specified debug level (also applies to yacc parser)",
)
op.set_defaults(**core_options.default_values)
(options, args) = op.parse_args(givenargs)
options.headers = args
# Figure out what names will be defined by imported Python modules
options.other_known_names = find_names_in_modules(options.modules)
# Required parameters
if len(args) < 1:
msgs.error_message("No header files specified", cls="usage")
sys.exit(1)
if len(options.libraries) == 0:
msgs.warning_message("No libraries specified", cls="usage")
# Check output language
printer = None
if options.output_language.startswith("py"):
printer = printer_python.WrapperPrinter
elif options.output_language == "json":
printer = printer_json.WrapperPrinter
else:
msgs.error_message("No such output language `" + options.output_language + "'", cls="usage")
sys.exit(1)
# Step 1: Parse
descriptions = core_parser.parse(options.headers, options)
# Step 2: Process
processor.process(descriptions, options)
# Step 3: Print
printer(options.output, options, descriptions)
msgs.status_message("Wrapping complete.")
# Correct what may be a common mistake
if descriptions.all == []:
if not options.all_headers:
msgs.warning_message(
"There wasn't anything of use in the "
"specified header file(s). Perhaps you meant to run with "
"--all-headers to include objects from included sub-headers? ",
cls="usage",
)
| 28.946429
| 100
| 0.569754
|
370eb3db478526d33abd6ce5881a710bbb6af57f
| 8,780
|
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/web_infrastructure/htpasswd.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/web_infrastructure/htpasswd.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/web_infrastructure/htpasswd.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Nimbis Services, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: htpasswd
short_description: manage user files for basic authentication
description:
- Add and remove username/password entries in a password file using htpasswd.
- This is used by web servers such as Apache and Nginx for basic authentication.
options:
path:
required: true
aliases: [ dest, destfile ]
description:
- Path to the file that contains the usernames and passwords
name:
required: true
aliases: [ username ]
description:
- User name to add or remove
password:
required: false
description:
- Password associated with user.
- Must be specified if user does not exist yet.
crypt_scheme:
required: false
choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
default: "apr_md5_crypt"
description:
- Encryption scheme to be used. As well as the four choices listed
here, you can also use any other hash supported by passlib, such as
md5_crypt and sha256_crypt, which are linux passwd hashes. If you
do so the password file will not be compatible with Apache or Nginx
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the user entry should be present or not
create:
required: false
type: bool
default: "yes"
description:
- Used with C(state=present). If specified, the file will be created
if it does not already exist. If set to "no", will fail if the
file does not exist
notes:
- "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
- "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
- "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
requirements: [ passlib>=1.6 ]
author: "Ansible Core Team"
extends_documentation_fragment: files
'''
EXAMPLES = """
# Add a user to a password file and ensure permissions are set
- htpasswd:
path: /etc/nginx/passwdfile
name: janedoe
password: '9s36?;fyNp'
owner: root
group: www-data
mode: 0640
# Remove a user from a password file
- htpasswd:
path: /etc/apache2/passwdfile
name: foobar
state: absent
# Add a user to a password file suitable for use by libpam-pwdfile
- htpasswd:
path: /etc/mail/passwords
name: alex
password: oedu2eGh
crypt_scheme: md5_crypt
"""
import os
import tempfile
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
PASSLIB_IMP_ERR = None
try:
from passlib.apache import HtpasswdFile, htpasswd_context
from passlib.context import CryptContext
import passlib
except ImportError:
PASSLIB_IMP_ERR = traceback.format_exc()
passlib_installed = False
else:
passlib_installed = True
apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
def create_missing_directories(dest):
destpath = os.path.dirname(dest)
if not os.path.exists(destpath):
os.makedirs(destpath)
def present(dest, username, password, crypt_scheme, create, check_mode):
""" Ensures user is present
Returns (msg, changed) """
if crypt_scheme in apache_hashes:
context = htpasswd_context
else:
context = CryptContext(schemes=[crypt_scheme] + apache_hashes)
if not os.path.exists(dest):
if not create:
raise ValueError('Destination %s does not exist' % dest)
if check_mode:
return ("Create %s" % dest, True)
create_missing_directories(dest)
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
else:
ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
if getattr(ht, 'set_password', None):
ht.set_password(username, password)
else:
ht.update(username, password)
ht.save()
return ("Created %s and added %s" % (dest, username), True)
else:
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
else:
ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
found = None
if getattr(ht, 'check_password', None):
found = ht.check_password(username, password)
else:
found = ht.verify(username, password)
if found:
return ("%s already present" % username, False)
else:
if not check_mode:
if getattr(ht, 'set_password', None):
ht.set_password(username, password)
else:
ht.update(username, password)
ht.save()
return ("Add/update %s" % username, True)
def absent(dest, username, check_mode):
""" Ensures user is absent
Returns (msg, changed) """
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=False)
else:
ht = HtpasswdFile(dest)
if username not in ht.users():
return ("%s not present" % username, False)
else:
if not check_mode:
ht.delete(username)
ht.save()
return ("Remove %s" % username, True)
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
arg_spec = dict(
path=dict(required=True, aliases=["dest", "destfile"]),
name=dict(required=True, aliases=["username"]),
password=dict(required=False, default=None, no_log=True),
crypt_scheme=dict(required=False, default="apr_md5_crypt"),
state=dict(required=False, default="present"),
create=dict(type='bool', default='yes'),
)
module = AnsibleModule(argument_spec=arg_spec,
add_file_common_args=True,
supports_check_mode=True)
path = module.params['path']
username = module.params['name']
password = module.params['password']
crypt_scheme = module.params['crypt_scheme']
state = module.params['state']
create = module.params['create']
check_mode = module.check_mode
if not passlib_installed:
module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR)
# Check file for blank lines in effort to avoid "need more than 1 value to unpack" error.
try:
f = open(path, "r")
except IOError:
# No preexisting file to remove blank lines from
f = None
else:
try:
lines = f.readlines()
finally:
f.close()
        # Rewriting the file would make the module report a change, so only rewrite it if it actually has blank lines
strip = False
for line in lines:
if not line.strip():
strip = True
break
if strip:
            # In check mode, redirect the rewrite to a temporary file so the real file is left untouched
if check_mode:
temp = tempfile.NamedTemporaryFile()
path = temp.name
f = open(path, "w")
try:
                for line in lines:
                    if line.strip():
                        f.write(line)
finally:
f.close()
try:
if state == 'present':
(msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
elif state == 'absent':
if not os.path.exists(path):
module.exit_json(msg="%s not present" % username,
warnings="%s does not exist" % path, changed=False)
(msg, changed) = absent(path, username, check_mode)
else:
module.fail_json(msg="Invalid state: %s" % state)
        (msg, changed) = check_file_attrs(module, changed, msg)
module.exit_json(msg=msg, changed=changed)
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
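# The blank-line handling in main() above rewrites the htpasswd file without empty lines and,
# in check mode, redirects the rewrite to a temporary file so the real file is never modified.
# Below is a minimal, hedged sketch of that technique; it is standalone, never called, and the
# parameter names are illustrative only.
def _strip_blank_lines_sketch(path, check_mode=False):
    import tempfile  # local import keeps this sketch self-contained
    with open(path, "r") as src:
        lines = [line for line in src.readlines() if line.strip()]
    target = path
    if check_mode:
        tmp = tempfile.NamedTemporaryFile(mode="w", delete=False)
        tmp.close()
        target = tmp.name  # throwaway file keeps the original untouched
    with open(target, "w") as dst:
        dst.writelines(lines)
    return target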
| 31.927273
| 110
| 0.633371
|
0295f272be893159818a933ae3333e5fb7932f24
| 231
|
py
|
Python
|
inn/inn_hotels/doctype/inn_folio_transaction_bundle/test_inn_folio_transaction_bundle.py
|
vinhnguyent090/front-desk
|
7384642e9206e30855986465a7ef63c8fd76ef2a
|
[
"MIT"
] | 4
|
2021-08-19T03:33:36.000Z
|
2021-08-28T16:37:52.000Z
|
inn/inn_hotels/doctype/inn_folio_transaction_bundle/test_inn_folio_transaction_bundle.py
|
vinhnguyent090/front-desk
|
7384642e9206e30855986465a7ef63c8fd76ef2a
|
[
"MIT"
] | 98
|
2020-02-24T08:12:47.000Z
|
2021-08-21T07:54:03.000Z
|
inn/inn_hotels/doctype/inn_folio_transaction_bundle/test_inn_folio_transaction_bundle.py
|
vinhnguyent090/front-desk
|
7384642e9206e30855986465a7ef63c8fd76ef2a
|
[
"MIT"
] | 13
|
2021-01-24T18:08:43.000Z
|
2022-03-29T09:23:25.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Core Initiative and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestInnFolioTransactionBundle(unittest.TestCase):
pass
| 21
| 55
| 0.787879
|
b0e199dbedb2b04acd8090c8cf56fc47233d0c06
| 2,212
|
py
|
Python
|
setup.py
|
hroncok/towncrier
|
2841c12f2e7ebfc40d9cc3228ca468d339225549
|
[
"MIT"
] | null | null | null |
setup.py
|
hroncok/towncrier
|
2841c12f2e7ebfc40d9cc3228ca468d339225549
|
[
"MIT"
] | null | null | null |
setup.py
|
hroncok/towncrier
|
2841c12f2e7ebfc40d9cc3228ca468d339225549
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from setuptools import setup, find_packages
# If incremental is not present then setuptools just silently uses v0.0.0 so
# let's import it and fail instead.
import incremental
setup(
name="towncrier",
maintainer="Amber Brown",
maintainer_email="hawkowl@twistedmatrix.com",
url="https://github.com/hawkowl/towncrier",
project_urls={
"Chat": "https://webchat.freenode.net/?channels=%23twisted",
"Maillist": "https://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-python",
"Issues": "https://github.com/twisted/towncrier/issues",
"Repository": "https://github.com/twisted/towncrier",
"Tests": "https://github.com/twisted/towncrier/actions?query=branch%3Amaster",
"Coverage": "https://codecov.io/gh/twisted/towncrier",
"Distribution": "https://pypi.org/project/towncrier",
},
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
use_incremental=True,
install_requires=["click", "click-default-group", "incremental", "jinja2", "toml"],
extras_require={"dev": ["packaging"]},
package_dir={"": "src"},
packages=find_packages("src"),
license="MIT",
zip_safe=False,
include_package_data=True,
description="Building newsfiles for your project.",
long_description=open("README.rst").read(),
entry_points={"console_scripts": ["towncrier = towncrier._shell:cli"]},
)
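# The bare "import incremental" above is a fail-fast guard: without it, setuptools would
# silently fall back to version 0.0.0. The function below is a minimal, hedged sketch of the
# same guard with an explicit error message; it is illustrative only and is not called by
# this setup script.
def _require_incremental():
    try:
        import incremental  # noqa: F401
    except ImportError:
        raise SystemExit(
            "towncrier's setup requires the 'incremental' package; "
            "install it before building (e.g. 'pip install incremental')."
        )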
| 40.218182
| 88
| 0.648282
|
58a1b656563adee8716d20abc2619d93df54a083
| 37,908
|
py
|
Python
|
sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-01-24T08:54:57.000Z
|
2022-01-24T08:54:57.000Z
|
sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_sentiment_async.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import pytest
import platform
import functools
import json
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics.aio import TextAnalyticsClient
from azure.ai.textanalytics import (
VERSION,
DetectLanguageInput,
TextDocumentInput,
TextAnalyticsApiVersion,
)
from testcase import TextAnalyticsPreparer
from testcase import TextAnalyticsClientPreparer as _TextAnalyticsClientPreparer
from devtools_testutils.aio import recorded_by_proxy_async
from testcase import TextAnalyticsTest
# pre-apply the client_cls positional argument so it needn't be explicitly passed below
TextAnalyticsClientPreparer = functools.partial(_TextAnalyticsClientPreparer, TextAnalyticsClient)
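# functools.partial pre-binds arguments: calling the partial object is equivalent to calling
# the original callable with the bound argument(s) prepended. The function below is a minimal,
# self-contained sketch of that behaviour; the names in it are made up and unrelated to the
# test fixtures.
def _partial_sketch():
    def greet(language, name):
        return "%s: hello %s" % (language, name)
    greet_en = functools.partial(greet, "en")  # pre-apply the positional 'language' argument
    assert greet_en("world") == greet("en", "world")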
class TestAnalyzeSentiment(TextAnalyticsTest):
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_no_single_input(self, client):
with pytest.raises(TypeError):
response = await client.analyze_sentiment("hello world")
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_all_successful_passing_dict(self, client):
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
{"id": "2", "language": "en", "text": "I did not like the hotel we stayed at. It was too expensive."},
{"id": "3", "language": "en", "text": "The restaurant had really good food. I recommend you try it."}]
response = await client.analyze_sentiment(docs, show_stats=True)
assert response[0].sentiment == "neutral"
assert response[1].sentiment == "negative"
assert response[2].sentiment == "positive"
for doc in response:
assert doc.id is not None
assert doc.statistics is not None
self.validateConfidenceScores(doc.confidence_scores)
assert doc.sentences is not None
assert len(response[0].sentences) == 1
assert response[0].sentences[0].text == "Microsoft was founded by Bill Gates and Paul Allen."
assert len(response[1].sentences) == 2
# assert response[1].sentences[0].text == "I did not like the hotel we stayed at." FIXME https://msazure.visualstudio.com/Cognitive%20Services/_workitems/edit/13848227
assert response[1].sentences[1].text == "It was too expensive."
assert len(response[2].sentences) == 2
# assert response[2].sentences[0].text == "The restaurant had really good food." FIXME https://msazure.visualstudio.com/Cognitive%20Services/_workitems/edit/13848227
assert response[2].sentences[1].text == "I recommend you try it."
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_all_successful_passing_text_document_input(self, client):
docs = [
TextDocumentInput(id="1", text="Microsoft was founded by Bill Gates and Paul Allen."),
TextDocumentInput(id="2", text="I did not like the hotel we stayed at. It was too expensive."),
TextDocumentInput(id="3", text="The restaurant had really good food. I recommend you try it."),
]
response = await client.analyze_sentiment(docs)
assert response[0].sentiment == "neutral"
assert response[1].sentiment == "negative"
assert response[2].sentiment == "positive"
for doc in response:
self.validateConfidenceScores(doc.confidence_scores)
assert doc.sentences is not None
assert len(response[0].sentences) == 1
assert response[0].sentences[0].text == "Microsoft was founded by Bill Gates and Paul Allen."
assert len(response[1].sentences) == 2
# assert response[1].sentences[0].text == "I did not like the hotel we stayed at." FIXME https://msazure.visualstudio.com/Cognitive%20Services/_workitems/edit/13848227
assert response[1].sentences[1].text == "It was too expensive."
assert len(response[2].sentences) == 2
# assert response[2].sentences[0].text == "The restaurant had really good food." FIXME https://msazure.visualstudio.com/Cognitive%20Services/_workitems/edit/13848227
assert response[2].sentences[1].text == "I recommend you try it."
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_passing_only_string(self, client):
docs = [
"Microsoft was founded by Bill Gates and Paul Allen.",
"I did not like the hotel we stayed at. It was too expensive.",
"The restaurant had really good food. I recommend you try it.",
""
]
response = await client.analyze_sentiment(docs)
assert response[0].sentiment == "neutral"
assert response[1].sentiment == "negative"
assert response[2].sentiment == "positive"
assert response[3].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_input_with_some_errors(self, client):
docs = [{"id": "1", "language": "en", "text": ""},
{"id": "2", "language": "english", "text": "I did not like the hotel we stayed at. It was too expensive."},
{"id": "3", "language": "en", "text": "The restaurant had really good food. I recommend you try it."}]
response = await client.analyze_sentiment(docs)
assert response[0].is_error
assert response[1].is_error
assert not response[2].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_input_with_all_errors(self, client):
docs = [{"id": "1", "language": "en", "text": ""},
{"id": "2", "language": "english", "text": "I did not like the hotel we stayed at. It was too expensive."},
{"id": "3", "language": "en", "text": ""}]
response = await client.analyze_sentiment(docs)
assert response[0].is_error
assert response[1].is_error
assert response[2].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_too_many_documents(self, client):
docs = ["One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten", "Eleven"]
with pytest.raises(HttpResponseError) as excinfo:
await client.analyze_sentiment(docs)
assert excinfo.value.status_code == 400
assert excinfo.value.error.code == "InvalidDocumentBatch"
assert "Batch request contains too many records" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_output_same_order_as_input(self, client):
docs = [
TextDocumentInput(id="1", text="one"),
TextDocumentInput(id="2", text="two"),
TextDocumentInput(id="3", text="three"),
TextDocumentInput(id="4", text="four"),
TextDocumentInput(id="5", text="five")
]
response = await client.analyze_sentiment(docs)
for idx, doc in enumerate(response):
assert str(idx + 1) == doc.id
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"textanalytics_test_api_key": ""})
@recorded_by_proxy_async
async def test_empty_credential_class(self, client):
with pytest.raises(ClientAuthenticationError):
response = await client.analyze_sentiment(
["This is written in English."]
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"textanalytics_test_api_key": "xxxxxxxxxxxx"})
@recorded_by_proxy_async
async def test_bad_credentials(self, client):
with pytest.raises(ClientAuthenticationError):
response = await client.analyze_sentiment(
["This is written in English."]
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_bad_document_input(self, client):
docs = "This is the wrong type"
with pytest.raises(TypeError):
response = await client.analyze_sentiment(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_mixing_inputs(self, client):
docs = [
{"id": "1", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
TextDocumentInput(id="2", text="I did not like the hotel we stayed at. It was too expensive."),
"You cannot mix string input with the above inputs"
]
with pytest.raises(TypeError):
response = await client.analyze_sentiment(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_out_of_order_ids(self, client):
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = await client.analyze_sentiment(docs)
in_order = ["56", "0", "22", "19", "1"]
for idx, resp in enumerate(response):
assert resp.id == in_order[idx]
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_show_stats_and_model_version(self, client):
def callback(response):
assert response is not None
assert response.model_version
assert response.raw_response is not None
assert response.statistics.document_count == 5
assert response.statistics.transaction_count == 4
assert response.statistics.valid_document_count == 4
assert response.statistics.erroneous_document_count == 1
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = await client.analyze_sentiment(
docs,
show_stats=True,
model_version="latest",
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_batch_size_over_limit(self, client):
docs = ["hello world"] * 1050
with pytest.raises(HttpResponseError):
response = await client.analyze_sentiment(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_whole_batch_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"fr\""
language = resp.http_request.body.count(language_str)
assert language == 3
docs = [
"This was the best day of my life.",
"I did not like the hotel we stayed at. It was too expensive.",
"The restaurant was not as good as I hoped."
]
response = await client.analyze_sentiment(docs, language="fr", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_whole_batch_dont_use_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"\""
language = resp.http_request.body.count(language_str)
assert language == 3
docs = [
"This was the best day of my life.",
"I did not like the hotel we stayed at. It was too expensive.",
"The restaurant was not as good as I hoped."
]
response = await client.analyze_sentiment(docs, language="", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_per_item_dont_use_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"\""
language = resp.http_request.body.count(language_str)
assert language == 2
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
assert language == 1
docs = [{"id": "1", "language": "", "text": "I will go to the park."},
{"id": "2", "language": "", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.analyze_sentiment(docs, raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_whole_batch_language_hint_and_obj_input(self, client):
def callback(resp):
language_str = "\"language\": \"de\""
language = resp.http_request.body.count(language_str)
assert language == 3
docs = [
TextDocumentInput(id="1", text="I should take my cat to the veterinarian."),
TextDocumentInput(id="4", text="Este es un document escrito en Español."),
TextDocumentInput(id="3", text="猫は幸せ"),
]
response = await client.analyze_sentiment(docs, language="de", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_whole_batch_language_hint_and_dict_input(self, client):
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
assert language == 3
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.analyze_sentiment(docs, language="es", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_whole_batch_language_hint_and_obj_per_item_hints(self, client):
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
assert language == 2
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
assert language == 1
docs = [
TextDocumentInput(id="1", text="I should take my cat to the veterinarian.", language="es"),
TextDocumentInput(id="2", text="Este es un document escrito en Español.", language="es"),
TextDocumentInput(id="3", text="猫は幸せ"),
]
response = await client.analyze_sentiment(docs, language="en", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_whole_batch_language_hint_and_dict_per_item_hints(self, client):
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
assert language == 2
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
assert language == 1
docs = [{"id": "1", "language": "es", "text": "I will go to the park."},
{"id": "2", "language": "es", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.analyze_sentiment(docs, language="en", raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"default_language": "es"})
@recorded_by_proxy_async
async def test_client_passed_default_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
assert language == 3
def callback_2(resp):
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
assert language == 3
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.analyze_sentiment(docs, raw_response_hook=callback)
response = await client.analyze_sentiment(docs, language="en", raw_response_hook=callback_2)
response = await client.analyze_sentiment(docs, raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_invalid_language_hint_method(self, client):
response = await client.analyze_sentiment(
["This should fail because we're passing in an invalid language hint"], language="notalanguage"
)
assert response[0].error.code == 'UnsupportedLanguageCode'
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_invalid_language_hint_docs(self, client):
response = await client.analyze_sentiment(
[{"id": "1", "language": "notalanguage", "text": "This should fail because we're passing in an invalid language hint"}]
)
assert response[0].error.code == 'UnsupportedLanguageCode'
@TextAnalyticsPreparer()
@recorded_by_proxy_async
async def test_rotate_subscription_key(self, textanalytics_test_endpoint, textanalytics_test_api_key):
credential = AzureKeyCredential(textanalytics_test_api_key)
client = TextAnalyticsClient(textanalytics_test_endpoint, credential)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.analyze_sentiment(docs)
assert response is not None
credential.update("xxx") # Make authentication fail
with pytest.raises(ClientAuthenticationError):
response = await client.analyze_sentiment(docs)
credential.update(textanalytics_test_api_key) # Authenticate successfully again
response = await client.analyze_sentiment(docs)
assert response is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_user_agent(self, client):
def callback(resp):
assert "azsdk-python-ai-textanalytics/{} Python/{} ({})".format(
VERSION, platform.python_version(), platform.platform()) in \
resp.http_request.headers["User-Agent"]
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.analyze_sentiment(docs, raw_response_hook=callback)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_document_attribute_error_no_result_attribute(self, client):
docs = [{"id": "1", "text": ""}]
response = await client.analyze_sentiment(docs)
# Attributes on DocumentError
assert response[0].is_error
assert response[0].id == "1"
assert response[0].error is not None
# Result attribute not on DocumentError, custom error message
try:
sentiment = response[0].sentiment
except AttributeError as custom_error:
assert custom_error.args[0] == \
'\'DocumentError\' object has no attribute \'sentiment\'. ' \
'The service was unable to process this document:\nDocument Id: 1\nError: ' \
'InvalidDocument - Document text is empty.\n'
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_document_attribute_error_nonexistent_attribute(self, client):
docs = [{"id": "1", "text": ""}]
response = await client.analyze_sentiment(docs)
# Attribute not found on DocumentError or result obj, default behavior/message
try:
sentiment = response[0].attribute_not_on_result_or_error
except AttributeError as default_behavior:
assert default_behavior.args[0] == '\'DocumentError\' object has no attribute \'attribute_not_on_result_or_error\''
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_bad_model_version_error(self, client):
docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
try:
result = await client.analyze_sentiment(docs, model_version="bad")
except HttpResponseError as err:
assert err.error.code == "ModelVersionIncorrect"
assert err.error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_document_errors(self, client):
text = ""
for _ in range(5121):
text += "x"
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "english", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": text}]
doc_errors = await client.analyze_sentiment(docs)
assert doc_errors[0].error.code == "InvalidDocument"
assert doc_errors[0].error.message is not None
assert doc_errors[1].error.code == "UnsupportedLanguageCode"
assert doc_errors[1].error.message is not None
assert doc_errors[2].error.code == "InvalidDocument"
assert doc_errors[2].error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_document_warnings(self, client):
        # No warnings are currently returned for analyze_sentiment; update this test once the service adds them.
docs = [
{"id": "1", "text": "This won't actually create a warning :'("},
]
result = await client.analyze_sentiment(docs)
for doc in result:
doc_warnings = doc.warnings
assert len(doc_warnings) == 0
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_not_passing_list_for_docs(self, client):
docs = {"id": "1", "text": "hello world"}
with pytest.raises(TypeError) as excinfo:
await client.analyze_sentiment(docs)
assert "Input documents cannot be a dict" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_missing_input_records_error(self, client):
docs = []
with pytest.raises(ValueError) as excinfo:
await client.analyze_sentiment(docs)
assert "Input documents can not be empty or None" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_passing_none_docs(self, client):
with pytest.raises(ValueError) as excinfo:
await client.analyze_sentiment(None)
assert "Input documents can not be empty or None" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_duplicate_ids_error(self, client):
# Duplicate Ids
docs = [{"id": "1", "text": "hello world"},
{"id": "1", "text": "I did not like the hotel we stayed at."}]
try:
result = await client.analyze_sentiment(docs)
except HttpResponseError as err:
assert err.error.code == "InvalidDocument"
assert err.error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_batch_size_over_limit_error(self, client):
# Batch size over limit
docs = ["hello world"] * 1001
try:
response = await client.analyze_sentiment(docs)
except HttpResponseError as err:
assert err.error.code == "InvalidDocumentBatch"
assert err.error.message is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_language_kwarg_spanish(self, client):
def callback(response):
language_str = "\"language\": \"es\""
assert response.http_request.body.count(language_str) == 1
assert response.model_version is not None
assert response.statistics is not None
res = await client.analyze_sentiment(
documents=["Bill Gates is the CEO of Microsoft."],
model_version="latest",
show_stats=True,
language="es",
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_pass_cls(self, client):
def callback(pipeline_response, deserialized, _):
return "cls result"
res = await client.analyze_sentiment(
documents=["Test passing cls to endpoint"],
cls=callback
)
assert res == "cls result"
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_opinion_mining(self, client):
documents = [
"It has a sleek premium aluminum design that makes it beautiful to look at."
]
document = (await client.analyze_sentiment(documents=documents, show_opinion_mining=True))[0]
for sentence in document.sentences:
for mined_opinion in sentence.mined_opinions:
target = mined_opinion.target
assert 'design' == target.text
assert 'positive' == target.sentiment
assert 0.0 == target.confidence_scores.neutral
self.validateConfidenceScores(target.confidence_scores)
assert 32 == target.offset
sleek_opinion = mined_opinion.assessments[0]
assert 'sleek' == sleek_opinion.text
assert 'positive' == sleek_opinion.sentiment
assert 0.0 == sleek_opinion.confidence_scores.neutral
self.validateConfidenceScores(sleek_opinion.confidence_scores)
assert 9 == sleek_opinion.offset
assert not sleek_opinion.is_negated
# FIXME https://msazure.visualstudio.com/Cognitive%20Services/_workitems/edit/13848227
# premium_opinion = mined_opinion.assessments[1]
# assert 'premium' == premium_opinion.text
# assert 'positive' == premium_opinion.sentiment
# assert 0.0 == premium_opinion.confidence_scores.neutral
# self.validateConfidenceScores(premium_opinion.confidence_scores)
# assert 15 == premium_opinion.offset
# assert not premium_opinion.is_negated
beautiful_opinion = mined_opinion.assessments[1]
assert 'beautiful' == beautiful_opinion.text
assert 'positive' == beautiful_opinion.sentiment
assert 1.0 == beautiful_opinion.confidence_scores.positive
self.validateConfidenceScores(beautiful_opinion.confidence_scores)
assert 53 == beautiful_opinion.offset
assert not beautiful_opinion.is_negated
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_opinion_mining_with_negated_opinion(self, client):
documents = [
"The food and service is not good"
]
document = (await client.analyze_sentiment(documents=documents, show_opinion_mining=True))[0]
for sentence in document.sentences:
food_target = sentence.mined_opinions[0].target
service_target = sentence.mined_opinions[1].target
assert 'food' == food_target.text
assert 'negative' == food_target.sentiment
assert 0.0 == food_target.confidence_scores.neutral
self.validateConfidenceScores(food_target.confidence_scores)
assert 4 == food_target.offset
assert 'service' == service_target.text
# assert 'negative' == service_target.sentiment FIXME https://msazure.visualstudio.com/Cognitive%20Services/_workitems/edit/13848227
assert 0.0 == service_target.confidence_scores.neutral
self.validateConfidenceScores(service_target.confidence_scores)
assert 13 == service_target.offset
food_opinion = sentence.mined_opinions[0].assessments[0]
service_opinion = sentence.mined_opinions[1].assessments[0]
self.assertOpinionsEqual(food_opinion, service_opinion)
assert 'good' == food_opinion.text
assert 'negative' == food_opinion.sentiment
assert 0.0 == food_opinion.confidence_scores.neutral
self.validateConfidenceScores(food_opinion.confidence_scores)
assert 28 == food_opinion.offset
assert food_opinion.is_negated
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_opinion_mining_more_than_5_documents(self, client):
documents = [
"The food was unacceptable",
"The rooms were beautiful. The AC was good and quiet.",
"The breakfast was good, but the toilet was smelly.",
"Loved this hotel - good breakfast - nice shuttle service - clean rooms.",
"I had a great unobstructed view of the Microsoft campus.",
"Nice rooms but bathrooms were old and the toilet was dirty when we arrived.",
"The toilet smelled."
]
analyzed_documents = await client.analyze_sentiment(documents, show_opinion_mining=True)
doc_5 = analyzed_documents[5]
doc_6 = analyzed_documents[6]
doc_5_opinions = [
opinion.text
for sentence in doc_5.sentences
for mined_opinion in sentence.mined_opinions
for opinion in mined_opinion.assessments
]
doc_6_opinions = [
opinion.text
for sentence in doc_6.sentences
for mined_opinion in sentence.mined_opinions
for opinion in mined_opinion.assessments
]
assert doc_5_opinions == ["Nice", "old", "dirty"]
assert doc_6_opinions == ["smelled"]
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_opinion_mining_no_mined_opinions(self, client):
document = (await client.analyze_sentiment(documents=["today is a hot day"], show_opinion_mining=True))[0]
assert not document.sentences[0].mined_opinions
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_0})
async def test_opinion_mining_v3(self, **kwargs):
client = kwargs.pop("client")
with pytest.raises(ValueError) as excinfo:
await client.analyze_sentiment(["will fail"], show_opinion_mining=True)
assert "'show_opinion_mining' is only available for API version v3.1 and up" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy_async
async def test_offset(self, client):
result = await client.analyze_sentiment(["I like nature. I do not like being inside"])
sentences = result[0].sentences
assert sentences[0].offset == 0
assert sentences[1].offset == 15
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_0})
@recorded_by_proxy_async
async def test_no_offset_v3_sentence_sentiment(self, client):
result = await client.analyze_sentiment(["I like nature. I do not like being inside"])
sentences = result[0].sentences
assert sentences[0].offset is None
assert sentences[1].offset is None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_0})
@recorded_by_proxy_async
async def test_string_index_type_not_fail_v3(self, client):
# make sure that the addition of the string_index_type kwarg for v3.1-preview.1 doesn't
# cause v3.0 calls to fail
await client.analyze_sentiment(["please don't fail"])
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_1})
@recorded_by_proxy_async
async def test_default_string_index_type_is_UnicodeCodePoint(self, client):
def callback(response):
assert response.http_request.query["stringIndexType"] == "UnicodeCodePoint"
res = await client.analyze_sentiment(
documents=["Hello world"],
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V2022_04_01_PREVIEW})
@recorded_by_proxy_async
async def test_default_string_index_type_UnicodeCodePoint_body_param(self, client):
def callback(response):
assert json.loads(response.http_request.body)['parameters']["stringIndexType"] == "UnicodeCodePoint"
res = await client.analyze_sentiment(
documents=["Hello world"],
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_1})
@recorded_by_proxy_async
async def test_explicit_set_string_index_type(self, client):
def callback(response):
assert response.http_request.query["stringIndexType"] == "TextElement_v8"
res = await client.analyze_sentiment(
documents=["Hello world"],
string_index_type="TextElement_v8",
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V2022_04_01_PREVIEW})
@recorded_by_proxy_async
async def test_explicit_set_string_index_type_body_param(self, client):
def callback(response):
assert json.loads(response.http_request.body)['parameters']["stringIndexType"] == "TextElements_v8"
res = await client.analyze_sentiment(
documents=["Hello world"],
string_index_type="TextElement_v8",
raw_response_hook=callback
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_1})
@recorded_by_proxy_async
async def test_disable_service_logs(self, client):
def callback(resp):
assert resp.http_request.query['loggingOptOut']
await client.analyze_sentiment(
documents=["Test for logging disable"],
disable_service_logs=True,
raw_response_hook=callback,
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V2022_04_01_PREVIEW})
@recorded_by_proxy_async
async def test_disable_service_logs_body_param(self, client):
def callback(resp):
assert json.loads(resp.http_request.body)['parameters']['loggingOptOut']
await client.analyze_sentiment(
documents=["Test for logging disable"],
disable_service_logs=True,
raw_response_hook=callback,
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": "v3.0"})
async def test_sentiment_multiapi_validate_args_v3_0(self, **kwargs):
client = kwargs.pop("client")
with pytest.raises(ValueError) as e:
res = await client.analyze_sentiment(["I'm tired"], string_index_type="UnicodeCodePoint")
assert str(e.value) == "'string_index_type' is only available for API version v3.1 and up.\n"
with pytest.raises(ValueError) as e:
res = await client.analyze_sentiment(["I'm tired"], show_opinion_mining=True)
assert str(e.value) == "'show_opinion_mining' is only available for API version v3.1 and up.\n"
with pytest.raises(ValueError) as e:
res = await client.analyze_sentiment(["I'm tired"], disable_service_logs=True)
assert str(e.value) == "'disable_service_logs' is only available for API version v3.1 and up.\n"
with pytest.raises(ValueError) as e:
res = await client.analyze_sentiment(["I'm tired"], show_opinion_mining=True, disable_service_logs=True, string_index_type="UnicodeCodePoint")
assert str(e.value) == "'show_opinion_mining' is only available for API version v3.1 and up.\n'disable_service_logs' is only available for API version v3.1 and up.\n'string_index_type' is only available for API version v3.1 and up.\n"
| 43.672811
| 242
| 0.654585
|
7e2d9f3c3e80473379275081769263ff9c62ba29
| 130,530
|
py
|
Python
|
demisto_sdk/commands/find_dependencies/tests/find_dependencies_test.py
|
lesleyxyz/demisto-sdk
|
188199bda6fd56f611f204e4e487d1c912f323aa
|
[
"MIT"
] | null | null | null |
demisto_sdk/commands/find_dependencies/tests/find_dependencies_test.py
|
lesleyxyz/demisto-sdk
|
188199bda6fd56f611f204e4e487d1c912f323aa
|
[
"MIT"
] | null | null | null |
demisto_sdk/commands/find_dependencies/tests/find_dependencies_test.py
|
lesleyxyz/demisto-sdk
|
188199bda6fd56f611f204e4e487d1c912f323aa
|
[
"MIT"
] | null | null | null |
from typing import List
from unittest.mock import patch
import networkx as nx
import pytest
import demisto_sdk.commands.create_id_set.create_id_set as cis
from demisto_sdk.commands.common.constants import (DEFAULT_JOB_FROM_VERSION,
FileType)
from demisto_sdk.commands.find_dependencies.find_dependencies import (
PackDependencies, calculate_single_pack_dependencies,
get_packs_dependent_on_given_packs,
remove_items_from_content_entities_sections,
remove_items_from_packs_section)
from TestSuite.test_tools import ChangeCWD
from TestSuite.utils import IsEqualFunctions
def create_a_pack_entity(pack, entity_type: FileType = None, entity_id: str = None, entity_name: str = None,
commands: List[str] = None):
"""
Given
- A Pack.
When
    - An entity is added to the pack.
    Then
    - The entity is added to the pack with basic data.
"""
if entity_type == FileType.SCRIPT:
pack.create_script(entity_id).create_default_script(entity_id)
elif entity_type == FileType.INTEGRATION:
pack.create_integration(entity_id).create_default_integration(entity_id, commands)
elif entity_type == FileType.PLAYBOOK:
pack.create_playbook(entity_id).create_default_playbook(entity_id)
elif entity_type == FileType.TEST_PLAYBOOK:
pack.create_test_playbook(entity_id).create_default_test_playbook(entity_id)
elif entity_type == FileType.CLASSIFIER:
content = {'id': entity_id, 'name': entity_name, 'transformer': '', 'keyTypeMap': {}, 'type': 'classification'}
pack.create_classifier(entity_id, content)
elif entity_type == FileType.LAYOUT:
content = {"typeId": entity_id, "TypeName": entity_id, "kind": "details", "layout": {}}
pack.create_layout(entity_id, content)
elif entity_type == FileType.LAYOUTS_CONTAINER:
content = {"id": entity_id, "name": entity_name, "group": "incident", "detailsV2": {}}
pack.create_layout(entity_id, content)
elif entity_type == FileType.MAPPER:
content = {'id': entity_id, 'name': entity_name, 'mapping': {}, 'type': 'mapping-incomming'}
pack.create_mapper(entity_id, content)
elif entity_type == FileType.INCIDENT_FIELD:
content = {'id': f'incident_{entity_id}', 'name': entity_name}
pack.create_incident_field(entity_id, content)
elif entity_type == FileType.INCIDENT_TYPE:
content = {'id': entity_id, 'name': entity_name, 'preProcessingScript': '', 'color': 'test'}
pack.create_incident_type(entity_id, content)
elif entity_type == FileType.INDICATOR_FIELD:
content = {'id': f'indicator_{entity_id}', 'name': entity_name}
pack.create_indicator_field(entity_id, content)
elif entity_type == FileType.REPUTATION:
content = {'id': entity_id, 'details': entity_name, 'regex': ''}
pack.create_indicator_type(entity_id, content)
elif entity_type == FileType.GENERIC_DEFINITION:
content = {'id': entity_id, 'details': entity_name, "auditable": True}
pack.create_generic_definition(entity_id, content)
elif entity_type == FileType.GENERIC_TYPE:
content = {'id': entity_id, 'details': entity_name, "color": "#8052f4", "definitionId": "assets"}
pack.create_generic_type(entity_id, content)
elif entity_type == FileType.GENERIC_MODULE:
content = {'id': entity_id, 'details': entity_name, "views": [], "definitionId": "assets"}
pack.create_generic_module(entity_id, content)
elif entity_type == FileType.GENERIC_FIELD:
content = {'id': entity_id, 'details': entity_name, "definitionId": "assets"}
pack.create_generic_field(entity_id, content)
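# A minimal, hedged usage sketch of the helper above, mirroring the calls made in working_repo()
# below; the pack and entity names here are made up for illustration only and this function is
# not used by the tests.
def _example_pack_setup(repo):
    example_pack = repo.create_pack('ExamplePack')
    create_a_pack_entity(example_pack, FileType.SCRIPT, 'ExampleScript')
    create_a_pack_entity(example_pack, FileType.INTEGRATION, 'ExampleIntegration',
                         commands=['example-command'])
    create_a_pack_entity(example_pack, FileType.INCIDENT_FIELD, 'examplefield', 'Example Field')
    return example_pack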
def working_repo(repo):
# Create 5 packs with all entities
repo.setup_content_repo(5)
# Create a pack called 'PrismaCloudCompute' with 4 scripts and 1 incident_type.
prisma_cloud_compute = repo.create_pack('PrismaCloudCompute')
prisma_cloud_compute_scripts = ['PrismaCloudComputeParseAuditAlert',
'PrismaCloudComputeParseCloudDiscoveryAlert',
'PrismaCloudComputeParseComplianceAlert',
'PrismaCloudComputeParseVulnerabilityAlert']
for script in prisma_cloud_compute_scripts:
create_a_pack_entity(prisma_cloud_compute, FileType.SCRIPT, script)
create_a_pack_entity(prisma_cloud_compute, FileType.INCIDENT_TYPE, 'Prisma Cloud Compute Cloud Discovery',
'Prisma Cloud Compute Cloud Discovery')
# Create a pack called 'Expanse' with 1 playbook.
expanse = repo.create_pack('Expanse')
create_a_pack_entity(expanse, FileType.PLAYBOOK, 'Expanse_Incident_Playbook')
# Create a pack called 'GetServerURL' with 1 script.
get_server_url = repo.create_pack('GetServerURL')
create_a_pack_entity(get_server_url, FileType.SCRIPT, 'GetServerURL')
# Create a pack called 'HelloWorld' with 1 script and 1 classifier.
hello_world = repo.create_pack('HelloWorld')
create_a_pack_entity(hello_world, FileType.SCRIPT, 'HelloWorldScript')
create_a_pack_entity(hello_world, FileType.CLASSIFIER, 'HelloWorld', 'HelloWorld')
# Create a pack called 'Feedsslabusech' with 1 integration.
feedsslabusech = repo.create_pack('Feedsslabusech')
create_a_pack_entity(feedsslabusech, FileType.INTEGRATION, 'Feedsslabusech', commands=['sslbl-get-indicators'])
# Create a pack called 'ActiveMQ' with 1 integration.
active_mq = repo.create_pack('ActiveMQ')
create_a_pack_entity(active_mq, FileType.INTEGRATION, 'ActiveMQ', commands=['activemq-subscribe'])
# Create a pack called 'FeedAlienVault' with 1 integration.
feed_alien_vault = repo.create_pack('FeedAlienVault')
create_a_pack_entity(feed_alien_vault, FileType.INTEGRATION, 'FeedAlienVault',
commands=['alienvault-get-indicators'])
# Create a pack called 'QRadar' with 1 integration.
qradar = repo.create_pack('QRadar')
create_a_pack_entity(qradar, FileType.INTEGRATION, 'QRadar', commands=['qradar-searches'])
# Create a pack called 'Active_Directory_Query' with 1 integration and 1 script.
active_directory_query = repo.create_pack('Active_Directory_Query')
create_a_pack_entity(active_directory_query, FileType.INTEGRATION, 'Active Directory Query',
commands=['ad-get-user', 'ad-search'])
create_a_pack_entity(active_directory_query, FileType.SCRIPT, 'ADGetUser')
# Create a pack called 'Pcysys' with 1 playbook.
pcysys = repo.create_pack('Pcysys')
create_a_pack_entity(pcysys, FileType.PLAYBOOK, 'Pentera Run Scan')
# Create a pack called 'Indeni' with 1 playbook.
indeni = repo.create_pack('Indeni')
create_a_pack_entity(indeni, FileType.PLAYBOOK, 'Indeni Demo')
    # Create a pack called 'Slack' with 1 playbook.
slack = repo.create_pack('Slack')
create_a_pack_entity(slack, FileType.PLAYBOOK, 'Failed Login Playbook - Slack v2')
# Create a pack called 'FeedAWS' with 1 integration.
feed_aws = repo.create_pack('FeedAWS')
create_a_pack_entity(feed_aws, FileType.INTEGRATION, 'FeedAWS', commands=['aws-get-indicators'])
# Create a pack called 'FeedAutoFocus' with 1 integration.
feed_autofocus = repo.create_pack('FeedAutofocus')
create_a_pack_entity(feed_autofocus, FileType.INTEGRATION, 'FeedAutofocus', commands=['autofocus-get-indicators'])
# Create a pack called 'ipinfo' with 1 integration.
ipinfo = repo.create_pack('ipinfo')
create_a_pack_entity(ipinfo, FileType.INTEGRATION, 'ipinfo', commands=['ip'])
# Create a pack called 'DigitalGuardian' with 1 incident_field.
digital_guardian = repo.create_pack('DigitalGuardian')
create_a_pack_entity(digital_guardian, FileType.INCIDENT_FIELD, 'digitalguardianusername',
'Digital Guardian Username')
# Create a pack called 'EmployeeOffboarding' with 1 incident_field.
employee_offboarding = repo.create_pack('EmployeeOffboarding')
create_a_pack_entity(employee_offboarding, FileType.INCIDENT_FIELD, 'googledisplayname', 'Google Display Name')
# Create a pack called 'Phishing' with 3 incident_fields and 1 script.
phishing = repo.create_pack('Phishing')
create_a_pack_entity(phishing, FileType.INCIDENT_FIELD, 'attachmentname', 'Attachment Name')
create_a_pack_entity(phishing, FileType.INCIDENT_FIELD, 'emailfrom', 'Email From')
create_a_pack_entity(phishing, FileType.INCIDENT_FIELD, 'emailsubject', 'Email Subject')
create_a_pack_entity(phishing, FileType.SCRIPT, 'CheckEmailAuthenticity')
# Create a pack called 'CommonTypes' with 3 incident_fields 2 incident_types 5 indicator_fields 1 indicator_type.
common_types = repo.create_pack('CommonTypes')
ct_incident_field_ids = ['accountid', 'country', 'username']
ct_incident_field_names = ['Account Id', 'Country', 'Username']
ct_incident_type_ids = ['Network', 'Authentication']
ct_incident_type_names = ['Network', 'Authentication']
ct_indicator_field_ids = ['accounttype', 'adminname', 'tags', 'commontypes', 'adminemail']
ct_indicator_field_names = ['Account Type', 'Admin Name', 'Tags', 'Common Types', 'Admin Email']
for field_id, field_name in zip(ct_incident_field_ids, ct_incident_field_names):
create_a_pack_entity(common_types, FileType.INCIDENT_FIELD, field_id, field_name)
for field_id, field_name in zip(ct_incident_type_ids, ct_incident_type_names):
create_a_pack_entity(common_types, FileType.INCIDENT_TYPE, field_id, field_name)
for field_id, field_name in zip(ct_indicator_field_ids, ct_indicator_field_names):
create_a_pack_entity(common_types, FileType.INDICATOR_FIELD, field_id, field_name)
create_a_pack_entity(common_types, FileType.REPUTATION, 'accountrep', 'Account Rep')
# Create a pack called 'SafeBreach' with 1 incident_field and 1 integration.
safe_breach = repo.create_pack('SafeBreach')
create_a_pack_entity(safe_breach, FileType.INDICATOR_FIELD, 'safebreachremediationstatus',
'SafeBreach Remediation Status')
create_a_pack_entity(safe_breach, FileType.INTEGRATION, 'SafeBreach', commands=['safebreach-get-remediation-data'])
# Create a pack called 'CommonScripts' with 7 scripts.
common_scripts = repo.create_pack('CommonScripts')
create_a_pack_entity(common_scripts, FileType.SCRIPT, 'ChangeContext')
create_a_pack_entity(common_scripts, FileType.SCRIPT, 'Set')
create_a_pack_entity(common_scripts, FileType.SCRIPT, 'SetAndHandleEmpty')
create_a_pack_entity(common_scripts, FileType.SCRIPT, 'AssignAnalystToIncident')
create_a_pack_entity(common_scripts, FileType.SCRIPT, 'EmailAskUser')
create_a_pack_entity(common_scripts, FileType.SCRIPT, 'ScheduleCommand')
create_a_pack_entity(common_scripts, FileType.SCRIPT, 'DeleteContext')
create_a_pack_entity(common_scripts, FileType.SCRIPT, 'IsInCidrRanges')
# Create a pack called 'CalculateTimeDifference' with 1 script.
calculate_time_difference = repo.create_pack('CalculateTimeDifference')
create_a_pack_entity(calculate_time_difference, FileType.SCRIPT, 'CalculateTimeDifference')
# Create a pack called 'CommonPlaybooks' with 3 playbooks.
common_playbooks = repo.create_pack('CommonPlaybooks')
create_a_pack_entity(common_playbooks, FileType.PLAYBOOK, 'Block IP - Generic v2')
create_a_pack_entity(common_playbooks, FileType.PLAYBOOK, 'IP Enrichment - Generic v2')
create_a_pack_entity(common_playbooks, FileType.PLAYBOOK, 'Active Directory - Get User Manager Details')
# Create a pack called 'FeedMitreAttack' with 1 indicator_type.
feed_mitre_attack = repo.create_pack('FeedMitreAttack')
create_a_pack_entity(feed_mitre_attack, FileType.REPUTATION, 'MITRE ATT&CK', 'MITRE ATT&CK')
# Create a pack called 'CrisisManagement' with 1 incident_type and 1 incident_field.
crisis_management = repo.create_pack('CrisisManagement')
create_a_pack_entity(crisis_management, FileType.INCIDENT_TYPE, 'HR Ticket', 'HR Ticket')
create_a_pack_entity(crisis_management, FileType.INDICATOR_FIELD, 'jobtitle', 'Job Title')
# Create a pack called 'Carbon_Black_Enterprise_Response' with 2 scripts.
carbon_black_enterprise_response = repo.create_pack('Carbon_Black_Enterprise_Response')
create_a_pack_entity(carbon_black_enterprise_response, FileType.SCRIPT, 'CBLiveFetchFiles')
create_a_pack_entity(carbon_black_enterprise_response, FileType.SCRIPT, 'CBAlerts')
# Create a pack called 'Claroty' with 3 mappers and 1 incident_type.
claroty = repo.create_pack('Claroty')
create_a_pack_entity(claroty, FileType.MAPPER, 'CBAlerts-mapper', 'Claroty-mapper')
create_a_pack_entity(claroty, FileType.MAPPER, 'Claroty', 'Claroty')
create_a_pack_entity(claroty, FileType.MAPPER, 'CBAlerts - Incoming Mapper', 'Claroty - Incoming Mapper')
create_a_pack_entity(claroty, FileType.INCIDENT_TYPE, 'Claroty Integrity Incident', 'Claroty Integrity Incident')
# Create a pack called 'EWS' with 1 mapper.
ews = repo.create_pack('EWS')
create_a_pack_entity(ews, FileType.MAPPER, 'EWS v2-mapper', 'EWS v2-mapper')
# Create a pack called 'AutoFocus' with 1 playbook.
auto_focus = repo.create_pack('AutoFocus')
create_a_pack_entity(auto_focus, FileType.PLAYBOOK, 'Autofocus Query Samples, Sessions and Tags',
'Autofocus Query Samples, Sessions and Tags')
# Create a pack called 'Volatility' with 1 script.
volatility = repo.create_pack('Volatility')
create_a_pack_entity(volatility, FileType.SCRIPT, 'AnalyzeMemImage')
# Create a pack called 'PAN-OS' with 1 incident_type.
pan_os = repo.create_pack('PAN-OS')
create_a_pack_entity(pan_os, FileType.INCIDENT_TYPE, 'FirewallUpgrade', 'FirewallUpgrade')
# Create a pack called 'Logzio' with 1 incident_type.
logzio = repo.create_pack('Logzio')
create_a_pack_entity(logzio, FileType.INCIDENT_TYPE, 'Logz.io Alert', 'Logz.io Alert')
# Create a pack called 'AccessInvestigation' with 1 incident_type.
access_investigation = repo.create_pack('AccessInvestigation')
create_a_pack_entity(access_investigation, FileType.INCIDENT_TYPE, 'Access', 'Access')
# Create a pack called 'PrismaCloud' with 1 incident_type.
prisma_cloud = repo.create_pack('PrismaCloud')
create_a_pack_entity(prisma_cloud, FileType.INCIDENT_TYPE, 'AWS CloudTrail Misconfiguration',
'AWS CloudTrail Misconfiguration')
# Create a pack called 'BruteForce' with 1 incident_field.
brute_force = repo.create_pack('BruteForce')
create_a_pack_entity(brute_force, FileType.INCIDENT_FIELD, 'accountgroups', 'Account Groups')
# Create a pack called 'Compliance' with 1 incident_field.
    compliance = repo.create_pack('Compliance')
    create_a_pack_entity(compliance, FileType.INCIDENT_FIELD, 'emailaddress', 'E-mail Address')
# Create a pack called 'CortexXDR' with 1 classifier.
cortex_xdr = repo.create_pack('CortexXDR')
create_a_pack_entity(cortex_xdr, FileType.CLASSIFIER, 'Cortex XDR - IR', 'Cortex XDR - IR')
# Create a pack called 'ImpossibleTraveler' with:
# 1 integration 1 playbook 1 test_playbook 1 layout 7 incident_fields 1 incident type
impossible_traveler = repo.create_pack('ImpossibleTraveler')
create_a_pack_entity(impossible_traveler, FileType.SCRIPT, 'CalculateGeoDistance')
create_a_pack_entity(impossible_traveler, FileType.PLAYBOOK, 'Impossible Traveler')
create_a_pack_entity(impossible_traveler, FileType.TEST_PLAYBOOK, 'Impossible Traveler - Test')
create_a_pack_entity(impossible_traveler, FileType.LAYOUT, 'Impossible Traveler')
    create_a_pack_entity(impossible_traveler, FileType.INCIDENT_FIELD, 'coordinates', 'Coordinates')
    create_a_pack_entity(impossible_traveler, FileType.INCIDENT_FIELD, 'previouscoordinates', 'Previous Coordinates')
    create_a_pack_entity(impossible_traveler, FileType.INCIDENT_FIELD, 'previouscountry', 'Previous Country')
    create_a_pack_entity(impossible_traveler, FileType.INCIDENT_FIELD,
                         'previoussignindatetime', 'Previous Sign In Date Time')
    create_a_pack_entity(impossible_traveler, FileType.INCIDENT_FIELD, 'previoussourceip', 'Previous Source IP')
    create_a_pack_entity(impossible_traveler, FileType.INCIDENT_FIELD, 'signindatetime', 'Sign In Date Time')
    create_a_pack_entity(impossible_traveler, FileType.INCIDENT_FIELD, 'travelmaplink', 'Travel Map Link')
    create_a_pack_entity(impossible_traveler, FileType.INCIDENT_TYPE, 'impossibletraveler', 'Impossible Traveler')
# Create a pack called 'pack_with_definition' with 1 generic definition.
definition_pack = repo.create_pack('pack_with_definition')
create_a_pack_entity(definition_pack, FileType.GENERIC_DEFINITION, 'assets', 'assets')
# Create a pack called 'pack_with_module' with 1 generic module.
pack_with_module = repo.create_pack('pack_with_module')
create_a_pack_entity(pack_with_module, FileType.GENERIC_MODULE, 'module_id', 'module_id')
# Create a pack called 'pack_with_generic_field' with 1 generic field.
pack_with_generic_field = repo.create_pack('pack_with_generic_field')
create_a_pack_entity(pack_with_generic_field, FileType.GENERIC_FIELD, 'generic_field_id', 'generic_field_id')
# Create a pack called 'pack_with_generic_type' with 1 generic type.
pack_with_generic_type = repo.create_pack('pack_with_generic_type')
create_a_pack_entity(pack_with_generic_type, FileType.GENERIC_TYPE, 'generic_type_id', 'generic_type_id')
incident_layout = {
"detailsV2": {
"tabs": [
{
"id": "caseinfoid",
"name": "Incident Info",
"sections": [
{
"items": [
{
"endCol": 2,
"fieldId": "incident_example",
"height": 22,
"id": "example",
"index": 0,
"sectionItemType": "field",
"startCol": 0
}
]
}
],
"type": "custom"
},
]
},
"group": "incident",
"id": "example",
"name": "example",
"system": "false",
"version": -1,
"fromVersion": "6.0.0",
"description": ""
}
indicator_layout = {
"group": "indicator",
"id": "example",
"indicatorsDetails": {
"tabs": [
{
"sections": [
{
"items": [
{
"endCol": 2,
"fieldId": "indicator_example",
"height": 22,
"id": "example",
"index": 0,
"sectionItemType": "field",
"startCol": 0
}
]
}
],
"type": "custom"
}
]
},
"name": "example",
"system": "false",
"version": -1,
"fromVersion": "6.0.0",
}
generic_layout = {
"detailsV2": {
"tabs": [
{
"id": "caseinfoid",
"name": "Incident Info",
"sections": [
{
"items": [
{
"endCol": 2,
"fieldId": "incident_example",
"height": 22,
"id": "example",
"index": 0,
"sectionItemType": "field",
"startCol": 0
}
]
}
],
"type": "custom"
},
]
},
"group": "generic",
"id": "generic_layout_id",
"name": "generic_layout_id",
"system": "false",
"version": -1,
"fromVersion": "6.0.0",
"description": "",
"definitionId": "assets"
}
pack1 = repo.create_pack('pack1')
create_a_pack_entity(pack1, FileType.INCIDENT_FIELD, 'example', 'example')
pack2 = repo.create_pack('pack2')
create_a_pack_entity(pack2, FileType.INDICATOR_FIELD, 'example', 'example')
pack3 = repo.create_pack('pack3')
pack3.create_layoutcontainer('example', incident_layout)
pack4 = repo.create_pack('pack4')
pack4.create_layoutcontainer('example', indicator_layout)
pack5 = repo.create_pack('pack5')
    pack5.create_layout('generic_layout_id', generic_layout)
with ChangeCWD(repo.path):
ids = cis.IDSetCreator()
ids.create_id_set()
return repo
class TestIdSetFilters:
@pytest.mark.parametrize("item_section", ["scripts", "playbooks"])
def test_search_for_pack_item_with_no_result(self, item_section, module_repo):
pack_id = "Non Existing Pack"
found_filtered_result = PackDependencies._search_for_pack_items(pack_id, module_repo.id_set.read_json_as_dict()[
item_section])
assert len(found_filtered_result) == 0
@pytest.mark.parametrize("pack_id", ["pack_0", "pack_1", "pack_2"])
def test_search_for_pack_script_item(self, pack_id, module_repo):
found_filtered_result = PackDependencies._search_for_pack_items(pack_id, module_repo.id_set.read_json_as_dict()[
'scripts'])
assert len(found_filtered_result) > 0
def test_search_for_specific_pack_script_item(self, module_repo):
pack_id = "PrismaCloudCompute"
expected_result = [
{
"PrismaCloudComputeParseAuditAlert": {
"name": "PrismaCloudComputeParseAuditAlert",
"file_path": "Packs/PrismaCloudCompute/Scripts/PrismaCloudComputeParseAuditAlert/PrismaCloudComputeParseAuditAlert.yml",
"fromversion": '5.0.0',
"docker_image": "demisto/python3:3.8.3.8715",
"pack": "PrismaCloudCompute",
"marketplaces": ["xsoar"],
"source": ['Unknown source', '', '']}
},
{
"PrismaCloudComputeParseCloudDiscoveryAlert": {
"name": "PrismaCloudComputeParseCloudDiscoveryAlert",
"file_path": "Packs/PrismaCloudCompute/Scripts/PrismaCloudComputeParseCloudDiscoveryAlert/PrismaCloudComputeParseCloudDiscoveryAlert.yml",
"fromversion": '5.0.0',
"docker_image": "demisto/python3:3.8.3.8715",
"pack": "PrismaCloudCompute",
"marketplaces": ["xsoar"],
"source": ['Unknown source', '', '']}
},
{
"PrismaCloudComputeParseComplianceAlert": {
"name": "PrismaCloudComputeParseComplianceAlert",
"file_path": "Packs/PrismaCloudCompute/Scripts/PrismaCloudComputeParseComplianceAlert/PrismaCloudComputeParseComplianceAlert.yml",
"fromversion": '5.0.0',
"docker_image": "demisto/python3:3.8.3.8715",
"pack": "PrismaCloudCompute",
"marketplaces": ["xsoar"],
"source": ['Unknown source', '', '']}
},
{
"PrismaCloudComputeParseVulnerabilityAlert": {
"name": "PrismaCloudComputeParseVulnerabilityAlert",
"file_path": "Packs/PrismaCloudCompute/Scripts/PrismaCloudComputeParseVulnerabilityAlert/PrismaCloudComputeParseVulnerabilityAlert.yml",
"fromversion": '5.0.0',
"docker_image": "demisto/python3:3.8.3.8715",
"pack": "PrismaCloudCompute",
"marketplaces": ["xsoar"],
"source": ['Unknown source', '', '']
}
}
]
found_filtered_result = PackDependencies._search_for_pack_items(pack_id, module_repo.id_set.read_json_as_dict()[
'scripts'])
assert found_filtered_result == expected_result
@pytest.mark.parametrize("pack_id", ["pack_0", "pack_1", "pack_2"])
def test_search_for_pack_playbook_item(self, pack_id, module_repo):
found_filtered_result = PackDependencies._search_for_pack_items(pack_id, module_repo.id_set.read_json_as_dict()[
'playbooks'])
assert len(found_filtered_result) > 0
def test_search_for_specific_pack_playbook_item(self, module_repo):
pack_id = "Expanse"
expected_result = [
{
"Expanse_Incident_Playbook": {
"name": "Expanse_Incident_Playbook",
"file_path": "Packs/Expanse/Playbooks/Expanse_Incident_Playbook.yml",
"fromversion": "5.0.0",
"implementing_scripts": [
'DeleteContext'
],
"tests": [
"No tests"
],
"pack": "Expanse",
"marketplaces": ["xsoar"],
"source": ['Unknown source', '', '']
}
}
]
found_filtered_result = PackDependencies._search_for_pack_items(pack_id, module_repo.id_set.read_json_as_dict()[
'playbooks'])
assert found_filtered_result == expected_result
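# Illustrative sketch (comments only, not executed): `PackDependencies._search_for_pack_items` filters an
# id_set section down to the items owned by a single pack, as exercised above. A hypothetical call:
#
#     items = PackDependencies._search_for_pack_items('Expanse', id_set['playbooks'])
#     # -> list of {item_id: item_data} dicts whose 'pack' field equals 'Expanse'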
class TestDependsOnScriptAndIntegration:
@pytest.mark.parametrize("dependency_script,expected_result",
[("GetServerURL", {("GetServerURL", True)}),
("HelloWorldScript", {("HelloWorld", True)}),
("PrismaCloudComputeParseAuditAlert", {("PrismaCloudCompute", True)})
])
def test_collect_scripts_depends_on_script(self, dependency_script, expected_result, module_repo):
"""
Given
- A script entry in the id_set depending on a script.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the script depends on.
- Should recognize the pack.
        - Don't get dependent items since get_dependent_items=False
"""
test_input = [
{
"DummyScript": {
"name": "DummyScript",
"file_path": "dummy_path",
"docker_image": "demisto/python3:3.8.3.8715",
"depends_on": [
dependency_script
],
"pack": "dummy_pack"
}
}
]
found_result = PackDependencies._collect_scripts_dependencies(pack_scripts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
@pytest.mark.parametrize("dependency_script,expected_pack,expected_items",
[("GetServerURL", {("GetServerURL", True)},
{('script', 'DummyScript'): {'GetServerURL': [('script', 'GetServerURL')]}}),
("HelloWorldScript", {("HelloWorld", True)},
{('script', 'DummyScript'): {'HelloWorld': [('script', 'HelloWorldScript')]}}),
("PrismaCloudComputeParseAuditAlert", {("PrismaCloudCompute", True)},
{('script', 'DummyScript'):
{'PrismaCloudCompute': [('script', 'PrismaCloudComputeParseAuditAlert')]}})
])
def test_collect_scripts_depends_on_script_with_items(self, dependency_script, expected_pack, expected_items,
module_repo):
"""
Given
- A script entry in the id_set depending on a script.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the script depends on.
- Should recognize the pack.
        - Get dependent items as well since get_dependent_items=True
"""
test_input = [
{
"DummyScript": {
"name": "DummyScript",
"file_path": "dummy_path",
"docker_image": "demisto/python3:3.8.3.8715",
"depends_on": [
dependency_script
],
"pack": "dummy_pack"
}
}
]
found_result, found_items = PackDependencies._collect_scripts_dependencies(pack_scripts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
get_dependent_items=True)
assert found_result == expected_pack
assert found_items == expected_items
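    # Illustrative note (comments only, not executed): with get_dependent_items=True the collector returns
    # a pair (packs, items). A sketch of the shapes asserted above, using hypothetical names:
    #
    #     packs == {('SomePack', True)}                            # (pack_id, is_mandatory)
    #     items == {('script', 'DummyScript'):                     # the dependent item
    #                   {'SomePack': [('script', 'SomeScript')]}}  # items causing the dependency, per pack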
@pytest.mark.parametrize("dependency_integration_command,expected_result",
[("sslbl-get-indicators", {("Feedsslabusech", True)}),
("activemq-subscribe", {("ActiveMQ", True)}),
("alienvault-get-indicators", {("FeedAlienVault", True)})
])
def test_collect_scripts_depends_on_integration(self, dependency_integration_command, expected_result, module_repo):
"""
Given
- A script entry in the id_set depending on integration commands.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the script depends on.
- Should recognize the pack.
        - Don't get dependent items since get_dependent_items=False
"""
test_input = [
{
"DummyScript": {
"name": "DummyScript",
"file_path": "dummy_path",
"depends_on": [
dependency_integration_command
],
"pack": "dummy_pack"
}
}
]
found_result = PackDependencies._collect_scripts_dependencies(pack_scripts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
@pytest.mark.parametrize("dependency_integration_command,expected_result",
[("sslbl-get-indicators",
({("Feedsslabusech", True)},
{('script', 'DummyScript'): {'Feedsslabusech': [('integration', 'Feedsslabusech')]}})),
("activemq-subscribe",
({("ActiveMQ", True)},
{('script', 'DummyScript'): {'ActiveMQ': [('integration', 'ActiveMQ')]}})),
("alienvault-get-indicators", ({("FeedAlienVault", True)},
{('script', 'DummyScript'): {'FeedAlienVault': [
('integration', 'FeedAlienVault')]}}))
])
def test_collect_scripts_depends_on_integration_with_items(self, dependency_integration_command,
expected_result, module_repo):
"""
Given
- A script entry in the id_set depending on integration commands.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the script depends on.
- Should recognize the pack.
        - Get dependent items as well since get_dependent_items=True
"""
test_input = [
{
"DummyScript": {
"name": "DummyScript",
"file_path": "dummy_path",
"depends_on": [
dependency_integration_command
],
"pack": "dummy_pack"
}
}
]
found_result, found_items = PackDependencies._collect_scripts_dependencies(pack_scripts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
get_dependent_items=True)
assert found_result == expected_result[0]
assert found_items == expected_result[1]
def test_collect_scripts_depends_on_two_scripts(self, module_repo):
"""
Given
- A script entry in the id_set depending on 2 scripts.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the script depends on.
- Should recognize both packs.
"""
expected_result = {('HelloWorld', True), ('PrismaCloudCompute', True)}
test_input = [
{
"DummyScript": {
"name": "DummyScript",
"file_path": "dummy_path",
"depends_on": [
"PrismaCloudComputeParseAuditAlert",
"HelloWorldScript"
],
"pack": "dummy_pack"
}
}
]
found_result = PackDependencies._collect_scripts_dependencies(pack_scripts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_scripts__filter_toversion(self, module_repo):
"""
Given
- A script entry in the id_set depending on QRadar command.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the script depends on.
- Should ignore the Deprecated pack due to toversion settings of old QRadar integration.
"""
expected_result = {('QRadar', True)}
test_input = [
{
"DummyScript": {
"name": "DummyScript",
"file_path": "dummy_path",
"depends_on": [
"qradar-searches",
],
"pack": "dummy_pack"
}
}
]
found_result = PackDependencies._collect_scripts_dependencies(pack_scripts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
exclude_ignored_dependencies=False
)
assert set(found_result) == set(expected_result)
def test_collect_scripts_depends_on_two_integrations(self, module_repo):
"""
Given
- A script entry in the id_set depending on 2 integrations.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the script depends on.
- Should recognize both packs.
"""
expected_result = {('Active_Directory_Query', True), ('Feedsslabusech', True)}
test_input = [
{
"DummyScript": {
"name": "DummyScript",
"file_path": "dummy_path",
"depends_on": [
"sslbl-get-indicators",
"ad-get-user"
],
"pack": "dummy_pack"
}
}
]
found_result = PackDependencies._collect_scripts_dependencies(pack_scripts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_scripts_command_to_integration(self, module_repo):
"""
Given
- A script entry in the id_set containing command_to_integration.
When
- Building dependency graph for pack.
Then
- Extracting the pack that the script depends on.
- Should recognize the pack.
"""
expected_result = {('Active_Directory_Query', True)}
test_input = [
{
"DummyScript": {
"name": "ADGetUser",
"file_path": "Packs/Active_Directory_Query/Scripts/script-ADGetUser.yml",
"depends_on": [
],
"command_to_integration": {
"ad-search": "activedir"
},
"pack": "Active_Directory_Query"
}
}
]
found_result = PackDependencies._collect_scripts_dependencies(pack_scripts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_scripts_script_executions(self, module_repo):
"""
Given
- A script entry in the id_set containing a script_executions, e.g: demisto.executeCommand(<command>).
When
- Building dependency graph for pack.
Then
- Extracting the pack that the script depends on.
- Should recognize the pack.
"""
expected_result = {('Active_Directory_Query', True)}
test_input = [
{
"DummyScript": {
"name": "ADIsUserMember",
"file_path": "Packs/DeprecatedContent/Scripts/script-ADIsUserMember.yml",
"deprecated": False,
"depends_on": [
],
"script_executions": [
"ADGetUser",
],
"pack": "Active_Directory_Query"
}
}
]
found_result = PackDependencies._collect_scripts_dependencies(pack_scripts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_scripts_command_to_integrations_and_script_executions(self, module_repo):
"""
Given
- A script entry in the id_set containing command_to_integrations with a reputation command
and script_executions.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the script depends on.
- Should recognize the mandatory pack and ignore the packs that implement the file command.
"""
expected_result = {
('Active_Directory_Query', True)
}
test_input = [
{
"DummyScript": {
"name": "double_dependency",
"file_path": "Packs/DeprecatedContent/Scripts/script-ADIsUserMember.yml",
"deprecated": False,
"depends_on": [
],
"command_to_integration": {
"file": "many integrations"
},
"script_executions": [
"ADGetUser",
],
"pack": "Active_Directory_Query"
}
}
]
found_result = PackDependencies._collect_scripts_dependencies(pack_scripts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_scripts_depends_on_with_two_inputs(self, module_repo):
"""
Given
        - 2 script entries in the id_set depending on different integrations.
When
- Building dependency graph for the packs.
Then
        - Extracting the packs that the scripts depend on.
- Should recognize both packs.
"""
expected_result = {('Active_Directory_Query', True), ('Feedsslabusech', True)}
test_input = [
{
"DummyScript1": {
"name": "DummyScript1",
"file_path": "dummy_path1",
"depends_on": [
"sslbl-get-indicators"
],
"pack": "dummy_pack"
}
},
{
"DummyScript2": {
"name": "DummyScript2",
"file_path": "dummy_path1",
"depends_on": [
"ad-get-user"
],
"pack": "dummy_pack"
}
}
]
found_result = PackDependencies._collect_scripts_dependencies(pack_scripts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
@pytest.mark.parametrize("generic_command", ['ip', 'domain', 'url', 'file', 'email', 'cve', 'cve-latest',
'cve-search', 'send-mail', 'send-notification'])
def test_collect_detection_of_optional_dependencies(self, generic_command, module_repo):
"""
Given
        - Scripts that depend on generic commands
When
- Building dependency graph for the packs.
Then
        - Extracting the packs that the scripts depend on.
- Should NOT recognize packs.
"""
test_input = [
{
"DummyScript": {
"name": "DummyScript",
"file_path": "dummy_path",
"depends_on": [
generic_command
],
"pack": "dummy_pack"
}
}
]
dependencies_set = PackDependencies._collect_scripts_dependencies(pack_scripts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert len(dependencies_set) == 0
class TestDependsOnPlaybook:
@pytest.mark.parametrize("dependency_script,expected_result",
[("GetServerURL", {("GetServerURL", True)}),
("HelloWorldScript", {("HelloWorld", True)}),
("PrismaCloudComputeParseAuditAlert", {("PrismaCloudCompute", True)})
])
def test_collect_playbooks_dependencies_on_script(self, dependency_script, expected_result, module_repo):
test_input = [
{
"Dummy Playbook": {
"name": "Dummy Playbook",
"file_path": "dummy_path",
"fromversion": "dummy_version",
"implementing_scripts": [
dependency_script
],
"implementing_playbooks": [
],
"command_to_integration": {
},
"tests": [
"dummy_playbook"
],
"pack": "dummy_pack"
}
}
]
found_result = PackDependencies._collect_playbooks_dependencies(pack_playbooks=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
@pytest.mark.parametrize("dependency_script,expected_result,expected_items",
[("GetServerURL", {("GetServerURL", True)},
{('playbook', 'Dummy Playbook'): {'GetServerURL': [('script', 'GetServerURL')]}}),
("HelloWorldScript", {("HelloWorld", True)},
{('playbook', 'Dummy Playbook'): {'HelloWorld': [('script', 'HelloWorldScript')]}}),
("PrismaCloudComputeParseAuditAlert", {("PrismaCloudCompute", True)},
{('playbook', 'Dummy Playbook'): {
'PrismaCloudCompute': [('script', 'PrismaCloudComputeParseAuditAlert')]}})
])
def test_collect_playbooks_dependencies_on_script_with_items(self, dependency_script, expected_result,
expected_items, module_repo):
test_input = [
{
"Dummy Playbook": {
"name": "Dummy Playbook",
"file_path": "dummy_path",
"fromversion": "dummy_version",
"implementing_scripts": [
dependency_script
],
"implementing_playbooks": [
],
"command_to_integration": {
},
"tests": [
"dummy_playbook"
],
"pack": "dummy_pack"
}
}
]
found_result = PackDependencies._collect_playbooks_dependencies(pack_playbooks=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
get_dependent_items=True)
assert found_result[0] == expected_result
assert found_result[1] == expected_items
@pytest.mark.parametrize("dependency_playbook,expected_result",
[("Pentera Run Scan", {("Pcysys", True)}),
("Indeni Demo", {("Indeni", True)}),
("Failed Login Playbook - Slack v2", {("Slack", True)})
])
def test_collect_playbooks_dependencies_on_playbook(self, dependency_playbook, expected_result, module_repo):
test_input = [
{
"Dummy Playbook": {
"name": "Dummy Playbook",
"file_path": "dummy_path",
"fromversion": "dummy_version",
"implementing_scripts": [
],
"implementing_playbooks": [
dependency_playbook
],
"command_to_integration": {
},
"tests": [
"dummy_playbook"
],
"pack": "dummy_pack"
}
}
]
found_result = PackDependencies._collect_playbooks_dependencies(pack_playbooks=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
@pytest.mark.parametrize("integration_command,expected_result",
[("aws-get-indicators", {("FeedAWS", True)}),
("autofocus-get-indicators", {("FeedAutofocus", True)}),
("alienvault-get-indicators", {("FeedAlienVault", True)})
])
def test_collect_playbooks_dependencies_on_integrations(self, integration_command, expected_result, module_repo):
test_input = [
{
"Dummy Playbook": {
"name": "Dummy Playbook",
"file_path": "dummy_path",
"fromversion": "dummy_version",
"implementing_scripts": [
],
"implementing_playbooks": [
],
"command_to_integration": {
integration_command: ""
},
"tests": [
"dummy_playbook"
],
"pack": "dummy_pack"
}
}
]
found_result = PackDependencies._collect_playbooks_dependencies(pack_playbooks=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_playbooks_dependencies_on_integrations_with_brand(self, module_repo):
command = "ip"
pack_name = "ipinfo"
test_input = [
{
"Dummy Playbook": {
"name": "Dummy Playbook",
"file_path": "dummy_path",
"fromversion": "dummy_version",
"implementing_scripts": [
],
"implementing_playbooks": [
],
"command_to_integration": {
command: pack_name
},
"tests": [
"dummy_playbook"
],
"pack": "dummy_pack"
}
}
]
found_result_set = PackDependencies._collect_playbooks_dependencies(pack_playbooks=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert len(found_result_set) == 1
found_result = found_result_set.pop()
assert found_result[0] == pack_name
assert found_result[1]
@pytest.mark.parametrize("integration_command", ["ip", "domain", "url", "cve"])
def test_collect_detection_of_optional_dependencies_in_playbooks(self, integration_command, module_repo):
"""
Given
- Playbooks that are using generic commands
When
- Building dependency graph for the packs.
Then
        - Extracting the packs that the scripts depend on.
- Should NOT recognize packs.
"""
test_input = [
{
"Dummy Playbook": {
"name": "Dummy Playbook",
"file_path": "dummy_path",
"fromversion": "dummy_version",
"implementing_scripts": [
],
"implementing_playbooks": [
],
"command_to_integration": {
integration_command: ""
},
"tests": [
"dummy_playbook"
],
"pack": "dummy_pack"
}
}
]
found_result_set = PackDependencies._collect_playbooks_dependencies(pack_playbooks=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert len(found_result_set) == 0
def test_collect_playbooks_dependencies_on_incident_fields(self, module_repo):
"""
Given
- A playbook entry in the id_set.
When
- Collecting playbook dependencies.
Then
- The incident fields from the DigitalGuardian and EmployeeOffboarding packs
should result in an optional dependency.
"""
expected_result = {("DigitalGuardian", False), ("EmployeeOffboarding", False)}
test_input = [
{
"Dummy Playbook": {
"name": "Dummy Playbook",
"file_path": "dummy_path",
"fromversion": "dummy_version",
"implementing_scripts": [
],
"implementing_playbooks": [
],
"command_to_integration": {
},
"tests": [
"dummy_playbook"
],
"pack": "dummy_pack",
"incident_fields": [
"digitalguardianusername",
"Google Display Name"
]
}
}
]
found_result = PackDependencies._collect_playbooks_dependencies(pack_playbooks=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_playbooks_dependencies_on_incident_fields__phishing_pack(self, module_repo):
"""
Given
- A playbook entry in the id_set which is using incident fields from the Phishing pack.
When
- Collecting playbook dependencies.
Then
- The incident fields from the Phishing pack should result in an optional dependency.
"""
expected_result = {("Phishing", False)}
test_input = [
{
"search_and_delete_emails_-_ews": {
"name": "Search And Delete Emails - EWS",
"file_path": "Packs/EWS/Playbooks/playbook-Search_And_Delete_Emails_-_EWS.yml",
"fromversion": "5.0.0",
"tests": [
"No test"
],
"pack": "EWS",
"incident_fields": [
"attachmentname",
"emailfrom",
"emailsubject"
]
}
}
]
found_result = PackDependencies._collect_playbooks_dependencies(pack_playbooks=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_playbooks_dependencies_on_incident_fields__commontypes_pack(self, module_repo):
"""
Given
        - A playbook entry in the id_set which is using incident fields from the CommonTypes pack.
When
- Collecting playbook dependencies.
Then
        - The incident fields from the CommonTypes pack should result in a mandatory dependency.
"""
expected_result = {("CommonTypes", True)}
test_input = [
{
"search_and_delete_emails_-_ews": {
"name": "Search And Delete Emails - EWS",
"file_path": "Packs/EWS/Playbooks/playbook-Search_And_Delete_Emails_-_EWS.yml",
"fromversion": "5.0.0",
"tests": [
"No test"
],
"pack": "EWS",
"incident_fields": [
"accountid"
]
}
}
]
found_result = PackDependencies._collect_playbooks_dependencies(pack_playbooks=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_playbooks_dependencies_on_indicator_fields(self, module_repo):
"""
Given
- A playbook entry in the id_set which is using Indicator fields from the CommonTypes pack.
When
- Collecting playbook dependencies.
Then
- The indicator field accounttype should result in a mandatory dependency to the CommonTypes pack.
"""
expected_packs = {('SafeBreach', True), ('CommonScripts', True), ('CommonTypes', True)}
expected_items = {('playbook', 'SafeBreach - Compare and Validate Insight Indicators'):
{'SafeBreach': [('integration', 'SafeBreach')],
'CommonScripts': [('script', 'ChangeContext'), ('script', 'Set'),
('script', 'SetAndHandleEmpty')],
'CommonTypes': [('incidentfield', 'indicator_accounttype')]}}
test_input = [
{
"SafeBreach - Compare and Validate Insight Indicators": {
"name": "SafeBreach - Compare and Validate Insight Indicators",
"file_path": "Packs/SafeBreach/Playbooks/SafeBreach_Compare_and_Validate_Insight_Indicators.yml",
"fromversion": "5.5.0",
"implementing_scripts": [
"ChangeContext",
"Set",
"SetAndHandleEmpty"
],
"command_to_integration": {
"safebreach-get-remediation-data": ""
},
"tests": [
"No tests (auto formatted)"
],
"pack": "SafeBreach",
"indicator_fields": [
"accounttype",
]
}
},
]
found_packs, found_items = PackDependencies._collect_playbooks_dependencies(pack_playbooks=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
get_dependent_items=True)
assert found_packs == expected_packs
assert found_items == expected_items
def test_collect_playbooks_dependencies_skip_unavailable(self, module_repo):
"""
Given
- A playbook entry in the id_set.
-
When
- Building dependency graph for pack.
Then
- Extracting the packs that the playbook depends on.
"""
expected_result = {
# playbooks:
('Slack', False), ('Indeni', True),
# integrations:
('FeedAlienVault', False), ('ipinfo', True), ('FeedAutofocus', True),
# scripts:
('GetServerURL', False), ('HelloWorld', True),
}
test_input = [
{
'Dummy Playbook': {
'name': 'Dummy Playbook',
'file_path': 'dummy_path',
'fromversion': 'dummy_version',
'implementing_scripts': [
'GetServerURL',
'HelloWorldScript',
],
'implementing_playbooks': [
'Failed Login Playbook - Slack v2',
'Indeni Demo',
],
'command_to_integration': {
'alienvault-get-indicators': '',
'ip': 'ipinfo',
'autofocus-get-indicators': '',
},
'tests': [
'dummy_playbook'
],
'pack': 'dummy_pack',
'incident_fields': [
],
'skippable_tasks': [
'Print',
'Failed Login Playbook - Slack v2',
'alienvault-get-indicators',
'GetServerURL',
]
}
},
]
found_result = PackDependencies._collect_playbooks_dependencies(pack_playbooks=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
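    # Illustrative note (comments only, not executed): in the test above, entries listed in
    # 'skippable_tasks' are collected as optional dependencies, while the rest stay mandatory, e.g.:
    #
    #     ('GetServerURL', False)  # skippable task -> optional
    #     ('HelloWorld', True)     # not skippable -> mandatory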
def test_collect_playbooks_dependencies_on_filter(self, module_repo):
"""
Given
- A playbook entry in the id_set with filter from the CommonScripts pack.
-
When
- Building dependency graph for pack.
Then
- Extracting the packs that the playbook depends on.
"""
expected_result = {("CommonScripts", True)}
test_input = [
{
'Dummy Playbook': {
'name': 'Dummy Playbook',
'file_path': 'dummy_path',
'fromversion': 'dummy_version',
"filters": ["IsInCidrRanges"]
}
},
]
found_result = PackDependencies._collect_playbooks_dependencies(pack_playbooks=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
class TestDependsOnLayout:
@pytest.mark.parametrize('pack_name, expected_dependencies', [
        ('pack3', 'pack1'),  # pack3 has an incident layout that depends on an incident field from pack1
        ('pack4', 'pack2')  # pack4 has an indicator layout that depends on an indicator field from pack2
])
def test_layouts_dependencies(self, pack_name, expected_dependencies, module_repo):
dependencies = PackDependencies.find_dependencies(
pack_name, id_set_path=module_repo.id_set.path,
update_pack_metadata=False
)
assert list(dependencies.keys())[0] == expected_dependencies
def test_collect_incident_layouts_dependencies(self, module_repo):
"""
Given
- A layout entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the layout depends on.
"""
expected_result = {("PrismaCloudCompute", True)}
test_input = [
{
"Dummy Layout": {
"typeID": "dummy_layout",
"name": "Dummy Layout",
"pack": "dummy_pack",
"kind": "edit",
"path": "dummy_path",
"incident_and_indicator_types": [
"MITRE ATT&CK",
"Prisma Cloud Compute Cloud Discovery"
],
"incident_and_indicator_fields": [
"indicator_adminname",
"indicator_jobtitle"
]
}
}
]
found_result = PackDependencies._collect_layouts_dependencies(pack_layouts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_indicator_layouts_dependencies(self, module_repo):
"""
Given
- A layout entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the layout depends on.
"""
expected_result = {("FeedMitreAttack", True), ("CommonTypes", True), ("CrisisManagement", True)}
test_input = [
{
"Dummy Layout": {
"typeID": "dummy_layout",
"name": "Dummy Layout",
"pack": "dummy_pack",
"kind": "indicatorsDetails",
"path": "dummy_path",
"incident_and_indicator_types": [
"MITRE ATT&CK",
"Prisma Cloud Compute Cloud Discovery"
],
"incident_and_indicator_fields": [
"indicator_adminname",
"indicator_jobtitle"
]
}
}
]
found_result = PackDependencies._collect_layouts_dependencies(pack_layouts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_indicator_layouts_dependencies_with_items(self, module_repo):
"""
Given
- A layout entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the layout depends on and the items causing mandatory dependencies.
"""
expected_result = ({('CrisisManagement', True), ('FeedMitreAttack', True), ('CommonTypes', True)},
{('layout', 'Dummy Layout'): {'FeedMitreAttack': [(
'layout', 'MITRE ATT&CK')], 'CommonTypes': [('indicator_field', 'indicator_adminname')],
'CrisisManagement': [('indicator_field', 'indicator_jobtitle')]}})
test_input = [
{
"Dummy Layout": {
"typeID": "dummy_layout",
"name": "Dummy Layout",
"pack": "dummy_pack",
"kind": "indicatorsDetails",
"path": "dummy_path",
"incident_and_indicator_types": [
"MITRE ATT&CK",
"Prisma Cloud Compute Cloud Discovery"
],
"incident_and_indicator_fields": [
"indicator_adminname",
"indicator_jobtitle"
]
}
}
]
found_result = PackDependencies._collect_layouts_dependencies(pack_layouts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
get_dependent_items=True)
assert found_result == expected_result
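    # Illustrative note (comments only, not executed): for layouts the dependent-items mapping asserted
    # above is keyed by ('layout', <layout name>) and maps each pack to the concrete items that force the
    # dependency, e.g. {'CommonTypes': [('indicator_field', 'indicator_adminname')]}.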
def test_collect_layouts_dependencies_filter_toversion(self, module_repo):
"""
Given
- A layout entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the layout depends on.
- Should ignore the NonSupported pack due to toversion settings of both indicator type and field.
"""
expected_result = {("CommonTypes", True)}
test_input = [
{
"Dummy Layout": {
"typeID": "dummy_layout",
"name": "Dummy Layout",
"pack": "dummy_pack",
"kind": "indicatorsDetails",
"path": "dummy_path",
"incident_and_indicator_types": [
"accountRep",
],
"incident_and_indicator_fields": [
"indicator_tags",
]
}
}
]
found_result = PackDependencies._collect_layouts_dependencies(pack_layouts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
exclude_ignored_dependencies=False,
)
assert set(found_result) == set(expected_result)
def test_collect_generic_layouts_dependencies(self, module_repo):
"""
Given
- A layout entry in the id_set that is related to generic definition
When
- Building dependency graph for pack.
Then
- Extracting the packs that the layout depends on.
"""
expected_result = {("pack_with_generic_field", True)}
test_input = [
{
"Dummy Layout": {
"typeID": "dummy_layout",
"name": "Dummy Layout",
"pack": "dummy_pack",
"kind": "indicatorsDetails",
"path": "dummy_path",
"definitionId": "assets",
"incident_and_indicator_types": [
"generic_type_id"
],
"incident_and_indicator_fields": [
"generic_field_id"
]
}
}
]
found_result = PackDependencies._collect_layouts_dependencies(pack_layouts=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
class TestDependsOnIncidentField:
def test_collect_incident_field_dependencies(self, module_repo):
"""
Given
- An incident field entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the incident field depends on.
"""
expected_result = {
# incident types
# ("Expanse", True), ("IllusiveNetworks", True),
# scripts
("Carbon_Black_Enterprise_Response", True), ("Phishing", True)
}
test_input = [
{
"Dummy Incident Field": {
"name": "Dummy Incident Field",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"incident_types": [
"Expanse Appearance",
"Illusive Networks Incident"
],
"scripts": [
"CBLiveFetchFiles",
"CheckEmailAuthenticity"
]
}
}
]
found_result = PackDependencies._collect_incidents_fields_dependencies(
pack_incidents_fields=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_incident_field_dependencies_with_items(self, module_repo):
"""
Given
- An incident field entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the incident field depends on with the items causing the dependency.
"""
expected_result = (
{('Phishing', True), ('Carbon_Black_Enterprise_Response', True)},
{('incident_field', 'Dummy Incident Field'): {
'Carbon_Black_Enterprise_Response': [('script', 'CBLiveFetchFiles')],
'Phishing': [('script', 'CheckEmailAuthenticity')]}})
test_input = [
{
"Dummy Incident Field": {
"name": "Dummy Incident Field",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"incident_types": [
"Expanse Appearance",
"Illusive Networks Incident"
],
"scripts": [
"CBLiveFetchFiles",
"CheckEmailAuthenticity"
]
}
}
]
found_result = PackDependencies._collect_incidents_fields_dependencies(
pack_incidents_fields=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
get_dependent_items=True
)
assert found_result == expected_result
class TestDependsOnIndicatorType:
def test_collect_indicator_type_dependencies(self, module_repo):
"""
Given
- An indicator type entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the indicator type depends on.
"""
expected_result = {
# script dependencies
("CommonScripts", False), ("Carbon_Black_Enterprise_Response", False)
}
test_input = [
{
"Dummy Indicator Type": {
"name": "Dummy Indicator Type",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"integrations": [
"abuse.ch SSL Blacklist Feed",
"AbuseIPDB",
"ActiveMQ"
],
"scripts": [
"AssignAnalystToIncident",
"CBAlerts"
]
}
}
]
found_result = PackDependencies._collect_indicators_types_dependencies(
pack_indicators_types=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_indicator_type_dependencies_with_items(self, module_repo):
"""
Given
- An indicator type entry in the id_set.
When
- Building dependency graph for pack.
Then
        - Extracting the packs that the indicator type depends on and the items causing mandatory dependencies - there are none in this case.
"""
expected_result = ({('Carbon_Black_Enterprise_Response', False), ('CommonScripts', False)}, {})
test_input = [
{
"Dummy Indicator Type": {
"name": "Dummy Indicator Type",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"integrations": [
"abuse.ch SSL Blacklist Feed",
"AbuseIPDB",
"ActiveMQ"
],
"scripts": [
"AssignAnalystToIncident",
"CBAlerts"
]
}
}
]
found_result = PackDependencies._collect_indicators_types_dependencies(
pack_indicators_types=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False, get_dependent_items=True
)
assert found_result == expected_result
class TestDependsOnIntegrations:
def test_collect_integration_dependencies(self, module_repo):
"""
Given
- An integration entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the integration depends on.
"""
expected_result = {("HelloWorld", True), ("Claroty", True), ("EWS", True), ("CrisisManagement", True),
("CommonTypes", True)}
test_input = [
{
"Dummy Integration": {
"name": "Dummy Integration",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"classifiers": "HelloWorld",
"mappers": [
"Claroty-mapper",
"EWS v2-mapper"
],
"incident_types": "HR Ticket",
"indicator_fields": "CommonTypes",
}
}
]
found_result = PackDependencies._collect_integrations_dependencies(
pack_integrations=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
    def test_collect_integration_dependencies_with_items(self, module_repo):
"""
Given
- An integration entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the integration depends on and the items causing mandatory dependencies.
"""
expected_result = ({('Claroty', True), ('EWS', True), ('HelloWorld', True), ('CommonTypes', True),
('CrisisManagement', True)},
{('integration', 'Dummy Integration'): {'HelloWorld': [('classifier', 'HelloWorld')],
'Claroty': [('mapper', 'CBAlerts-mapper')],
'EWS': [('mapper', 'EWS v2-mapper')],
'CrisisManagement': [
('incidenttype', 'HR Ticket')]}})
test_input = [
{
"Dummy Integration": {
"name": "Dummy Integration",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"classifiers": "HelloWorld",
"mappers": [
"Claroty-mapper",
"EWS v2-mapper"
],
"incident_types": "HR Ticket",
"indicator_fields": "CommonTypes",
}
}
]
found_result = PackDependencies._collect_integrations_dependencies(
pack_integrations=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False, get_dependent_items=True
)
assert found_result == expected_result
class TestDependsOnIncidentType:
def test_collect_incident_type_dependencies(self, module_repo):
"""
Given
- An incident type entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the incident type depends on.
"""
expected_result = {("AutoFocus", True), ("Volatility", True)}
test_input = [
{
"Dummy Incident Type": {
"name": "Dummy Incident Type",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"playbooks": "Autofocus Query Samples, Sessions and Tags",
"scripts": "AnalyzeMemImage"
}
}
]
found_result = PackDependencies._collect_incidents_types_dependencies(
pack_incidents_types=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_incident_type_dependencies_with_items(self, module_repo):
"""
Given
- An incident type entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the incident type depends on and the items causing mandatory dependencies.
"""
expected_result = ({('AutoFocus', True), ('Volatility', True)},
{('incidenttype', 'Dummy Incident Type'):
{'AutoFocus': [('playbook', 'Autofocus Query Samples, Sessions and Tags')],
'Volatility': [('script', 'AnalyzeMemImage')]}})
test_input = [
{
"Dummy Incident Type": {
"name": "Dummy Incident Type",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"playbooks": "Autofocus Query Samples, Sessions and Tags",
"scripts": "AnalyzeMemImage"
}
}
]
found_result = PackDependencies._collect_incidents_types_dependencies(
pack_incidents_types=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False, get_dependent_items=True
)
assert found_result == expected_result
class TestDependsOnClassifiers:
def test_collect_classifier_dependencies(self, module_repo):
"""
Given
- A classifier entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the classifier depends on as optional dependencies.
"""
expected_result = {("Claroty", False), ("PAN-OS", False), ("Logzio", False)}
test_input = [
{
"Dummy Classifier": {
"name": "Dummy Classifier",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"incident_types": [
"Claroty Integrity Incident",
"FirewallUpgrade",
"Logz.io Alert"
],
}
}
]
found_result = PackDependencies._collect_classifiers_dependencies(
pack_classifiers=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_classifier_dependencies_with_items(self, module_repo):
"""
Given
- A classifier entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the classifier depends on as optional
        dependencies and the items causing mandatory dependencies - there are none in this case.
"""
expected_result = ({('Claroty', False), ('Logzio', False), ('PAN-OS', False)}, {})
test_input = [
{
"Dummy Classifier": {
"name": "Dummy Classifier",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"incident_types": [
"Claroty Integrity Incident",
"FirewallUpgrade",
"Logz.io Alert"
],
}
}
]
found_result = PackDependencies._collect_classifiers_dependencies(
pack_classifiers=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False, get_dependent_items=True
)
assert found_result == expected_result
def test_collect_classifier_dependencies__commontypes_pack(self, module_repo):
"""
Given
- A classifier entry in the id_set with an incident type from the CommonTypes pack.
When
- Building dependency graph for pack.
Then
        - Extracting the packs that the classifier depends on as mandatory dependencies.
"""
expected_result = {("CommonTypes", True)}
test_input = [
{
"Dummy Classifier": {
"name": "Dummy Classifier",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"incident_types": [
"Network"
],
}
}
]
found_result = PackDependencies._collect_classifiers_dependencies(
pack_classifiers=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_classifier_dependencies_on_filter(self, module_repo):
"""
Given
- A classifier entry in the id_set with filter from the CommonScripts pack.
When
- Building dependency graph for pack.
Then
        - Extracting the packs that the classifier depends on as mandatory dependencies.
"""
expected_result = {("CommonScripts", True)}
test_input = [
{
"Dummy Classifier": {
"name": "Dummy Classifier",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"filters": ["IsInCidrRanges"]
}
}
]
found_result = PackDependencies._collect_classifiers_dependencies(
pack_classifiers=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_generic_classifier_dependencies(self, module_repo):
"""
Given
- A classifier entry in the id_set that has generic definition
When
- Building dependency graph for pack.
Then
        - Extracting the packs that the classifier depends on as mandatory dependencies.
"""
expected_result = {("pack_with_generic_type", True)}
test_input = [
{
"Dummy Classifier": {
"name": "Dummy Classifier",
"fromversion": "5.0.0",
"definitionId": "assets",
"pack": "dummy_pack",
"incident_types": ["generic_type_id"],
}
}
]
found_result = PackDependencies._collect_classifiers_dependencies(
pack_classifiers=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
class TestDependsOnMappers:
def test_collect_mapper_dependencies(self, module_repo):
"""
Given
- A mapper entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the mapper depends on as optional dependencies.
"""
expected_result = {("AccessInvestigation", False), ("CommonTypes", True), ("PrismaCloud", False),
("BruteForce", False)}
test_input = [
{
"Dummy Mapper": {
"name": "Dummy Mapper",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"incident_types": [
"Access",
"AWS CloudTrail Misconfiguration"
],
"incident_fields": [
"incident_accountgroups",
"incident_accountid"
],
}
}
]
found_result = PackDependencies._collect_mappers_dependencies(
pack_mappers=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_mapper_dependencies_with_items(self, module_repo):
"""
Given
- A mapper entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the mapper depends on as optional dependencies and the items causing the mandatory dependency.
"""
expected_result = (
{('BruteForce', False), ('PrismaCloud', False), ('AccessInvestigation', False), ('CommonTypes', True)},
{('mapper', 'Dummy Mapper'): {'CommonTypes': [('incidentfield', 'incident_accountid')]}})
test_input = [
{
"Dummy Mapper": {
"name": "Dummy Mapper",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"incident_types": [
"Access",
"AWS CloudTrail Misconfiguration"
],
"incident_fields": [
"incident_accountgroups",
"incident_accountid"
],
}
}
]
found_result = PackDependencies._collect_mappers_dependencies(
pack_mappers=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False, get_dependent_items=True
)
assert found_result == expected_result
def test_collect_mapper_dependencies__commontypes_pack(self, module_repo):
"""
Given
- A mapper entry in the id_set with an incident type from the CommonTypes pack.
When
- Building dependency graph for pack.
Then
        - Extracting the packs that the mapper depends on as mandatory dependencies.
"""
expected_result = {("CommonTypes", True)}
test_input = [
{
"Dummy Mapper": {
"name": "Dummy Mapper",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"incident_types": [
"Authentication"
]
}
}
]
found_result = PackDependencies._collect_mappers_dependencies(
pack_mappers=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_mapper_dependencies_on_filter(self, module_repo):
"""
Given
- A mapper entry in the id_set with filter from the CommonScripts pack.
When
- Building dependency graph for pack.
Then
        - Extracting the packs that the mapper depends on as mandatory dependencies.
"""
expected_result = {("CommonScripts", True)}
test_input = [
{
"Dummy Mapper": {
"name": "Dummy Mapper",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"filters": ["IsInCidrRanges"]
}
}
]
found_result = PackDependencies._collect_mappers_dependencies(
pack_mappers=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
class TestDependsOnWidgets:
def test_collect_widgets_dependencies(self, module_repo):
"""
Given
        - A widget entry in the id_set.
When
- Building dependency graph for pack.
Then
        - Extracting the packs that the widget depends on.
"""
expected_result = {('CommonScripts', True)}
test_input = [
{
"Dummy_widget": {
"name": "Dummy Widget",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"scripts": [
"AssignAnalystToIncident"
]
}
}
]
found_result = PackDependencies._collect_widget_dependencies(
pack_widgets=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def test_collect_widgets_dependencies_with_item(self, module_repo):
"""
Given
        - A widget entry in the id_set.
When
- Building dependency graph for pack.
Then
        - Extracting the packs that the widget depends on and the items causing mandatory dependencies
"""
expected_result = ({('CommonScripts', True)},
{('widget', 'Dummy_widget'): {'CommonScripts': [('script', 'AssignAnalystToIncident')]}})
test_input = [
{
"Dummy_widget": {
"name": "Dummy Widget",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"scripts": [
"AssignAnalystToIncident"
]
}
}
]
found_result = PackDependencies._collect_widget_dependencies(
pack_widgets=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False, get_dependent_items=True
)
assert found_result == expected_result
class TestDependsOnDashboard:
def test_collect_dashboard_dependencies(self, module_repo):
"""
Given
- A dashboard entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the dashboard depends on.
"""
expected_result = {('CommonScripts', True)}
test_input = [
{
"Dummy_dashboard": {
"name": "Dummy Widget",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"scripts": [
"AssignAnalystToIncident"
]
}
}
]
found_result = PackDependencies._collect_widget_dependencies(
pack_widgets=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
header='Dashboards',
)
assert set(found_result) == set(expected_result)
def test_collect_dashboard_dependencies_with_items(self, module_repo):
"""
Given
- A dashboard entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the dashboard depends on and the items causing the mandatory dependencies.
"""
expected_result = ({('CommonScripts', True)}, {
('dashboard', 'Dummy_dashboard'): {'CommonScripts': [('script', 'AssignAnalystToIncident')]}})
test_input = [
{
"Dummy_dashboard": {
"name": "Dummy Widget",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"scripts": [
"AssignAnalystToIncident"
]
}
}
]
found_result = PackDependencies._collect_widget_dependencies(
pack_widgets=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
header='Dashboards', get_dependent_items=True
)
assert found_result == expected_result
class TestDependsOnJob:
@pytest.mark.parametrize('feed_dependency', (True, False))
def test_collect_job_dependencies(self, module_repo, feed_dependency: bool):
"""
Given
- A job entry in the id_set
When
- Building a dependency graph
Then
- Ensure depended-on packs are extracted
"""
expected_result = {('Pcysys', True)} # playbook dependant
if feed_dependency:
expected_result.add(('FeedAWS', True)) # integration (feed) dependant
selected_feeds = ['FeedAWS']
else:
selected_feeds = []
test_job_data = [
{
"jobby": {
"name": "jobby",
"file_path": "Packs/pack0/Jobs/job-jobby.json",
"pack": "pack0",
"playbookId": "Pentera Run Scan",
"selectedFeeds": selected_feeds,
"fromVersion": DEFAULT_JOB_FROM_VERSION
}
}
]
found_result = PackDependencies._collect_jobs_dependencies(test_job_data,
module_repo.id_set.read_json_as_dict(),
verbose=False)
assert set(found_result) == set(expected_result)
    def test_collect_job_dependencies_with_items(self, module_repo):
"""
Given
- A job entry in the id_set
When
- Building a dependency graph
Then
- Ensure depended-on packs are extracted and the items causing the mandatory dependencies.
"""
expected_result = (
{('Pcysys', True)},
{('job', 'jobby'): {'Pcysys': [('playbook', 'Pentera Run Scan')]}}) # playbook dependant
selected_feeds = []
test_job_data = [
{
"jobby": {
"name": "jobby",
"file_path": "Packs/pack0/Jobs/job-jobby.json",
"pack": "pack0",
"playbookId": "Pentera Run Scan",
"selectedFeeds": selected_feeds,
"fromVersion": DEFAULT_JOB_FROM_VERSION
}
}
]
found_result = PackDependencies._collect_jobs_dependencies(
test_job_data, id_set=module_repo.id_set.read_json_as_dict(), verbose=False, get_dependent_items=True)
assert found_result == expected_result
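# Illustrative sketch (comments only, not executed): a Job entry pulls in the pack of its 'playbookId'
# and, when 'selectedFeeds' is non-empty, the packs of those feed integrations as well. A hypothetical
# entry and its expected outcome, based on the tests above:
#
#     {"my_job": {"pack": "pack0", "playbookId": "Pentera Run Scan", "selectedFeeds": ["FeedAWS"]}}
#     # -> dependencies {('Pcysys', True), ('FeedAWS', True)}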
class TestDependsOnReports:
def test_collect_report_dependencies(self, module_repo):
"""
Given
- A report entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the report depends on.
"""
expected_result = {('CommonScripts', True)}
test_input = [
{
"Dummy_report": {
"name": "Dummy Widget",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"scripts": [
"AssignAnalystToIncident"
]
}
}
]
found_result = PackDependencies._collect_widget_dependencies(
pack_widgets=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
header='Reports',
)
assert set(found_result) == set(expected_result)
def test_collect_report_dependencies_with_items(self, module_repo):
"""
Given
- A report entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the report depends on and the items causing mandatory dependencies.
"""
expected_result = ({('CommonScripts', True)},
{('report', 'Dummy_report'): {'CommonScripts': [('script', 'AssignAnalystToIncident')]}})
test_input = [
{
"Dummy_report": {
"name": "Dummy Widget",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"scripts": [
"AssignAnalystToIncident"
]
}
}
]
found_result = PackDependencies._collect_widget_dependencies(
pack_widgets=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
header='Reports', get_dependent_items=True
)
assert found_result == expected_result
SEARCH_PACKS_INPUT = [
(['type'], 'IncidentFields', (set(), dict()), 'incident_field'),
(
['emailaddress'], 'IncidentFields',
({'Compliance'}, {'Compliance': [('incident_field', 'incident_emailaddress')]}),
'incident_field'),
(['E-mail Address'], 'IncidentFields',
({'Compliance'}, {'Compliance': [('incident_field', 'incident_emailaddress')]}), 'incident_field'),
(['adminemail'], 'IndicatorFields',
({'CommonTypes'}, {'CommonTypes': [('indicator_field', 'indicator_adminemail')]}), 'indicator_field'),
(['Admin Email'], 'IndicatorFields',
({'CommonTypes'}, {'CommonTypes': [('indicator_field', 'indicator_adminemail')]}), 'indicator_field'),
(['Claroty'], 'Mappers', ({'Claroty'}, {'Claroty': [('mapper', 'Claroty')]}), 'mapper'),
(['Claroty - Incoming Mapper'], 'Mappers', ({'Claroty'}, {'Claroty': [('mapper', 'CBAlerts - Incoming Mapper')]}),
'mapper'),
(['Cortex XDR - IR'], 'Classifiers', ({'CortexXDR'}, {'CortexXDR': [('classifier', 'Cortex XDR - IR')]}),
'classifier'),
]
@pytest.mark.parametrize('item_names, section_name, expected_result, type', SEARCH_PACKS_INPUT)
def test_search_packs_by_items_names_or_ids(item_names, section_name, expected_result, module_repo, type):
found_packs, packs_and_items_dict = PackDependencies._search_packs_by_items_names_or_ids(item_names,
module_repo.
id_set.read_json_as_dict()
[section_name], True,
'Both', type)
assert found_packs == expected_result[0]
assert packs_and_items_dict == expected_result[1]
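# Illustrative note (comments only, not executed): `_search_packs_by_items_names_or_ids` returns both the
# set of pack ids and a pack -> items mapping, as parametrized above, e.g. for the indicator field
# 'adminemail':
#
#     ({'CommonTypes'}, {'CommonTypes': [('indicator_field', 'indicator_adminemail')]})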
def test_find_dependencies_using_pack_metadata(mocker):
"""
Given
- A dict of dependencies from id set.
When
- Running PackDependencies.update_dependencies_from_pack_metadata.
Then
- Assert the dependencies in the given dict is updated.
"""
mock_pack_meta_file = {
"dependencies": {
"dependency_pack1": {
"mandatory": False,
"display_name": "dependency pack 1"
},
"dependency_pack2": {
"mandatory": False,
"display_name": "dependency pack 2"
},
"dependency_pack3": {
"mandatory": False,
"display_name": "dependency pack 3"
}
}
}
dependencies_from_id_set = {
"dependency_pack1": {
"mandatory": False,
"display_name": "dependency pack 1"
},
"dependency_pack2": {
"mandatory": True,
"display_name": "dependency pack 2"
},
"dependency_pack3": {
"mandatory": True,
"display_name": "dependency pack 3"
},
"dependency_pack4": {
"mandatory": True,
"display_name": "dependency pack 4"
}
}
mocker.patch('demisto_sdk.commands.find_dependencies.find_dependencies.PackDependencies.get_metadata_from_pack',
return_value=mock_pack_meta_file)
first_level_dependencies = PackDependencies.update_dependencies_from_pack_metadata('', dependencies_from_id_set)
assert not first_level_dependencies.get("dependency_pack2", {}).get("mandatory")
assert not first_level_dependencies.get("dependency_pack3", {}).get("mandatory")
assert first_level_dependencies.get("dependency_pack4", {}).get("mandatory")
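# Illustrative note (comments only, not executed): `update_dependencies_from_pack_metadata` downgrades a
# dependency to optional when the pack's own metadata marks it non-mandatory, and leaves dependencies that
# appear only in the id_set (such as 'dependency_pack4' above) untouched. A sketch with hypothetical names:
#
#     id_set:        {'packA': {'mandatory': True}}
#     pack metadata: {'packA': {'mandatory': False}}
#     # -> merged:   {'packA': {'mandatory': False}}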
class TestDependencyGraph:
@pytest.mark.parametrize('source_node, expected_nodes_in, expected_nodes_out',
[('pack1', ['pack1', 'pack2', 'pack3'], ['pack4']),
('pack2', ['pack2', 'pack3'], ['pack4', 'pack1'])]
)
def test_get_dependencies_subgraph_by_dfs(self, source_node, expected_nodes_in, expected_nodes_out):
"""
Given
- A directional graph and a source node.
When
        - Extracting its DFS subgraph.
Then
- Assert all nodes that are reachable from the source are in the subgraph
- Assert all nodes that are not reachable from the source are not in the subgraph
"""
graph = nx.DiGraph()
graph.add_node('pack1')
graph.add_node('pack2')
graph.add_node('pack3')
graph.add_node('pack4')
graph.add_edge('pack1', 'pack2')
graph.add_edge('pack2', 'pack3')
dfs_graph = PackDependencies.get_dependencies_subgraph_by_dfs(graph, source_node)
for i in expected_nodes_in:
assert i in dfs_graph.nodes()
for i in expected_nodes_out:
assert i not in dfs_graph.nodes()
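    # Illustrative sketch (comments only, not executed): the DFS subgraph above is the set of nodes
    # reachable from the source via directed edges; with networkx this is equivalent in spirit to:
    #
    #     reachable = nx.descendants(graph, 'pack1') | {'pack1'}  # {'pack1', 'pack2', 'pack3'}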
def test_build_all_dependencies_graph(self, mocker):
"""
Given
- A list of packs and their dependencies
When
- Creating the dependencies graph using build_all_dependencies_graph method
Then
- Assert all the dependencies are correct
- Assert all the mandatory dependencies are correct
"""
def mock_find_pack_dependencies(pack_id, *_, **__):
dependencies = {'pack1': [('pack2', True), ('pack3', True)],
'pack2': [('pack3', True), ('pack2', True)],
'pack3': [],
'pack4': [('pack6', False)]}
dependencies_items = {'pack1': {
('type_item_a', 'item_a'): {'pack2': [('type_item_2', 'item2')], 'pack3': [('type_item_3', 'item3')]}},
'pack2': {('type_item_b', 'item_b'): {'pack3': [('type_item_3', 'item3')],
'pack2': [('type_item_2', 'item2')]}},
'pack3': {},
'pack4': {('type_item_c', 'item_c'): {'pack4': [('type_item_4', 'item4')]}}}
return dependencies[pack_id], dependencies_items[pack_id]
mocker.patch(
'demisto_sdk.commands.find_dependencies.find_dependencies.PackDependencies._find_pack_dependencies',
side_effect=mock_find_pack_dependencies
)
pack_ids = ['pack1', 'pack2', 'pack3', 'pack4']
dependency_graph = PackDependencies.build_all_dependencies_graph(pack_ids, {}, False)
# Asserting Dependencies (mandatory and non-mandatory)
assert [n for n in dependency_graph.neighbors('pack1')] == ['pack2', 'pack3']
assert [n for n in dependency_graph.neighbors('pack2')] == ['pack3']
assert [n for n in dependency_graph.neighbors('pack3')] == []
assert [n for n in dependency_graph.neighbors('pack4')] == ['pack6']
# Asserting mandatory dependencies
nodes = dependency_graph.nodes(data=True)
assert nodes['pack1']['mandatory_for_packs'] == []
assert nodes['pack1']['depending_on_items_mandatorily'] == {
('type_item_a', 'item_a'): {'pack2': [('type_item_2', 'item2')], 'pack3': [('type_item_3', 'item3')]}}
assert nodes['pack1']['depending_on_packs'] == [('pack2', True), ('pack3', True)]
assert nodes['pack1']['mandatory_for_items'] == {}
assert nodes['pack2']['mandatory_for_packs'] == ['pack1']
assert nodes['pack2']['depending_on_items_mandatorily'] == {
('type_item_b', 'item_b'): {'pack3': [('type_item_3', 'item3')], 'pack2': [('type_item_2', 'item2')]}}
assert nodes['pack2']['depending_on_packs'] == [('pack3', True), ('pack2', True)]
assert nodes['pack2']['mandatory_for_items'] == {
('type_item_2', 'item2'): {'pack1': [('type_item_a', 'item_a')]}}
assert nodes['pack3']['mandatory_for_packs'] == ['pack1', 'pack2']
assert nodes['pack3']['depending_on_items_mandatorily'] == {}
assert nodes['pack3']['depending_on_packs'] == []
assert nodes['pack3']['mandatory_for_items'] == {
('type_item_3', 'item3'): {'pack1': [('type_item_a', 'item_a')], 'pack2': [('type_item_b', 'item_b')]}}
assert nodes['pack4']['mandatory_for_packs'] == []
assert nodes['pack4']['depending_on_items_mandatorily'] == {
('type_item_c', 'item_c'): {'pack4': [('type_item_4', 'item4')]}}
assert nodes['pack4']['depending_on_packs'] == [('pack6', False)]
assert nodes['pack4']['mandatory_for_items'] == {}
assert nodes['pack6']['mandatory_for_packs'] == []
assert nodes['pack6']['depending_on_items_mandatorily'] == {}
assert nodes['pack6']['depending_on_packs'] == []
assert nodes['pack6']['mandatory_for_items'] == {}
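    # Illustrative note (comments only, not executed): build_all_dependencies_graph stores four attributes
    # per node, as asserted above - 'depending_on_packs', 'depending_on_items_mandatorily',
    # 'mandatory_for_packs' and 'mandatory_for_items' - so both directions of every mandatory dependency
    # can be read off the graph.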
def test_build_dependency_graph(self, module_repo):
pack_name = "ImpossibleTraveler"
found_graph = PackDependencies.build_dependency_graph_single_pack(pack_id=pack_name,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False)
root_of_graph = [n for n in found_graph.nodes if found_graph.in_degree(n) == 0][0]
pack_dependencies = [n for n in found_graph.nodes if found_graph.in_degree(n) > 0]
assert root_of_graph == pack_name
assert len(pack_dependencies) > 0
def test_build_dependency_graph_include_ignored_content(self, module_repo):
"""
Given
- A pack name which depends on unsupported content.
When
- Building dependency graph for pack.
Then
- Extracting the pack dependencies with unsupported content.
"""
pack_name = "ImpossibleTraveler"
found_graph = PackDependencies.build_dependency_graph_single_pack(pack_id=pack_name,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
exclude_ignored_dependencies=False
)
root_of_graph = [n for n in found_graph.nodes if found_graph.in_degree(n) == 0][0]
pack_dependencies = [n for n in found_graph.nodes if found_graph.in_degree(n) > 0]
assert root_of_graph == pack_name
assert len(pack_dependencies) > 0
assert 'NonSupported' not in pack_dependencies
class TestDependsOnGenericField:
def test_collect_generic_field_dependencies(self, module_repo):
"""
Given
- a generic field entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the generic field depends on.
"""
expected_result = {
("Volatility", True), ("pack_with_definition", True), ("pack_with_generic_type", True)
}
test_input = [
{
"Dummy Generic Field": {
"name": "Dummy Generic Field",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"definitionId": "assets",
"generic_types": ["generic_type_id"],
"scripts": ["AnalyzeMemImage"],
}
}
]
found_result = PackDependencies._collect_generic_fields_dependencies(
pack_generic_fields=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
class TestDependsOnGenericType:
def test_collect_generic_type_dependencies(self, module_repo):
"""
Given
- A generic type entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the generic type depends on.
"""
expected_result = {("pack_with_definition", True), ("Volatility", True), ("pack5", True)}
test_input = [
{
"Dummy Generic Type": {
"name": "Dummy Generic Type",
"fromversion": "5.0.0",
"pack": "dummy_pack",
"scripts": "AnalyzeMemImage",
"definitionId": "assets",
"layout": "generic_layout_id"
}
}
]
found_result = PackDependencies._collect_generic_types_dependencies(
pack_generic_types=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
class TestDependsOnGenericModules:
def test_collect_generic_module_dependencies(self, module_repo):
"""
Given
- A generic module entry in the id_set.
When
- Building dependency graph for pack.
Then
- Extracting the packs that the generic module depends on.
"""
expected_result = {("pack_with_definition", True), ("pack_4", True)}
test_input = [
{
"dummy generic module": {
"name": "dummy generic module",
"file_path": "path.json",
"fromversion": "6.5.0",
"pack": "dummy pack",
"definitionIds": ["assets"],
"views": {
"Vulnerability Management": {
"title": "Risk Base Vulnerability Management",
"dashboards": ["pack_4 - dashboard"]
}
}
}
}
]
found_result = PackDependencies._collect_generic_modules_dependencies(
pack_generic_modules=test_input,
id_set=module_repo.id_set.read_json_as_dict(),
verbose=False,
)
assert set(found_result) == set(expected_result)
def find_pack_display_name_mock(pack_folder_name):
return pack_folder_name
class TestCalculateSinglePackDependencies:
@classmethod
def setup_class(cls):
patch('demisto_sdk.commands.find_dependencies.find_dependencies.find_pack_display_name',
side_effect=find_pack_display_name_mock)
patch('Tests.scripts.utils.log_util.install_logging')
graph = nx.DiGraph()
graph.add_node('pack1', mandatory_for_packs=[], depending_on_items_mandatorily={},
mandatory_for_items={}, depending_on_packs=[])
graph.add_node('pack2', mandatory_for_packs=[], depending_on_items_mandatorily={},
mandatory_for_items={}, depending_on_packs=[])
graph.add_node('pack3', mandatory_for_packs=[], depending_on_items_mandatorily={},
mandatory_for_items={}, depending_on_packs=[])
graph.add_node('pack4', mandatory_for_packs=[], depending_on_items_mandatorily={},
mandatory_for_items={}, depending_on_packs=[])
graph.add_node('pack5', mandatory_for_packs=[], depending_on_items_mandatorily={},
mandatory_for_items={}, depending_on_packs=[])
graph.add_edge('pack1', 'pack2')
graph.add_edge('pack2', 'pack3')
graph.add_edge('pack1', 'pack4')
graph.nodes()['pack4']['mandatory_for_packs'].append('pack1')
dependencies = calculate_single_pack_dependencies('pack1', graph)
cls.first_level_dependencies, cls.all_level_dependencies, _ = dependencies
def test_calculate_single_pack_dependencies_first_level_dependencies(self):
"""
Given
- A full dependency graph where:
- pack1 -> pack2 -> pack3
- pack1 -> pack4
- pack4 is mandatory for pack1
- pack5 and pack1 are not a dependency for any pack
When
- Running `calculate_single_pack_dependencies` to extract the first and all levels dependencies
Then
- Ensure first level dependencies for pack1 are only pack2 and pack4
"""
all_nodes = {'pack1', 'pack2', 'pack3', 'pack4', 'pack5'}
expected_first_level_dependencies = {'pack2', 'pack4'}
for node in expected_first_level_dependencies:
assert node in self.first_level_dependencies
for node in all_nodes - expected_first_level_dependencies:
assert node not in self.first_level_dependencies
def test_calculate_single_pack_dependencies_all_levels_dependencies(self):
"""
Given
- A full dependency graph where:
- pack1 -> pack2 -> pack3
- pack1 -> pack4
- pack4 is mandatory for pack1
- pack5 and pack1 are not a dependency for any pack
When
- Running `calculate_single_pack_dependencies` to extract the first and all levels dependencies
Then
- Ensure all levels dependencies for pack1 are pack2, pack3 and pack4 only
"""
all_nodes = {'pack1', 'pack2', 'pack3', 'pack4', 'pack5'}
expected_all_level_dependencies = {'pack2', 'pack3', 'pack4'}
for node in expected_all_level_dependencies:
assert node in self.all_level_dependencies
for node in all_nodes - expected_all_level_dependencies:
assert node not in self.all_level_dependencies
def test_calculate_single_pack_dependencies_mandatory_dependencies(self):
"""
Given
- A full dependency graph where:
- pack1 -> pack2 -> pack3
- pack1 -> pack4
- pack4 is mandatory for pack1
- pack5 and pack1 are not a dependency for any pack
When
- Running `calculate_single_pack_dependencies` to extract the first and all levels dependencies
Then
- pack4 is mandatory for pack1 and that there are no other mandatory dependencies
"""
expected_mandatory_dependency = 'pack4'
assert self.first_level_dependencies[expected_mandatory_dependency]['mandatory']
for node in self.first_level_dependencies:
if node != expected_mandatory_dependency:
assert not self.first_level_dependencies[node]['mandatory']
def get_mock_dependency_graph():
graph = nx.DiGraph()
graph.add_node('pack1', mandatory_for_packs=[], depending_on_items_mandatorily={
('type_item_a', 'item_a'): {'pack2': ('type_item_2', 'item2'), 'pack3': ('type_item_3', 'item3')}},
mandatory_for_items={}, depending_on_packs=[('pack2', True), ('pack3', True)])
graph.add_node('pack2', mandatory_for_packs=['pack1'], depending_on_items_mandatorily={
('type_item_b', 'item_b'): {'pack3': ('type_item_3', 'item3'), 'pack2': ('type_item_2', 'item2')}},
mandatory_for_items={('type_item_2', 'item2'): {'pack1': ('type_item_a', 'item_a')}},
depending_on_packs=[('pack3', True), ('pack2', True)])
graph.add_node('pack3', mandatory_for_packs=['pack1', 'pack2'], depending_on_items_mandatorily={},
mandatory_for_items={('type_item_3', 'item3'): {'pack1': ('type_item_a', 'item_a'),
'pack2': ('type_item_b', 'item_b')}},
depending_on_packs=[])
graph.add_edge('pack1', 'pack2')
graph.add_edge('pack1', 'pack3')
graph.add_edge('pack2', 'pack3')
return graph
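# Illustrative reading of the mock graph above (comment added for clarity, not
# part of the original test module): edges point from a pack to the packs it
# depends on, and pack3 is a mandatory dependency of both pack1 and pack2.
# TestGetDependentOnGivenPack below therefore expects the reverse question,
# "who depends on pack3?", to return pack1 and pack2 together with the
# item-level links recorded in 'mandatory_for_items'.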
class TestGetDependentOnGivenPack:
def test_get_dependent_on_given_pack(self, mocker):
mocker.patch('demisto_sdk.commands.find_dependencies.find_dependencies.find_pack_display_name',
side_effect=find_pack_display_name_mock)
mocker.patch('demisto_sdk.commands.find_dependencies.find_dependencies.get_id_set', return_value={})
mocker.patch('demisto_sdk.commands.find_dependencies.find_dependencies.select_packs_for_calculation',
return_value=[])
mocker.patch('demisto_sdk.commands.find_dependencies.find_dependencies.PackDependencies.build_all_'
'dependencies_graph', return_value=get_mock_dependency_graph())
mocker.patch('demisto_sdk.commands.find_dependencies.find_dependencies.get_pack_name', return_value='pack3')
dependent_packs_dict, dependent_packs = get_packs_dependent_on_given_packs('pack3', '')
assert 'pack2' in dependent_packs
assert 'pack1' in dependent_packs
assert dependent_packs_dict['pack3']['packsDependentOnThisPackMandatorily']['pack1']['mandatory']
assert dependent_packs_dict['pack3']['packsDependentOnThisPackMandatorily']['pack2']['mandatory']
assert dependent_packs_dict['pack3']['packsDependentOnThisPackMandatorily']['pack1']['dependent_items'] == [
(('type_item_3', 'item3'), ('type_item_a', 'item_a'))]
assert dependent_packs_dict['pack3']['packsDependentOnThisPackMandatorily']['pack2']['dependent_items'] == [
(('type_item_3', 'item3'), ('type_item_b', 'item_b'))]
ID_SET = {
"integrations": [{'integration1': {}}, {'integration2': {}}],
"scripts": [{'script1': {}}, {'script2': {}}],
"playbooks": [{'playbook1': {}}, {'playbook2': {}}],
"Classifiers": [{'classifier1': {}}, {'classifier2': {}}],
"Dashboards": [{'dashboard1': {}}, {'dashboard2': {}}],
"IncidentFields": [{'field1': {}}, {'field2': {}}],
"IncidentTypes": [{'type1': {}}, {'type2': {}}],
"IndicatorFields": [{'field1': {}}, {'field2': {}}],
"IndicatorTypes": [{'type1': {}}, {'type2': {}}],
"Layouts": [{'layout1': {}}, {'layout2': {}}],
"Reports": [{'report1': {}}, {'report2': {}}],
"Widgets": [{'widget1': {}}, {'widget2': {}}],
"Mappers": [{'mapper1': {}}, {'mapper2': {}}],
"Lists": [{'list1': {}}, {'list2': {}}],
"Packs": {
"pack1": {
"name": "pack1",
"ContentItems": {
"playbooks": [
"playbook1"
]
}
},
"pack2": {
"name": "pack2",
"ContentItems": {
"scripts": [
"script1",
"script2"
]
}
}
}
}
def test_remove_items_from_content_entities_sections():
"""
Given
- id set
    - items that need to be excluded from all the entity sections in the id set except the 'Packs' section
When
- removing items dependencies from id set
Then
    - ensuring the items were successfully removed from the id set
"""
excluded_items_by_type = {
'integration': {'integration1'},
'script': {'script1'},
'playbook': {'playbook1'},
"classifier": {'classifier1'},
"incidentfield": {'field1'},
"incidenttype": {'type1'},
"indicatorfield": {'field1'},
"indicatortype": {'type1'},
"mapper": {'mapper1'},
"dashboard": {'dashboard1'},
"widget": {'widget1'},
"list": {'list1'},
"report": {'report1'},
"layout": {'layout1'}
}
expected_id_set_entities_section = {
"integrations": [{'integration2': {}}],
"scripts": [{'script2': {}}],
"playbooks": [{'playbook2': {}}],
"Classifiers": [{'classifier2': {}}],
"Dashboards": [{'dashboard2': {}}],
"IncidentFields": [{'field2': {}}],
"IncidentTypes": [{'type2': {}}],
"IndicatorFields": [{'field2': {}}],
"IndicatorTypes": [{'type2': {}}],
"Layouts": [{'layout2': {}}],
"Reports": [{'report2': {}}],
"Widgets": [{'widget2': {}}],
"Mappers": [{'mapper2': {}}],
"Lists": [{'list2': {}}],
}
id_set = ID_SET.copy()
remove_items_from_content_entities_sections(id_set, excluded_items_by_type)
id_set.pop("Packs")
assert IsEqualFunctions.is_dicts_equal(id_set, expected_id_set_entities_section)
def test_remove_items_from_packs_section():
"""
Given
- id set
- items that need to be excluded from the 'Packs' section in the id set
When
- removing items dependencies from id set
Then
    - ensuring the items were successfully removed from the id set
    - ensuring packs without content items are removed from the id set
"""
excluded_items_by_pack = {'pack1': {('playbook', 'playbook1')},
'pack2': {('script', 'script1')}}
expected_id_set_packs_section = {
"pack2": {
"name": "pack2",
"ContentItems": {
"scripts": [
"script2"
]
}
}
}
id_set = ID_SET.copy()
remove_items_from_packs_section(id_set, excluded_items_by_pack)
assert IsEqualFunctions.is_dicts_equal(id_set.get("Packs"), expected_id_set_packs_section)
| 41.150694
| 158
| 0.522462
|
04f62a40edd2d238f9a0a9bbe64899048f437c64
| 5,457
|
py
|
Python
|
contrib/seeds/makeseeds.py
|
CryptoDJ/Feathercoin
|
d61a127ba728e9790d0d4f0dac5cda43eee724d0
|
[
"MIT"
] | 1
|
2021-07-16T09:46:54.000Z
|
2021-07-16T09:46:54.000Z
|
contrib/seeds/makeseeds.py
|
CryptoDJ/Feathercoin
|
d61a127ba728e9790d0d4f0dac5cda43eee724d0
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
CryptoDJ/Feathercoin
|
d61a127ba728e9790d0d4f0dac5cda43eee724d0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2013-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {}
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/Feathercoin:0.9.6.2/|/Feathercoin:0.13.(0|1|2|99)/|/Feathercoin:0.16.(0|99)/)$")
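# Illustrative inputs these patterns accept (hypothetical values, not taken from
# a real dnsseed dump):
#   PATTERN_IPV4  matches "203.0.113.5:9336"            (group 1 is the address)
#   PATTERN_IPV6  matches "[2001:db8::1]:9336"
#   PATTERN_ONION matches "abcdefghij234567.onion:9336"
#   PATTERN_AGENT matches the user agent "/Feathercoin:0.16.0/"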
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
    # Skip bad results; note that fields produced by split() are strings.
    if sline[1] == '0':
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
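# Illustrative behaviour (hypothetical entries): if two parsed entries share the
# sortkey 203.0.113.5 (say ports 9336 and 9337) and a third address appears only
# once, filtermultiport() drops both 203.0.113.5 entries and keeps the unique
# one, because len(value) == 1 only holds for sortkeys seen a single time.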
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| 32.676647
| 186
| 0.571926
|
a634a4b731c4372a4ee5e9f3168e5dcd4e0a2fff
| 10,673
|
py
|
Python
|
scripts/tweak_attn/align_pos.py
|
Lucas-Gui/OpenNMT-tf
|
3a4d303503f03ed1784f32e20b3f92a4d758860a
|
[
"MIT"
] | null | null | null |
scripts/tweak_attn/align_pos.py
|
Lucas-Gui/OpenNMT-tf
|
3a4d303503f03ed1784f32e20b3f92a4d758860a
|
[
"MIT"
] | null | null | null |
scripts/tweak_attn/align_pos.py
|
Lucas-Gui/OpenNMT-tf
|
3a4d303503f03ed1784f32e20b3f92a4d758860a
|
[
"MIT"
] | null | null | null |
"""'Align' a tokenized corpus with the corresponding POS,
by repeating the POS as many times as there are subparts of a word"""
import argparse
from datetime import datetime
from contextlib import ExitStack
from unicodedata import category
punc = list("""()"{}/=+*%:;.,?!&[]#°""") #no apostrophe or minus
assert ("'" not in punc)
adpdet = ['au', 'aux', 'du', "des", "desdites", "desdits", "dudit"]
adppron = ["auquel", "duquel", "desquels", "desquelles", "auxquels", "auxquelles"]
def is_numeral(word):
word = word.strip(spacer)
for l in word:
if not category(l) == 'Nd':
return False
return True
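# Illustrative examples (assuming "_" is the active spacer character):
#   is_numeral("_123") -> True   (spacer stripped, every char is in category 'Nd')
#   is_numeral("12a")  -> False  ('a' is not a decimal digit)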
def replace_dashes(line : str):
"""It seems that pyonnmttok replaces any series of ----- by a — during tokenization"""
l = []
switch = False
for c in line:
if c != "-":
if switch :
l.append("—")
switch=False
l.append(c)
elif not switch:
switch=True
if switch:
l.append("—")
return "".join(l)
class UnknownException(ValueError):
"""Exception used to control what errors are acceptable in align_ref"""
pass
def contains_placeholder(tokens):
"""check for placeholders tokens"""
ph = ["⦅ph_ent_uri#1⦆", "⦅ph_unk#1⦆"]
return any([p in tokens for p in ph])
def main(args):
i = 0
sup_readers = []
sup_writers = []
empty_lines=0
errors=0
skipped=0
with ExitStack() as exit:
#readers and writers
tokens_r = exit.enter_context(open(args.tokens))
pos_r = exit.enter_context(open(args.dep))
writer = exit.enter_context(open(args.dep+args.suffix, "w"))
text_r = exit.enter_context(open(args.text))
tokens_writer = exit.enter_context(open(args.tokens+args.suffix, "w") )
for filename in args.sup:
sup_readers.append(exit.enter_context(open(filename)))
sup_writers.append(exit.enter_context(open(filename + args.suffix, "w")))
err_log = exit.enter_context(open("log.align_pos.err", "w"))
print("Writing to :", writer.name, " ", [w.name for w in sup_writers], tokens_writer.name)
#main loop
for line_tok, line_pos, line_text in zip(tokens_r, pos_r, text_r):
i+=1
log = Log(i)
log.write(line_text)
sup_lines = [(r.readline()) for r in sup_readers]
if not line_tok.strip("\n"): #if line is empty for any reason, pass
empty_lines+=1
log.print(err_log)
continue
if line_text.count('-')>30 or contains_placeholder(line_tok.split(" ")): #skip tables
# and lines containing placeholders
skipped+=1
log.write(skipped)
log.print(err_log)
continue
line_text = replace_dashes(line_text)
sup_lines = [l.strip("\n").split(" ") for l in sup_lines]
try : #try aligning
pos, others = align_ref(line_tok.strip("\n").split(" "), line_pos.strip("\n").split(" "),
line_text.strip("\n").split("_"), sup = sup_lines, log =log)
except UnknownException as e:
errors+=1
if args.T2:
print("line ",i)
raise e
else:
log.print(err_log)
else: #if everything went well, writes the result
writer.write(pos + "\n")
tokens_writer.write(line_tok)
for w, line in zip(sup_writers, others):
w.write(line + "\n")
if not i % args.f :
print(f'{datetime.now()} : line {i}, {errors} errors')
if args.L:
log.print()
print(f"Found {empty_lines} empty lines and {errors} errors, skipped {skipped} lines, on {i} lines")
def is_nonword_token(token):
"""Checks whether a token is a token created by the tokenizer (i.e. a case delimiter token) (or is empty)"""
A = "\uff5f" in token and not "\uff03" in token #"⦅"marks special tokens, but # marks numbers
return A or not token
def is_numeral_token(token):
return "\uff5f" in token and "\uff03" in token
def strip_num(word : str):
"""Returns the word, without numerical chars at the beginning"""
i =0
while i<len(word) and word[i].isnumeric():
i+=1
return word[i:]
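# Illustrative examples: strip_num("2021items") -> "items", while
# strip_num("12 000") -> " 000" because the space stops the leading-digit scan.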
def align_ref(tokens, pos, text : list, sup = None, log = None):
"""Align tokens and pos by matching tokens with text, then text with pos.
Also align any line in sup"""
if sup is None:
sup = []
matching = [-1 for _ in range(len(tokens))] #store at index i the index of the word matched with token i
j = -1
text_copy = [t for t in text]
word = ''
if sup:
sup[0] = sup[0]+["-1"]
sup[1:] = [s+["<maj>"] for s in sup[1:]]
pos.append("<maj>") #sup. token
tildes = [] #marks places where to add & : mark a missing word BEFORE token (not after like ~)
if log is None:
log = Log()
try :
for i in range(len(tokens)):
token = tokens[i].strip(spacer)
if is_nonword_token(token):
matching[i] = len(text_copy)
if args.T2 or args.L:
log.write(token, i, [word], len(text_copy), )
continue
if not word : #end of word, next word.
while not word: #ignores empty next words (that have no token)
word = text.pop(0)
word = word.strip(" ")
if not word:
tildes.append(i)
j+=1
# These remain for compatibility with French UD corpus, which count words such as "du", "desquelles" as
# two words ("de le", "de lesquelles")
if word.lower() in adpdet and pos[j] == "ADP" and j+1<len(pos) and pos[j+1] == "DET":
pos [j] = "ADPDET"
pos.pop(j+1)
for l in sup:
l.pop(j) #will count the ADPDETs as their DET. Not perfect but ok
if sup: # to mark places where a word lacks
sup[0][j]+="~"
if word.lower() in adppron and pos[j] == "ADP" and j+1<len(pos) and pos[j+1] == "PRON":
pos[j] = "ADPPRON"
pos.pop(j + 1)
for l in sup:
l.pop(j)
if sup:
sup[0][j]+="~"
if args.T2 or args.L:
log.write(token, i, [word], j)
if is_numeral_token(token): #if theres a numeral placeholder, it's much longer than the number
word=strip_num(word)
# elif all([not c.isalnum() for c in word]) and all([not c.isalnum() for c in token]): #both token and word are all symbols : next
# word = ""
else:
word = word[len(token) : ].strip(" ") #cut to token length, and removes in-word spaces (e.g. "12 000")
matching[i] = j
#sanity checks
assert not any (t.strip(" ") for t in text) #it's okay if spaces remain
assert not word
assert all([i > -1 for i in matching])
assert len(pos) == len(text_copy)+1
assert all([len(l)== len(text_copy)+1 for l in sup])
result = [pos[i] for i in matching]
k = matching[0]
result_sup = [ [l[i] for i in matching] for l in sup]
if result_sup:
for i,j in enumerate(matching): #writes word separator
if j != k or j == len(text_copy): #tokens are from different word or nonword tokens
result_sup[0][i-1]+=";"
k = j
for i in tildes:
result_sup[0][i]+="&"
except (IndexError, AssertionError) as e:
# Errors still happen on special cases (e.g. when spacy has separated a "---" in individual dashes).
if not args.T:
raise UnknownException()
log.print()
print(e)
print(spacer)
print(tokens, len(tokens))
print(text_copy, len(text_copy))
print(" ".join(pos), len(pos))
for l in sup:
print(" ".join(l), "len :",len(l))
for match, token in zip(matching, tokens) :
if match <len(pos):
print(f"{token}:{pos[match]}", end=" ")
print(matching, f"max : {max(matching)}")
print("\n")
raise UnknownException()
return " ".join(result), [" ".join(l) for l in result_sup]
class Log():
"""To keep track and print what happened during main"""
def __init__(self, i=None):
self.text=[str(i)+"\n"] if i is not None else []
self.i = i
def write(self, *args):
self.text.append(" ".join(str(a) for a in args)+"\n")
def print(self, file=None):
print("".join(self.text), file=file)
def clear(self):
self.text=[]
if __name__ == '__main__':
parser = argparse.ArgumentParser("Align the words properties with a tokenized text. "
"There should be one property by text part "
"(word, number, punctuation, supbart of a contraction)")
parser.add_argument("tokens", help="Token file")
parser.add_argument('text', help="Em-dash-separated speech components ")
parser.add_argument("dep", help="Dependencies or part-of-speech file")
parser.add_argument('--heads', help='Heads file')
parser.add_argument('--sup', help = "Other space_separated properties files to align", nargs="+")
parser.add_argument("--suffix", help="suffix to append at the name of the written files.", default=".aligned")
parser.add_argument("--spacer", help="Is square or underscore used as a spacer character for tokens ?",
required=True)
parser.add_argument("-T", action="store_true", help="Testing")
parser.add_argument("-T2", action="store_true", help="Stronger testing")
parser.add_argument('-L', help='More complete logs', action="store_true")
parser.add_argument('-f', help='Dumps logs this often', default=10000, type=int)
args = parser.parse_args()
if args.heads:
args.sup = [args.heads]+args.sup
args.T = args.T or args.T2
if args.spacer.lower() in ["__","_","u","underscore"]:
spacer = "_"
elif args.spacer.lower() in ["■", "s", "sq", "square", "\uffed"]:
spacer = "\uffed"
else :
raise ValueError("Invalid spacer")
main(args)
| 40.736641
| 142
| 0.548393
|
d9bba18ce5ea9a2a8b7ef67a6ae3b65ddb47748b
| 15,121
|
py
|
Python
|
horovod/spark/common/params.py
|
Infi-zc/horovod
|
94cd8561a21d449fc8c80c8fef422025b84dfc22
|
[
"Apache-2.0"
] | null | null | null |
horovod/spark/common/params.py
|
Infi-zc/horovod
|
94cd8561a21d449fc8c80c8fef422025b84dfc22
|
[
"Apache-2.0"
] | null | null | null |
horovod/spark/common/params.py
|
Infi-zc/horovod
|
94cd8561a21d449fc8c80c8fef422025b84dfc22
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import horovod.spark.common._namedtuple_fix
from pyspark import keyword_only
from pyspark.ml.param.shared import HasOutputCols, Param, Params, TypeConverters
from horovod.spark.common import util
class EstimatorParams(Params):
num_proc = Param(Params._dummy(), 'num_proc', 'number of processes')
train_reader_num_workers = Param(Params._dummy(),
'train_reader_num_workers',
'number of parallel worker processes to read train data')
val_reader_num_workers = Param(Params._dummy(), 'val_reader_num_workers',
'number of parallel worker processes to read validation data')
reader_pool_type = Param(Params._dummy(), 'reader_pool_type', 'type of worker pool to read data')
optimizer = Param(Params._dummy(), 'optimizer', 'optimizer')
model = Param(Params._dummy(), 'model', 'model')
backend = Param(Params._dummy(), 'backend', 'backend')
store = Param(Params._dummy(), 'store', 'store')
metrics = Param(Params._dummy(), 'metrics', 'metrics')
loss = Param(Params._dummy(), 'loss', 'loss')
gradient_compression = Param(Params._dummy(), 'gradient_compression', 'Horovod gradient compression option')
compress_sparse_cols = Param(Params._dummy(),
'compress_sparse_cols',
'flag indicating whether SparseVector columns should be compressed. '
'requires additional compute time but saves intermediate disk space. '
'recommended to avoid unless using a lot of sparse data',
typeConverter=TypeConverters.toBoolean)
loss_weights = Param(Params._dummy(), 'loss_weights', 'loss weights',
typeConverter=TypeConverters.toListFloat)
sample_weight_col = Param(Params._dummy(), 'sample_weight_col',
'name of the column containing sample weights',
typeConverter=TypeConverters.toString)
feature_cols = Param(Params._dummy(), "feature_cols", "feature column names",
typeConverter=TypeConverters.toListString)
label_cols = Param(Params._dummy(), 'label_cols', 'label column names',
typeConverter=TypeConverters.toListString)
validation = Param(Params._dummy(), 'validation',
'one of: float validation split [0, 1), or string validation column name')
callbacks = Param(Params._dummy(), 'callbacks', 'callbacks')
batch_size = Param(Params._dummy(), 'batch_size', 'batch size',
typeConverter=TypeConverters.toInt)
val_batch_size = Param(Params._dummy(), 'val_batch_size', 'validation batch size',
typeConverter=TypeConverters.toInt)
epochs = Param(Params._dummy(), 'epochs', 'epochs', typeConverter=TypeConverters.toInt)
train_steps_per_epoch = Param(Params._dummy(), 'train_steps_per_epoch',
'number of training (batches) steps per epoch',
typeConverter=TypeConverters.toInt)
validation_steps_per_epoch = Param(Params._dummy(), 'validation_steps_per_epoch',
'number of steps (batches) for validation per epoch',
typeConverter=TypeConverters.toInt)
random_seed = Param(Params._dummy(), 'random_seed',
'random seed to use for DL frameworks',
typeConverter=TypeConverters.toInt)
shuffle_buffer_size = Param(Params._dummy(),
'shuffle_buffer_size',
'shuffling buffer size of data before training in number of samples',
typeConverter=TypeConverters.toInt)
verbose = Param(Params._dummy(), 'verbose', 'verbose flag (0=silent, 1=enabled, other values used by frameworks)',
typeConverter=TypeConverters.toInt)
partitions_per_process = Param(Params._dummy(), 'partitions_per_process',
'partitions for parquet form of the DataFrame per process',
typeConverter=TypeConverters.toInt)
run_id = Param(Params._dummy(), 'run_id',
'unique ID for this run, if run already exists, '
'then training will resume from last checkpoint in the store',
typeConverter=TypeConverters.toString)
transformation_fn = Param(Params._dummy(), 'transformation_fn',
'functions that construct the transformation '
'function that applies custom transformations to '
'every batch before train and validation steps')
label_shapes = Param(Params._dummy(), 'label_shapes', 'specifies the shape (or shapes) of the label column (or columns)')
def __init__(self):
super(EstimatorParams, self).__init__()
self._setDefault(
num_proc=None,
store=None,
backend=None,
model=None,
optimizer=None,
loss=None,
loss_weights=None,
sample_weight_col=None,
metrics=[],
feature_cols=None,
label_cols=None,
validation=None,
gradient_compression=None,
compress_sparse_cols=False,
batch_size=32,
val_batch_size=None,
epochs=1,
verbose=1,
callbacks=[],
random_seed=None,
shuffle_buffer_size=None,
partitions_per_process=10,
run_id=None,
train_steps_per_epoch=None,
validation_steps_per_epoch=None,
transformation_fn=None,
train_reader_num_workers=2,
val_reader_num_workers=2,
reader_pool_type='process',
label_shapes=None)
def _check_params(self, metadata):
model = self.getModel()
if not model:
raise ValueError('Model parameter is required')
util.check_validation(self.getValidation())
feature_columns = self.getFeatureCols()
missing_features = [col for col in feature_columns if col not in metadata]
if missing_features:
raise ValueError('Feature columns {} not found in training DataFrame metadata'
.format(missing_features))
label_columns = self.getLabelCols()
missing_labels = [col for col in label_columns if col not in metadata]
if missing_labels:
raise ValueError('Label columns {} not found in training DataFrame metadata'
.format(missing_labels))
@keyword_only
def setParams(self, **kwargs):
return self._set(**kwargs)
def setNumProc(self, value):
return self._set(num_proc=value)
def getNumProc(self):
return self.getOrDefault(self.num_proc)
def setModel(self, value):
return self._set(model=value)
def getModel(self):
return self.getOrDefault(self.model)
def setBackend(self, value):
return self._set(backend=value)
def getBackend(self):
return self.getOrDefault(self.backend)
def setStore(self, value):
return self._set(store=value)
def getStore(self):
return self.getOrDefault(self.store)
def setLoss(self, value):
return self._set(loss=value)
def getLoss(self):
return self.getOrDefault(self.loss)
def setLossWeights(self, value):
return self._set(loss_weights=value)
def getLossWeights(self):
return self.getOrDefault(self.loss_weights)
def setSampleWeightCol(self, value):
return self._set(sample_weight_col=value)
def getSampleWeightCol(self):
return self.getOrDefault(self.sample_weight_col)
def setMetrics(self, value):
return self._set(metrics=value)
def getMetrics(self):
return self.getOrDefault(self.metrics)
def setFeatureCols(self, value):
return self._set(feature_cols=value)
def getFeatureCols(self):
return self.getOrDefault(self.feature_cols)
def setLabelCols(self, value):
return self._set(label_cols=value)
def getLabelCols(self):
return self.getOrDefault(self.label_cols)
def setValidation(self, value):
return self._set(validation=value)
def getValidation(self):
return self.getOrDefault(self.validation)
def setCallbacks(self, value):
return self._set(callbacks=value)
def getCallbacks(self):
return self.getOrDefault(self.callbacks)
def setBatchSize(self, value):
return self._set(batch_size=value)
def getBatchSize(self):
return self.getOrDefault(self.batch_size)
def setValBatchSize(self, value):
return self._set(val_batch_size=value)
def getValBatchSize(self):
return self.getOrDefault(self.val_batch_size)
def setEpochs(self, value):
return self._set(epochs=value)
def getEpochs(self):
return self.getOrDefault(self.epochs)
def setTrainStepsPerEpoch(self, value):
return self._set(train_steps_per_epoch=value)
def getTrainStepsPerEpoch(self):
return self.getOrDefault(self.train_steps_per_epoch)
def setValidationStepsPerEpoch(self, value):
return self._set(validation_steps_per_epoch=value)
def getValidationStepsPerEpoch(self):
return self.getOrDefault(self.validation_steps_per_epoch)
def setVerbose(self, value):
return self._set(verbose=value)
def getVerbose(self):
return self.getOrDefault(self.verbose)
def setGradientCompression(self, value):
return self._set(gradient_compression=value)
def getGradientCompression(self):
return self.getOrDefault(self.gradient_compression)
def setCompressSparseCols(self, value):
return self._set(compress_sparse_cols=value)
def getCompressSparseCols(self):
return self.getOrDefault(self.compress_sparse_cols)
def setRandomSeed(self, value):
return self._set(random_seed=value)
def getRandomSeed(self):
return self.getOrDefault(self.random_seed)
def setShufflingBufferSize(self, value):
return self._set(shuffle_buffer_size=value)
def getShufflingBufferSize(self):
return self.getOrDefault(self.shuffle_buffer_size)
def setOptimizer(self, value):
return self._set(optimizer=value)
def getOptimizer(self):
return self.getOrDefault(self.optimizer)
def setPartitionsPerProcess(self, value):
return self._set(partitions_per_process=value)
def getPartitionsPerProcess(self):
return self.getOrDefault(self.partitions_per_process)
def setRunId(self, value):
return self._set(run_id=value)
def getRunId(self):
return self.getOrDefault(self.run_id)
def setTransformationFn(self, value):
return self._set(transformation_fn=value)
def getTransformationFn(self):
return self.getOrDefault(self.transformation_fn)
def setTrainReaderNumWorker(self, value):
return self._set(train_reader_num_workers=value)
def getTrainReaderNumWorker(self):
return self.getOrDefault(self.train_reader_num_workers)
def setValReaderNumWorker(self, value):
return self._set(val_reader_num_workers=value)
def getValReaderNumWorker(self):
return self.getOrDefault(self.val_reader_num_workers)
def setReaderPoolType(self, value):
return self._set(reader_pool_type=value)
def getReaderPoolType(self):
return self.getOrDefault(self.reader_pool_type)
def setLabelShapes(self, value):
return self._set(label_shapes=value)
def getLabelShapes(self):
return self.getOrDefault(self.label_shapes)
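# Usage sketch (illustrative, not part of this module): EstimatorParams is
# consumed through concrete estimators such as horovod.spark.keras.KerasEstimator,
# whose keyword arguments map onto the Params declared above, e.g.
#   est = KerasEstimator(model=model, optimizer=opt, loss='mse',
#                        feature_cols=['features'], label_cols=['y'],
#                        batch_size=32, epochs=1)
#   est.getBatchSize()  # -> 32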
class ModelParams(HasOutputCols):
history = Param(Params._dummy(), 'history', 'history')
model = Param(Params._dummy(), 'model', 'model')
feature_columns = Param(Params._dummy(), 'feature_columns', 'feature columns')
label_columns = Param(Params._dummy(), 'label_columns', 'label columns')
run_id = Param(Params._dummy(), 'run_id',
'unique ID for the run that generated this model, if no ID was given by the '
'user, defaults to current timestamp at the time of fit()',
typeConverter=TypeConverters.toString)
_metadata = Param(Params._dummy(), '_metadata',
'metadata contains the shape and type of input and output')
def __init__(self):
super(ModelParams, self).__init__()
# Only for internal use
def _get_metadata(self):
return self.getOrDefault(self._metadata)
@keyword_only
def setParams(self, **kwargs):
return self._set(**kwargs)
def setHistory(self, value):
return self._set(history=value)
def getHistory(self):
return self.getOrDefault(self.history)
def setModel(self, value):
return self._set(model=value)
def getModel(self):
return self.getOrDefault(self.model)
def setFeatureColumns(self, value):
return self._set(feature_columns=value)
def getFeatureColumns(self):
return self.getOrDefault(self.feature_columns)
def setLabelColoumns(self, value):
return self._set(label_columns=value)
def getLabelColumns(self):
return self.getOrDefault(self.label_columns)
def setRunId(self, value):
return self._set(run_id=value)
def getRunId(self):
return self.getOrDefault(self.run_id)
# copied from https://github.com/apache/spark/tree/master/python/pyspark/ml/param/shared.py
# has been removed from pyspark.ml.param.HasOutputCol in pyspark 3.0.0
# added here to keep ModelParams API consistent between pyspark 2 and 3
# https://github.com/apache/spark/commit/b19fd487dfe307542d65391fd7b8410fa4992698#diff-3d1fb305acc7bab18e5d91f2b69018c7
# https://github.com/apache/spark/pull/26232
# https://issues.apache.org/jira/browse/SPARK-29093
def setOutputCols(self, value):
"""
Sets the value of :py:attr:`outputCols`.
"""
return self._set(outputCols=value)
| 37.614428
| 125
| 0.650684
|
6d72532d2f6562f5de4721e77cd7f0c0dfcf86d2
| 12,017
|
py
|
Python
|
tornado/escape.py
|
globocom/tornado
|
73c0aee4a5898ef3fb7d67e753ae8d47e400b570
|
[
"Apache-2.0"
] | 1
|
2019-12-27T00:36:48.000Z
|
2019-12-27T00:36:48.000Z
|
tornado/escape.py
|
globocom/tornado
|
73c0aee4a5898ef3fb7d67e753ae8d47e400b570
|
[
"Apache-2.0"
] | null | null | null |
tornado/escape.py
|
globocom/tornado
|
73c0aee4a5898ef3fb7d67e753ae8d47e400b570
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Escaping/unescaping methods for HTML, JSON, URLs, and others.
Also includes a few other miscellaneous string manipulation functions that
have crept in over time.
"""
from __future__ import absolute_import, division, print_function, with_statement
import re
import sys
from tornado.util import bytes_type, unicode_type, basestring_type, u
try:
from urllib.parse import parse_qs # py3
except ImportError:
from urlparse import parse_qs # Python 2.6+
try:
import htmlentitydefs # py2
except ImportError:
import html.entities as htmlentitydefs # py3
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
import json
try:
unichr
except NameError:
unichr = chr
_XHTML_ESCAPE_RE = re.compile('[&<>"]')
_XHTML_ESCAPE_DICT = {'&': '&', '<': '<', '>': '>', '"': '"'}
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML."""
return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)],
to_basestring(value))
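# Illustrative result (not an original doctest):
#   xhtml_escape('<b>"Fish & Chips"</b>') == '&lt;b&gt;&quot;Fish &amp; Chips&quot;&lt;/b&gt;'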
def xhtml_unescape(value):
"""Un-escapes an XML-escaped string."""
return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
def json_encode(value):
"""JSON-encodes the given Python object."""
# JSON permits but does not require forward slashes to be escaped.
# This is useful when json data is emitted in a <script> tag
# in HTML, as it prevents </script> tags from prematurely terminating
    # the javascript. Some json libraries do this escaping by default,
# although python's standard library does not, so we do it here.
# http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
return json.dumps(recursive_unicode(value)).replace("</", "<\\/")
def json_decode(value):
"""Returns Python objects for the given JSON string."""
return json.loads(to_basestring(value))
def squeeze(value):
"""Replace all sequences of whitespace chars with a single space."""
return re.sub(r"[\x00-\x20]+", " ", value).strip()
def url_escape(value):
"""Returns a valid URL-encoded version of the given value."""
return urllib_parse.quote_plus(utf8(value))
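# Illustrative result: url_escape("a b&c") == "a+b%26c"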
# python 3 changed things around enough that we need two separate
# implementations of url_unescape. We also need our own implementation
# of parse_qs since python 3's version insists on decoding everything.
if sys.version_info[0] < 3:
def url_unescape(value, encoding='utf-8'):
"""Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
"""
if encoding is None:
return urllib_parse.unquote_plus(utf8(value))
else:
return unicode_type(urllib_parse.unquote_plus(utf8(value)), encoding)
parse_qs_bytes = parse_qs
else:
def url_unescape(value, encoding='utf-8'):
"""Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
"""
if encoding is None:
return urllib_parse.unquote_to_bytes(value)
else:
return urllib_parse.unquote_plus(to_basestring(value), encoding=encoding)
def parse_qs_bytes(qs, keep_blank_values=False, strict_parsing=False):
"""Parses a query string like urlparse.parse_qs, but returns the
values as byte strings.
Keys still become type str (interpreted as latin1 in python3!)
because it's too painful to keep them as byte strings in
python3 and in practice they're nearly always ascii anyway.
"""
# This is gross, but python3 doesn't give us another way.
# Latin1 is the universal donor of character encodings.
result = parse_qs(qs, keep_blank_values, strict_parsing,
encoding='latin1', errors='strict')
encoded = {}
for k, v in result.items():
encoded[k] = [i.encode('latin1') for i in v]
return encoded
_UTF8_TYPES = (bytes_type, type(None))
def utf8(value):
"""Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
"""
if isinstance(value, _UTF8_TYPES):
return value
assert isinstance(value, unicode_type)
return value.encode("utf-8")
_TO_UNICODE_TYPES = (unicode_type, type(None))
def to_unicode(value):
"""Converts a string argument to a unicode string.
If the argument is already a unicode string or None, it is returned
unchanged. Otherwise it must be a byte string and is decoded as utf8.
"""
if isinstance(value, _TO_UNICODE_TYPES):
return value
assert isinstance(value, bytes_type)
return value.decode("utf-8")
# to_unicode was previously named _unicode not because it was private,
# but to avoid conflicts with the built-in unicode() function/type
_unicode = to_unicode
# When dealing with the standard library across python 2 and 3 it is
# sometimes useful to have a direct conversion to the native string type
if str is unicode_type:
native_str = to_unicode
else:
native_str = utf8
_BASESTRING_TYPES = (basestring_type, type(None))
def to_basestring(value):
"""Converts a string argument to a subclass of basestring.
In python2, byte and unicode strings are mostly interchangeable,
so functions that deal with a user-supplied argument in combination
with ascii string constants can use either and should return the type
the user supplied. In python3, the two types are not interchangeable,
so this method is needed to convert byte strings to unicode.
"""
if isinstance(value, _BASESTRING_TYPES):
return value
assert isinstance(value, bytes_type)
return value.decode("utf-8")
def recursive_unicode(obj):
"""Walks a simple data structure, converting byte strings to unicode.
Supports lists, tuples, and dictionaries.
"""
if isinstance(obj, dict):
return dict((recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items())
elif isinstance(obj, list):
return list(recursive_unicode(i) for i in obj)
elif isinstance(obj, tuple):
return tuple(recursive_unicode(i) for i in obj)
elif isinstance(obj, bytes_type):
return to_unicode(obj)
else:
return obj
# I originally used the regex from
# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
# but it gets all exponential on certain patterns (such as too many trailing
# dots), causing the regex matcher to never return.
# This regex should avoid those problems.
# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
# processed as escapes.
_URL_RE = re.compile(to_unicode(r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&|")*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&|")*\)))+)"""))
def linkify(text, shorten=False, extra_params="",
require_protocol=False, permitted_protocols=["http", "https"]):
"""Converts plain text into HTML with links.
For example: ``linkify("Hello http://tornadoweb.org!")`` would return
``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``
Parameters:
shorten: Long urls will be shortened for display.
extra_params: Extra text to include in the link tag, or a callable
taking the link as an argument and returning the extra text
e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
or::
def extra_params_cb(url):
if url.startswith("http://example.com"):
return 'class="internal"'
else:
return 'class="external" rel="nofollow"'
linkify(text, extra_params=extra_params_cb)
require_protocol: Only linkify urls which include a protocol. If this is
False, urls such as www.facebook.com will also be linkified.
permitted_protocols: List (or set) of protocols which should be linkified,
e.g. linkify(text, permitted_protocols=["http", "ftp", "mailto"]).
It is very unsafe to include protocols such as "javascript".
"""
if extra_params and not callable(extra_params):
extra_params = " " + extra_params.strip()
def make_link(m):
url = m.group(1)
proto = m.group(2)
if require_protocol and not proto:
            return url  # no protocol, no linkify
if proto and proto not in permitted_protocols:
return url # bad protocol, no linkify
href = m.group(1)
if not proto:
href = "http://" + href # no proto specified, use http
if callable(extra_params):
params = " " + extra_params(href).strip()
else:
params = extra_params
# clip long urls. max_len is just an approximation
max_len = 30
if shorten and len(url) > max_len:
before_clip = url
if proto:
proto_len = len(proto) + 1 + len(m.group(3) or "") # +1 for :
else:
proto_len = 0
parts = url[proto_len:].split("/")
if len(parts) > 1:
# Grab the whole host part plus the first bit of the path
# The path is usually not that interesting once shortened
# (no more slug, etc), so it really just provides a little
# extra indication of shortening.
url = url[:proto_len] + parts[0] + "/" + \
parts[1][:8].split('?')[0].split('.')[0]
if len(url) > max_len * 1.5: # still too long
url = url[:max_len]
if url != before_clip:
amp = url.rfind('&')
# avoid splitting html char entities
if amp > max_len - 5:
url = url[:amp]
url += "..."
if len(url) >= len(before_clip):
url = before_clip
else:
# full url is visible on mouse-over (for those who don't
# have a status bar, such as Safari by default)
params += ' title="%s"' % href
return u('<a href="%s"%s>%s</a>') % (href, params, url)
# First HTML-escape so that our strings are all safe.
    # The regex is modified to avoid character entities other than &amp; so
# that we won't pick up ", etc.
text = _unicode(xhtml_escape(text))
return _URL_RE.sub(make_link, text)
def _convert_entity(m):
if m.group(1) == "#":
try:
return unichr(int(m.group(2)))
except ValueError:
return "&#%s;" % m.group(2)
try:
return _HTML_UNICODE_MAP[m.group(2)]
except KeyError:
return "&%s;" % m.group(2)
def _build_unicode_map():
unicode_map = {}
for name, value in htmlentitydefs.name2codepoint.items():
unicode_map[name] = unichr(value)
return unicode_map
_HTML_UNICODE_MAP = _build_unicode_map()
| 35.448378
| 182
| 0.645918
|
b3d081fbddac642a087abed2b1266b09f2f0db82
| 847
|
py
|
Python
|
mode.py
|
EricJKei/LO-GAN
|
e5329008fea3e09583eaeb977b5bc4f824431c30
|
[
"Apache-2.0"
] | null | null | null |
mode.py
|
EricJKei/LO-GAN
|
e5329008fea3e09583eaeb977b5bc4f824431c30
|
[
"Apache-2.0"
] | null | null | null |
mode.py
|
EricJKei/LO-GAN
|
e5329008fea3e09583eaeb977b5bc4f824431c30
|
[
"Apache-2.0"
] | null | null | null |
import os
import tensorflow as tf
from PIL import Image
import numpy as np
import time
# import util
from skimage.measure import compare_ssim as ssim
from tqdm import tqdm
def test_only(args, model, sess, saver):
saver.restore(sess,args.pre_trained_model)
print("saved model is loaded for test only!")
print("model path is %s"%args.pre_trained_model)
blur_img_name = sorted(os.listdir(args.test_Blur_path))
sess.run(model.data_loader.init_op['val_init'])
start_time = time.time()
for i in range(len(blur_img_name)):
output = sess.run(model.output)
output = Image.fromarray(output[0,:,:,0])
split_name = blur_img_name[i].split('.')
output.save(os.path.join(args.result_path, '%s_sharp.png'%(''.join(map(str, split_name[:-1])))))
print(time.time()-start_time)
| 30.25
| 104
| 0.695396
|
8d75c0e87e14fc0edf470cb03f8094037cc9580f
| 4,607
|
py
|
Python
|
counter/counter.py
|
MichaelMW/crispy
|
44a076bd90078f773a58c1e83e8d0185ac2eebfd
|
[
"MIT"
] | 1
|
2019-06-28T21:14:07.000Z
|
2019-06-28T21:14:07.000Z
|
counter/counter.py
|
MichaelMW/crispy
|
44a076bd90078f773a58c1e83e8d0185ac2eebfd
|
[
"MIT"
] | null | null | null |
counter/counter.py
|
MichaelMW/crispy
|
44a076bd90078f773a58c1e83e8d0185ac2eebfd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
####### todo:
# reverse complement #
### input fasta file
### input gRNA.bed, design file
### output count number for next step.
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE,SIG_DFL)
from sys import stdin, stderr
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f1', dest='flank1', help="left flank of sgRNAseq, before rc")
parser.add_argument('-f2', dest='flank2', help="right flank of sgRNAseq, before rc")
parser.add_argument('-R', dest='rc', default="0", help="reverse compliment mode, 0: only input; 1: rc; this is useful for sgRNA2 when the input fastq files are paired reads.")
parser.add_argument('-H', dest='hasHeader', default=True, help="oligo file has header")
parser.add_argument('-i', dest='oligo', help="input oligo design file. Use the following fields as default: chrm, start, end, sgRNAid, barcode, set, sgRNA, [sgRNA2 if pair reads]; if user provides a non-conventional oligo design file, minimally it should contain sgRNAid and sgRNAseq, and -G and -B should be used to indicate which columns (the column index starts with 0) sgRNAid and sgRNAseq are in the -i input file. eg. -i 'myOligo.tsv' -G 0 -S 1")
parser.add_argument('-I', dest='sgRNAid', default="3", help="column index of sgRNAid in the input oligo design file, index starts with 0. default to 3.")
parser.add_argument('-E', dest='exact', default=True, help="if pattern resulted in exact read match.")
parser.add_argument('-S', dest='sgRNAseq', default="6", help="column index of sgRNAseq in the input oligo design file, index starts with 0. default to 6 for single reads, specified to 7 for usage of sgRNA2.")
args = parser.parse_args()
flank1 = args.flank1.upper()
flank2 = args.flank2.upper()
inFile = args.oligo
rc = str(args.rc)
idx_sgRNAid = int(args.sgRNAid)
idx_sgRNAseq = int(args.sgRNAseq)
hasHeader = bool(args.hasHeader)
exact = bool(args.exact)
###
# reverse complement
def func_rc(seq):
    seq = seq.upper()  # uppercase the sequence itself so lowercase input is complemented correctly
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    return "".join([complement.get(base, base) for base in reversed(seq)])
### 1. build gRNA profile
#inFile = "gRNA.bed"
gid2bar = {}
gids = []
with open(inFile) as f:
if hasHeader:
header = f.readline()
else:
pass
for l in f.readlines():
ls = l.strip().split()
gid, sgRNAseq = ls[idx_sgRNAid], ls[idx_sgRNAseq]
gids.append(gid)
sgRNAseq = sgRNAseq.upper()
if rc == "1":
gid2bar[gid] = func_rc(sgRNAseq)
else:
gid2bar[gid] = sgRNAseq
sgRNAseqs = set(gid2bar.values())
totalSgRNA = len(sgRNAseqs)
blens = [len(sgRNAseq) for sgRNAseq in sgRNAseqs]
maxbl, minbl = max(blens), min(blens)
### 2. compile regex
import re
if rc == "0":
pattern = flank1 + "([ACGT]{" + str(minbl) + "," + str(maxbl) + "})" + flank2
if rc == "1":
pattern = func_rc(flank2) + "([ACGT]{" + str(minbl) + "," + str(maxbl) + "})" + func_rc(flank1)
stderr.write("using pattern = " + pattern + "\n")
prog = re.compile(pattern)
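# Illustrative pattern (hypothetical flanks): with -f1 ACC, -f2 GTT and sgRNA
# lengths of 19-20 nt in the design file, rc mode "0" compiles
#   ACC([ACGT]{19,20})GTT
# and prog.search() captures the candidate sgRNA sequence between the two flanks.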
### 3. search for sgRNA pattern that matches
hits = []
hitc = 0
readc = 0
for seq in stdin.readlines():
readc += 1
seq = seq.upper()
hit = prog.search(seq)
    # this step, which acts like a fuzzy match, improves runtime
if hit:
#stderr.write(hit + "\n")
match = hit.group()
matchSeq = match[len(flank1):len(match)-len(flank2)]
hitc += 1
# this step checks if the sequence is actually in the original gRNA design
if exact: # very fast
if matchSeq in sgRNAseqs:
if rc == "1":
matchSeq = func_rc(matchSeq)
hits.append(matchSeq)
else: # very slow
for sgRNAseq in sgRNAseqs:
if sgRNAseq in matchSeq:
hits.append(sgRNAseq)
continue
#stderr.write(sgRNAseq + "\n")
### count
from collections import Counter
hitCounts = Counter(hits)
matchedSgRNA = len(hitCounts)
#print(hitCounts)
### print
#if rc == "1":
# for gid in gids:
# sgRNAseq = gid2bar[gid]
# if func_rc(sgRNAseq) in hitCounts:
# print("\t".join([gid, str(hitCounts[func_rc(sgRNAseq)])]))
#elif rc == "0":
# for gid in gids:
# sgRNAseq = gid2bar[gid]
# if sgRNAseq in hitCounts:
# print("\t".join([gid, str(hitCounts[sgRNAseq])]))
#else:
# print("rc mode unknown, exit")
# exit
for gid in gids:
sgRNAseq = gid2bar[gid]
if rc=="1":
sgRNAseq = func_rc(sgRNAseq)
hitVal = hitCounts.get(sgRNAseq, 0)
hitStr = str(hitVal)
print("\t".join([gid, hitStr]))
stderr.write("total read count: " + str(readc)+"\n")
stderr.write("total read match (pattern): " + str(hitc)+"\n")
stderr.write("total exact match (sgRNA): " + str(len(hits))+"\n")
stderr.write("sgRNA with matches: {} / {}".format(matchedSgRNA, totalSgRNA) + "\n")
| 32.443662
| 452
| 0.681789
|
d216102c27d07ac2c996c21c9858f09986776fbf
| 25,851
|
py
|
Python
|
sympy/tensor/array/tests/test_arrayop.py
|
yupbank/sympy
|
66d7aef9dc1b26055af22e27ba42004c40b95d7c
|
[
"BSD-3-Clause"
] | 1
|
2021-11-19T03:38:42.000Z
|
2021-11-19T03:38:42.000Z
|
sympy/tensor/array/tests/test_arrayop.py
|
yupbank/sympy
|
66d7aef9dc1b26055af22e27ba42004c40b95d7c
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/tensor/array/tests/test_arrayop.py
|
yupbank/sympy
|
66d7aef9dc1b26055af22e27ba42004c40b95d7c
|
[
"BSD-3-Clause"
] | null | null | null |
import itertools
import random
from sympy.combinatorics import Permutation
from sympy.combinatorics.permutations import _af_invert
from sympy.testing.pytest import raises
from sympy.core.function import diff
from sympy.core.symbol import symbols
from sympy.functions.elementary.complexes import (adjoint, conjugate, transpose)
from sympy.functions.elementary.exponential import (exp, log)
from sympy.functions.elementary.trigonometric import (cos, sin)
from sympy.tensor.array import Array, ImmutableDenseNDimArray, ImmutableSparseNDimArray, MutableSparseNDimArray
from sympy.tensor.array.arrayop import tensorproduct, tensorcontraction, derive_by_array, permutedims, Flatten, \
tensordiagonal
def test_import_NDimArray():
from sympy.tensor.array import NDimArray
del NDimArray
def test_tensorproduct():
x,y,z,t = symbols('x y z t')
from sympy.abc import a,b,c,d
assert tensorproduct() == 1
assert tensorproduct([x]) == Array([x])
assert tensorproduct([x], [y]) == Array([[x*y]])
assert tensorproduct([x], [y], [z]) == Array([[[x*y*z]]])
assert tensorproduct([x], [y], [z], [t]) == Array([[[[x*y*z*t]]]])
assert tensorproduct(x) == x
assert tensorproduct(x, y) == x*y
assert tensorproduct(x, y, z) == x*y*z
assert tensorproduct(x, y, z, t) == x*y*z*t
for ArrayType in [ImmutableDenseNDimArray, ImmutableSparseNDimArray]:
A = ArrayType([x, y])
B = ArrayType([1, 2, 3])
C = ArrayType([a, b, c, d])
assert tensorproduct(A, B, C) == ArrayType([[[a*x, b*x, c*x, d*x], [2*a*x, 2*b*x, 2*c*x, 2*d*x], [3*a*x, 3*b*x, 3*c*x, 3*d*x]],
[[a*y, b*y, c*y, d*y], [2*a*y, 2*b*y, 2*c*y, 2*d*y], [3*a*y, 3*b*y, 3*c*y, 3*d*y]]])
assert tensorproduct([x, y], [1, 2, 3]) == tensorproduct(A, B)
assert tensorproduct(A, 2) == ArrayType([2*x, 2*y])
assert tensorproduct(A, [2]) == ArrayType([[2*x], [2*y]])
assert tensorproduct([2], A) == ArrayType([[2*x, 2*y]])
assert tensorproduct(a, A) == ArrayType([a*x, a*y])
assert tensorproduct(a, A, B) == ArrayType([[a*x, 2*a*x, 3*a*x], [a*y, 2*a*y, 3*a*y]])
assert tensorproduct(A, B, a) == ArrayType([[a*x, 2*a*x, 3*a*x], [a*y, 2*a*y, 3*a*y]])
assert tensorproduct(B, a, A) == ArrayType([[a*x, a*y], [2*a*x, 2*a*y], [3*a*x, 3*a*y]])
# tests for large scale sparse array
for SparseArrayType in [ImmutableSparseNDimArray, MutableSparseNDimArray]:
a = SparseArrayType({1:2, 3:4},(1000, 2000))
b = SparseArrayType({1:2, 3:4},(1000, 2000))
assert tensorproduct(a, b) == ImmutableSparseNDimArray({2000001: 4, 2000003: 8, 6000001: 8, 6000003: 16}, (1000, 2000, 1000, 2000))
def test_tensorcontraction():
from sympy.abc import a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x
B = Array(range(18), (2, 3, 3))
assert tensorcontraction(B, (1, 2)) == Array([12, 39])
C1 = Array([a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x], (2, 3, 2, 2))
assert tensorcontraction(C1, (0, 2)) == Array([[a + o, b + p], [e + s, f + t], [i + w, j + x]])
assert tensorcontraction(C1, (0, 2, 3)) == Array([a + p, e + t, i + x])
assert tensorcontraction(C1, (2, 3)) == Array([[a + d, e + h, i + l], [m + p, q + t, u + x]])
def test_derivative_by_array():
from sympy.abc import i, j, t, x, y, z
bexpr = x*y**2*exp(z)*log(t)
sexpr = sin(bexpr)
cexpr = cos(bexpr)
a = Array([sexpr])
assert derive_by_array(sexpr, t) == x*y**2*exp(z)*cos(x*y**2*exp(z)*log(t))/t
assert derive_by_array(sexpr, [x, y, z]) == Array([bexpr/x*cexpr, 2*y*bexpr/y**2*cexpr, bexpr*cexpr])
assert derive_by_array(a, [x, y, z]) == Array([[bexpr/x*cexpr], [2*y*bexpr/y**2*cexpr], [bexpr*cexpr]])
assert derive_by_array(sexpr, [[x, y], [z, t]]) == Array([[bexpr/x*cexpr, 2*y*bexpr/y**2*cexpr], [bexpr*cexpr, bexpr/log(t)/t*cexpr]])
assert derive_by_array(a, [[x, y], [z, t]]) == Array([[[bexpr/x*cexpr], [2*y*bexpr/y**2*cexpr]], [[bexpr*cexpr], [bexpr/log(t)/t*cexpr]]])
assert derive_by_array([[x, y], [z, t]], [x, y]) == Array([[[1, 0], [0, 0]], [[0, 1], [0, 0]]])
assert derive_by_array([[x, y], [z, t]], [[x, y], [z, t]]) == Array([[[[1, 0], [0, 0]], [[0, 1], [0, 0]]],
[[[0, 0], [1, 0]], [[0, 0], [0, 1]]]])
assert diff(sexpr, t) == x*y**2*exp(z)*cos(x*y**2*exp(z)*log(t))/t
assert diff(sexpr, Array([x, y, z])) == Array([bexpr/x*cexpr, 2*y*bexpr/y**2*cexpr, bexpr*cexpr])
assert diff(a, Array([x, y, z])) == Array([[bexpr/x*cexpr], [2*y*bexpr/y**2*cexpr], [bexpr*cexpr]])
assert diff(sexpr, Array([[x, y], [z, t]])) == Array([[bexpr/x*cexpr, 2*y*bexpr/y**2*cexpr], [bexpr*cexpr, bexpr/log(t)/t*cexpr]])
assert diff(a, Array([[x, y], [z, t]])) == Array([[[bexpr/x*cexpr], [2*y*bexpr/y**2*cexpr]], [[bexpr*cexpr], [bexpr/log(t)/t*cexpr]]])
assert diff(Array([[x, y], [z, t]]), Array([x, y])) == Array([[[1, 0], [0, 0]], [[0, 1], [0, 0]]])
assert diff(Array([[x, y], [z, t]]), Array([[x, y], [z, t]])) == Array([[[[1, 0], [0, 0]], [[0, 1], [0, 0]]],
[[[0, 0], [1, 0]], [[0, 0], [0, 1]]]])
# test for large scale sparse array
for SparseArrayType in [ImmutableSparseNDimArray, MutableSparseNDimArray]:
b = MutableSparseNDimArray({0:i, 1:j}, (10000, 20000))
assert derive_by_array(b, i) == ImmutableSparseNDimArray({0: 1}, (10000, 20000))
assert derive_by_array(b, (i, j)) == ImmutableSparseNDimArray({0: 1, 200000001: 1}, (2, 10000, 20000))
#https://github.com/sympy/sympy/issues/20655
U = Array([x, y, z])
E = 2
assert derive_by_array(E, U) == ImmutableDenseNDimArray([0, 0, 0])
def test_issue_emerged_while_discussing_10972():
ua = Array([-1,0])
Fa = Array([[0, 1], [-1, 0]])
po = tensorproduct(Fa, ua, Fa, ua)
assert tensorcontraction(po, (1, 2), (4, 5)) == Array([[0, 0], [0, 1]])
sa = symbols('a0:144')
po = Array(sa, [2, 2, 3, 3, 2, 2])
assert tensorcontraction(po, (0, 1), (2, 3), (4, 5)) == sa[0] + sa[108] + sa[111] + sa[124] + sa[127] + sa[140] + sa[143] + sa[16] + sa[19] + sa[3] + sa[32] + sa[35]
assert tensorcontraction(po, (0, 1, 4, 5), (2, 3)) == sa[0] + sa[111] + sa[127] + sa[143] + sa[16] + sa[32]
assert tensorcontraction(po, (0, 1), (4, 5)) == Array([[sa[0] + sa[108] + sa[111] + sa[3], sa[112] + sa[115] + sa[4] + sa[7],
sa[11] + sa[116] + sa[119] + sa[8]], [sa[12] + sa[120] + sa[123] + sa[15],
sa[124] + sa[127] + sa[16] + sa[19], sa[128] + sa[131] + sa[20] + sa[23]],
[sa[132] + sa[135] + sa[24] + sa[27], sa[136] + sa[139] + sa[28] + sa[31],
sa[140] + sa[143] + sa[32] + sa[35]]])
assert tensorcontraction(po, (0, 1), (2, 3)) == Array([[sa[0] + sa[108] + sa[124] + sa[140] + sa[16] + sa[32], sa[1] + sa[109] + sa[125] + sa[141] + sa[17] + sa[33]],
[sa[110] + sa[126] + sa[142] + sa[18] + sa[2] + sa[34], sa[111] + sa[127] + sa[143] + sa[19] + sa[3] + sa[35]]])
def test_array_permutedims():
sa = symbols('a0:144')
for ArrayType in [ImmutableDenseNDimArray, ImmutableSparseNDimArray]:
m1 = ArrayType(sa[:6], (2, 3))
assert permutedims(m1, (1, 0)) == transpose(m1)
assert m1.tomatrix().T == permutedims(m1, (1, 0)).tomatrix()
assert m1.tomatrix().T == transpose(m1).tomatrix()
assert m1.tomatrix().C == conjugate(m1).tomatrix()
assert m1.tomatrix().H == adjoint(m1).tomatrix()
assert m1.tomatrix().T == m1.transpose().tomatrix()
assert m1.tomatrix().C == m1.conjugate().tomatrix()
assert m1.tomatrix().H == m1.adjoint().tomatrix()
raises(ValueError, lambda: permutedims(m1, (0,)))
raises(ValueError, lambda: permutedims(m1, (0, 0)))
raises(ValueError, lambda: permutedims(m1, (1, 2, 0)))
# Some tests with random arrays:
dims = 6
shape = [random.randint(1,5) for i in range(dims)]
elems = [random.random() for i in range(tensorproduct(*shape))]
ra = ArrayType(elems, shape)
perm = list(range(dims))
# Randomize the permutation:
random.shuffle(perm)
# Test inverse permutation:
assert permutedims(permutedims(ra, perm), _af_invert(perm)) == ra
# Test that permuted shape corresponds to action by `Permutation`:
assert permutedims(ra, perm).shape == tuple(Permutation(perm)(shape))
z = ArrayType.zeros(4,5,6,7)
assert permutedims(z, (2, 3, 1, 0)).shape == (6, 7, 5, 4)
assert permutedims(z, [2, 3, 1, 0]).shape == (6, 7, 5, 4)
assert permutedims(z, Permutation([2, 3, 1, 0])).shape == (6, 7, 5, 4)
po = ArrayType(sa, [2, 2, 3, 3, 2, 2])
raises(ValueError, lambda: permutedims(po, (1, 1)))
raises(ValueError, lambda: po.transpose())
raises(ValueError, lambda: po.adjoint())
assert permutedims(po, reversed(range(po.rank()))) == ArrayType(
[[[[[[sa[0], sa[72]], [sa[36], sa[108]]], [[sa[12], sa[84]], [sa[48], sa[120]]], [[sa[24],
sa[96]], [sa[60], sa[132]]]],
[[[sa[4], sa[76]], [sa[40], sa[112]]], [[sa[16],
sa[88]], [sa[52], sa[124]]],
[[sa[28], sa[100]], [sa[64], sa[136]]]],
[[[sa[8],
sa[80]], [sa[44], sa[116]]], [[sa[20], sa[92]], [sa[56], sa[128]]], [[sa[32],
sa[104]], [sa[68], sa[140]]]]],
[[[[sa[2], sa[74]], [sa[38], sa[110]]], [[sa[14],
sa[86]], [sa[50], sa[122]]], [[sa[26], sa[98]], [sa[62], sa[134]]]],
[[[sa[6],
sa[78]], [sa[42], sa[114]]], [[sa[18], sa[90]], [sa[54], sa[126]]], [[sa[30],
sa[102]], [sa[66], sa[138]]]],
[[[sa[10], sa[82]], [sa[46], sa[118]]], [[sa[22],
sa[94]], [sa[58], sa[130]]],
[[sa[34], sa[106]], [sa[70], sa[142]]]]]],
[[[[[sa[1],
sa[73]], [sa[37], sa[109]]], [[sa[13], sa[85]], [sa[49], sa[121]]], [[sa[25],
sa[97]], [sa[61], sa[133]]]],
[[[sa[5], sa[77]], [sa[41], sa[113]]], [[sa[17],
sa[89]], [sa[53], sa[125]]],
[[sa[29], sa[101]], [sa[65], sa[137]]]],
[[[sa[9],
sa[81]], [sa[45], sa[117]]], [[sa[21], sa[93]], [sa[57], sa[129]]], [[sa[33],
sa[105]], [sa[69], sa[141]]]]],
[[[[sa[3], sa[75]], [sa[39], sa[111]]], [[sa[15],
sa[87]], [sa[51], sa[123]]], [[sa[27], sa[99]], [sa[63], sa[135]]]],
[[[sa[7],
sa[79]], [sa[43], sa[115]]], [[sa[19], sa[91]], [sa[55], sa[127]]], [[sa[31],
sa[103]], [sa[67], sa[139]]]],
[[[sa[11], sa[83]], [sa[47], sa[119]]], [[sa[23],
sa[95]], [sa[59], sa[131]]],
[[sa[35], sa[107]], [sa[71], sa[143]]]]]]])
assert permutedims(po, (1, 0, 2, 3, 4, 5)) == ArrayType(
[[[[[[sa[0], sa[1]], [sa[2], sa[3]]], [[sa[4], sa[5]], [sa[6], sa[7]]], [[sa[8], sa[9]], [sa[10],
sa[11]]]],
[[[sa[12], sa[13]], [sa[14], sa[15]]], [[sa[16], sa[17]], [sa[18],
sa[19]]], [[sa[20], sa[21]], [sa[22], sa[23]]]],
[[[sa[24], sa[25]], [sa[26],
sa[27]]], [[sa[28], sa[29]], [sa[30], sa[31]]], [[sa[32], sa[33]], [sa[34],
sa[35]]]]],
[[[[sa[72], sa[73]], [sa[74], sa[75]]], [[sa[76], sa[77]], [sa[78],
sa[79]]], [[sa[80], sa[81]], [sa[82], sa[83]]]],
[[[sa[84], sa[85]], [sa[86],
sa[87]]], [[sa[88], sa[89]], [sa[90], sa[91]]], [[sa[92], sa[93]], [sa[94],
sa[95]]]],
[[[sa[96], sa[97]], [sa[98], sa[99]]], [[sa[100], sa[101]], [sa[102],
sa[103]]],
[[sa[104], sa[105]], [sa[106], sa[107]]]]]], [[[[[sa[36], sa[37]], [sa[38],
sa[39]]],
[[sa[40], sa[41]], [sa[42], sa[43]]],
[[sa[44], sa[45]], [sa[46],
sa[47]]]],
[[[sa[48], sa[49]], [sa[50], sa[51]]],
[[sa[52], sa[53]], [sa[54],
sa[55]]],
[[sa[56], sa[57]], [sa[58], sa[59]]]],
[[[sa[60], sa[61]], [sa[62],
sa[63]]],
[[sa[64], sa[65]], [sa[66], sa[67]]],
[[sa[68], sa[69]], [sa[70],
sa[71]]]]], [
[[[sa[108], sa[109]], [sa[110], sa[111]]],
[[sa[112], sa[113]], [sa[114],
sa[115]]],
[[sa[116], sa[117]], [sa[118], sa[119]]]],
[[[sa[120], sa[121]], [sa[122],
sa[123]]],
[[sa[124], sa[125]], [sa[126], sa[127]]],
[[sa[128], sa[129]], [sa[130],
sa[131]]]],
[[[sa[132], sa[133]], [sa[134], sa[135]]],
[[sa[136], sa[137]], [sa[138],
sa[139]]],
[[sa[140], sa[141]], [sa[142], sa[143]]]]]]])
assert permutedims(po, (0, 2, 1, 4, 3, 5)) == ArrayType(
[[[[[[sa[0], sa[1]], [sa[4], sa[5]], [sa[8], sa[9]]], [[sa[2], sa[3]], [sa[6], sa[7]], [sa[10],
sa[11]]]],
[[[sa[36], sa[37]], [sa[40], sa[41]], [sa[44], sa[45]]], [[sa[38],
sa[39]], [sa[42], sa[43]], [sa[46], sa[47]]]]],
[[[[sa[12], sa[13]], [sa[16],
sa[17]], [sa[20], sa[21]]], [[sa[14], sa[15]], [sa[18], sa[19]], [sa[22],
sa[23]]]],
[[[sa[48], sa[49]], [sa[52], sa[53]], [sa[56], sa[57]]], [[sa[50],
sa[51]], [sa[54], sa[55]], [sa[58], sa[59]]]]],
[[[[sa[24], sa[25]], [sa[28],
sa[29]], [sa[32], sa[33]]], [[sa[26], sa[27]], [sa[30], sa[31]], [sa[34],
sa[35]]]],
[[[sa[60], sa[61]], [sa[64], sa[65]], [sa[68], sa[69]]], [[sa[62],
sa[63]], [sa[66], sa[67]], [sa[70], sa[71]]]]]],
[[[[[sa[72], sa[73]], [sa[76],
sa[77]], [sa[80], sa[81]]], [[sa[74], sa[75]], [sa[78], sa[79]], [sa[82],
sa[83]]]],
[[[sa[108], sa[109]], [sa[112], sa[113]], [sa[116], sa[117]]], [[sa[110],
sa[111]], [sa[114], sa[115]],
[sa[118], sa[119]]]]],
[[[[sa[84], sa[85]], [sa[88],
sa[89]], [sa[92], sa[93]]], [[sa[86], sa[87]], [sa[90], sa[91]], [sa[94],
sa[95]]]],
[[[sa[120], sa[121]], [sa[124], sa[125]], [sa[128], sa[129]]], [[sa[122],
sa[123]], [sa[126], sa[127]],
[sa[130], sa[131]]]]],
[[[[sa[96], sa[97]], [sa[100],
sa[101]], [sa[104], sa[105]]], [[sa[98], sa[99]], [sa[102], sa[103]], [sa[106],
sa[107]]]],
[[[sa[132], sa[133]], [sa[136], sa[137]], [sa[140], sa[141]]], [[sa[134],
sa[135]], [sa[138], sa[139]],
[sa[142], sa[143]]]]]]])
po2 = po.reshape(4, 9, 2, 2)
assert po2 == ArrayType([[[[sa[0], sa[1]], [sa[2], sa[3]]], [[sa[4], sa[5]], [sa[6], sa[7]]], [[sa[8], sa[9]], [sa[10], sa[11]]], [[sa[12], sa[13]], [sa[14], sa[15]]], [[sa[16], sa[17]], [sa[18], sa[19]]], [[sa[20], sa[21]], [sa[22], sa[23]]], [[sa[24], sa[25]], [sa[26], sa[27]]], [[sa[28], sa[29]], [sa[30], sa[31]]], [[sa[32], sa[33]], [sa[34], sa[35]]]], [[[sa[36], sa[37]], [sa[38], sa[39]]], [[sa[40], sa[41]], [sa[42], sa[43]]], [[sa[44], sa[45]], [sa[46], sa[47]]], [[sa[48], sa[49]], [sa[50], sa[51]]], [[sa[52], sa[53]], [sa[54], sa[55]]], [[sa[56], sa[57]], [sa[58], sa[59]]], [[sa[60], sa[61]], [sa[62], sa[63]]], [[sa[64], sa[65]], [sa[66], sa[67]]], [[sa[68], sa[69]], [sa[70], sa[71]]]], [[[sa[72], sa[73]], [sa[74], sa[75]]], [[sa[76], sa[77]], [sa[78], sa[79]]], [[sa[80], sa[81]], [sa[82], sa[83]]], [[sa[84], sa[85]], [sa[86], sa[87]]], [[sa[88], sa[89]], [sa[90], sa[91]]], [[sa[92], sa[93]], [sa[94], sa[95]]], [[sa[96], sa[97]], [sa[98], sa[99]]], [[sa[100], sa[101]], [sa[102], sa[103]]], [[sa[104], sa[105]], [sa[106], sa[107]]]], [[[sa[108], sa[109]], [sa[110], sa[111]]], [[sa[112], sa[113]], [sa[114], sa[115]]], [[sa[116], sa[117]], [sa[118], sa[119]]], [[sa[120], sa[121]], [sa[122], sa[123]]], [[sa[124], sa[125]], [sa[126], sa[127]]], [[sa[128], sa[129]], [sa[130], sa[131]]], [[sa[132], sa[133]], [sa[134], sa[135]]], [[sa[136], sa[137]], [sa[138], sa[139]]], [[sa[140], sa[141]], [sa[142], sa[143]]]]])
assert permutedims(po2, (3, 2, 0, 1)) == ArrayType([[[[sa[0], sa[4], sa[8], sa[12], sa[16], sa[20], sa[24], sa[28], sa[32]], [sa[36], sa[40], sa[44], sa[48], sa[52], sa[56], sa[60], sa[64], sa[68]], [sa[72], sa[76], sa[80], sa[84], sa[88], sa[92], sa[96], sa[100], sa[104]], [sa[108], sa[112], sa[116], sa[120], sa[124], sa[128], sa[132], sa[136], sa[140]]], [[sa[2], sa[6], sa[10], sa[14], sa[18], sa[22], sa[26], sa[30], sa[34]], [sa[38], sa[42], sa[46], sa[50], sa[54], sa[58], sa[62], sa[66], sa[70]], [sa[74], sa[78], sa[82], sa[86], sa[90], sa[94], sa[98], sa[102], sa[106]], [sa[110], sa[114], sa[118], sa[122], sa[126], sa[130], sa[134], sa[138], sa[142]]]], [[[sa[1], sa[5], sa[9], sa[13], sa[17], sa[21], sa[25], sa[29], sa[33]], [sa[37], sa[41], sa[45], sa[49], sa[53], sa[57], sa[61], sa[65], sa[69]], [sa[73], sa[77], sa[81], sa[85], sa[89], sa[93], sa[97], sa[101], sa[105]], [sa[109], sa[113], sa[117], sa[121], sa[125], sa[129], sa[133], sa[137], sa[141]]], [[sa[3], sa[7], sa[11], sa[15], sa[19], sa[23], sa[27], sa[31], sa[35]], [sa[39], sa[43], sa[47], sa[51], sa[55], sa[59], sa[63], sa[67], sa[71]], [sa[75], sa[79], sa[83], sa[87], sa[91], sa[95], sa[99], sa[103], sa[107]], [sa[111], sa[115], sa[119], sa[123], sa[127], sa[131], sa[135], sa[139], sa[143]]]]])
# test for large scale sparse array
for SparseArrayType in [ImmutableSparseNDimArray, MutableSparseNDimArray]:
A = SparseArrayType({1:1, 10000:2}, (10000, 20000, 10000))
assert permutedims(A, (0, 1, 2)) == A
assert permutedims(A, (1, 0, 2)) == SparseArrayType({1: 1, 100000000: 2}, (20000, 10000, 10000))
B = SparseArrayType({1:1, 20000:2}, (10000, 20000))
assert B.transpose() == SparseArrayType({10000: 1, 1: 2}, (20000, 10000))
def test_permutedims_with_indices():
A = Array(range(32)).reshape(2, 2, 2, 2, 2)
indices_new = list("abcde")
indices_old = list("ebdac")
new_A = permutedims(A, index_order_new=indices_new, index_order_old=indices_old)
for a, b, c, d, e in itertools.product(range(2), range(2), range(2), range(2), range(2)):
assert new_A[a, b, c, d, e] == A[e, b, d, a, c]
indices_old = list("cabed")
new_A = permutedims(A, index_order_new=indices_new, index_order_old=indices_old)
for a, b, c, d, e in itertools.product(range(2), range(2), range(2), range(2), range(2)):
assert new_A[a, b, c, d, e] == A[c, a, b, e, d]
raises(ValueError, lambda: permutedims(A, index_order_old=list("aacde"), index_order_new=list("abcde")))
raises(ValueError, lambda: permutedims(A, index_order_old=list("abcde"), index_order_new=list("abcce")))
raises(ValueError, lambda: permutedims(A, index_order_old=list("abcde"), index_order_new=list("abce")))
raises(ValueError, lambda: permutedims(A, index_order_old=list("abce"), index_order_new=list("abce")))
raises(ValueError, lambda: permutedims(A, [2, 1, 0, 3, 4], index_order_old=list("abcde")))
raises(ValueError, lambda: permutedims(A, [2, 1, 0, 3, 4], index_order_new=list("abcde")))
def test_flatten():
from sympy.matrices.dense import Matrix
for ArrayType in [ImmutableDenseNDimArray, ImmutableSparseNDimArray, Matrix]:
A = ArrayType(range(24)).reshape(4, 6)
assert [i for i in Flatten(A)] == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
for i, v in enumerate(Flatten(A)):
assert i == v
def test_tensordiagonal():
from sympy.matrices.dense import eye
expr = Array(range(9)).reshape(3, 3)
raises(ValueError, lambda: tensordiagonal(expr, [0], [1]))
raises(ValueError, lambda: tensordiagonal(expr, [0, 0]))
assert tensordiagonal(eye(3), [0, 1]) == Array([1, 1, 1])
assert tensordiagonal(expr, [0, 1]) == Array([0, 4, 8])
x, y, z = symbols("x y z")
expr2 = tensorproduct([x, y, z], expr)
assert tensordiagonal(expr2, [1, 2]) == Array([[0, 4*x, 8*x], [0, 4*y, 8*y], [0, 4*z, 8*z]])
assert tensordiagonal(expr2, [0, 1]) == Array([[0, 3*y, 6*z], [x, 4*y, 7*z], [2*x, 5*y, 8*z]])
assert tensordiagonal(expr2, [0, 1, 2]) == Array([0, 4*y, 8*z])
# assert tensordiagonal(expr2, [0]) == permutedims(expr2, [1, 2, 0])
# assert tensordiagonal(expr2, [1]) == permutedims(expr2, [0, 2, 1])
# assert tensordiagonal(expr2, [2]) == expr2
# assert tensordiagonal(expr2, [1], [2]) == expr2
# assert tensordiagonal(expr2, [0], [1]) == permutedims(expr2, [2, 0, 1])
a, b, c, X, Y, Z = symbols("a b c X Y Z")
expr3 = tensorproduct([x, y, z], [1, 2, 3], [a, b, c], [X, Y, Z])
assert tensordiagonal(expr3, [0, 1, 2, 3]) == Array([x*a*X, 2*y*b*Y, 3*z*c*Z])
assert tensordiagonal(expr3, [0, 1], [2, 3]) == tensorproduct([x, 2*y, 3*z], [a*X, b*Y, c*Z])
# assert tensordiagonal(expr3, [0], [1, 2], [3]) == tensorproduct([x, y, z], [a, 2*b, 3*c], [X, Y, Z])
assert tensordiagonal(tensordiagonal(expr3, [2, 3]), [0, 1]) == tensorproduct([a*X, b*Y, c*Z], [x, 2*y, 3*z])
raises(ValueError, lambda: tensordiagonal([[1, 2, 3], [4, 5, 6]], [0, 1]))
raises(ValueError, lambda: tensordiagonal(expr3.reshape(3, 3, 9), [1, 2]))
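# Editor's sketch (not an original sympy test): ordinary matrix multiplication
# expressed as a tensorproduct followed by a contraction over the two middle
# indices, using only the functions imported at the top of this file.
def _example_matmul_via_contraction():
    A = Array([[1, 2], [3, 4]])
    B = Array([[5, 6], [7, 8]])
    # (A x B)[i, j, k, l] = A[i, j]*B[k, l]; contracting axes (1, 2) sums over j == k
    prod = tensorcontraction(tensorproduct(A, B), (1, 2))
    assert prod == Array([[19, 22], [43, 50]])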
| 71.411602
| 1,443
| 0.411125
|
57af48c7c6ce730f18de8225a93c02bfa45f922d
| 1,741
|
py
|
Python
|
src/pkg/caendr/caendr/models/sql/wormbase_gene_summary.py
|
AndersenLab/CAENDR
|
ce4cdb74db736db8226ffc90988959b71b0d5ff5
|
[
"MIT"
] | 3
|
2022-02-09T07:04:37.000Z
|
2022-03-11T02:46:35.000Z
|
src/pkg/caendr/caendr/models/sql/wormbase_gene_summary.py
|
AndersenLab/CAENDR
|
ce4cdb74db736db8226ffc90988959b71b0d5ff5
|
[
"MIT"
] | 4
|
2022-01-28T22:28:08.000Z
|
2022-02-11T21:47:15.000Z
|
src/pkg/caendr/caendr/models/sql/wormbase_gene_summary.py
|
AndersenLab/CAENDR
|
ce4cdb74db736db8226ffc90988959b71b0d5ff5
|
[
"MIT"
] | 1
|
2022-01-11T03:39:02.000Z
|
2022-01-11T03:39:02.000Z
|
from caendr.services.cloud.postgresql import db
from caendr.models.sql.dict_serializable import DictSerializable
from sqlalchemy import func, or_
from sqlalchemy.ext.hybrid import hybrid_property
class WormbaseGeneSummary(DictSerializable, db.Model):
"""
This is a condensed version of the WormbaseGene model;
    It is provided for convenience and only defines the genes
(not exons/introns/etc.)
"""
id = db.Column(db.Integer, primary_key=True)
chrom = db.Column(db.String(7), index=True)
chrom_num = db.Column(db.Integer(), index=True)
start = db.Column(db.Integer(), index=True)
end = db.Column(db.Integer(), index=True)
locus = db.Column(db.String(30), index=True)
gene_id = db.Column(db.String(25), unique=True, index=True)
gene_id_type = db.Column(db.String(15), index=False)
sequence_name = db.Column(db.String(30), index=True)
biotype = db.Column(db.String(30), nullable=True)
gene_symbol = db.column_property(func.coalesce(locus, sequence_name, gene_id))
# interval = db.column_property(func.format("%s:%s-%s", chrom, start, end))
arm_or_center = db.Column(db.String(12), index=True)
__tablename__ = "wormbase_gene_summary"
__gene_id_constraint__ = db.UniqueConstraint(gene_id)
@hybrid_property
def interval(self):
return f"{self.chrom}:{self.start}-{self.end}"
# TODO: move this somewhere else
@classmethod
def resolve_gene_id(cls, query):
"""
query - a locus name or transcript ID
output - a wormbase gene ID
Example:
            WormbaseGeneSummary.resolve_gene_id('pot-2') --> WBGene00010195
"""
result = cls.query.filter(or_(cls.locus == query, cls.sequence_name == query)).first()
if result:
return result.gene_id
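# Editor's sketch (illustrative only, not part of the original model): a typical
# lookup flow combining resolve_gene_id, the gene_symbol column_property and the
# interval hybrid property. An active database session/app context is assumed.
def _example_lookup(query_term='pot-2'):
    gene_id = WormbaseGeneSummary.resolve_gene_id(query_term)
    if gene_id is None:
        return None
    gene = WormbaseGeneSummary.query.filter_by(gene_id=gene_id).first()
    return gene.gene_symbol, gene.interval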
| 37.847826
| 90
| 0.711086
|
8bcf93ed726343e934781853a836e3e04dbecb1c
| 3,323
|
py
|
Python
|
simpleapi/client/client.py
|
ghuntley/simpleapi
|
e64e05e9b2276098d3442db174a4d0204be56b39
|
[
"MIT"
] | 1
|
2019-06-27T11:41:03.000Z
|
2019-06-27T11:41:03.000Z
|
simpleapi/client/client.py
|
ghuntley/simpleapi
|
e64e05e9b2276098d3442db174a4d0204be56b39
|
[
"MIT"
] | null | null | null |
simpleapi/client/client.py
|
ghuntley/simpleapi
|
e64e05e9b2276098d3442db174a4d0204be56b39
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__all__ = ('Client', 'ClientException', 'ConnectionException', 'RemoteException', )
import socket
import urllib
import cPickle
from simpleapi.message import formatters, wrappers
class ClientException(Exception): pass
class ConnectionException(ClientException): pass
class RemoteException(ClientException): pass
class Client(object):
"""simpleapi's client library.
:param ns: URL of your :class:`~simpleapi.Route`'s endpoint
:param access_key: string key used for authentication
:param version: Namespace version to be used (default is highest)
:param transport_type: encoding/decoding type for request/response (default
is json)
:param wrapper_type: wrapper used for formatting the response
:param timeout: connection timeout in secs (default is system parameter)
"""
def __init__(self, ns, access_key=None, version='default',
transport_type='json', wrapper_type='default', timeout=None):
if timeout is not None:
socket.setdefaulttimeout(timeout)
self.ns = ns
self.access_key = access_key
self.version = version
assert transport_type in formatters
self.transport_type = transport_type
assert wrapper_type in wrappers
self.wrapper_type = wrapper_type
def _handle_remote_call(self, fname):
def do_call(**kwargs):
data = {
'_call': fname,
'_output': self.transport_type,
'_input': self.transport_type,
'_wrapper': self.wrapper_type,
'_access_key': self.access_key or '',
'_version': self.version
}
formatter = formatters[self.transport_type](None, None)
for key, value in kwargs.iteritems():
kwargs[key] = formatter.kwargs(value)
data.update(kwargs)
try:
response = urllib.urlopen(self.ns,
urllib.urlencode(data))
assert response.getcode() in [200,], \
u'HTTP-Server returned http code %s (expected: 200) ' % \
response.getcode()
response_buffer = response.read()
except IOError, e:
raise ConnectionException(e)
try:
response = formatter.parse(response_buffer)
except (cPickle.UnpicklingError, EOFError), e:
raise ClientException(
u'Couldn\'t unpickle response ' \
                    'data. Did you add "pickle" to the namespace\'s' \
' __features__ list?'
)
except ValueError, e:
raise ConnectionException, e
if response.get('success'):
return response.get('result')
else:
raise RemoteException(". ".join(response.get('errors')))
return do_call
def __getattr__(self, name):
return self._handle_remote_call(name)
def set_version(self, version):
"""uses a different version for further requests"""
self.version = int(version)
def set_ns(self, ns):
"""changes the URL for the Route's endpoint"""
self.ns = ns
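# Editor's sketch (illustrative only): the endpoint URL, access key and the
# remote method name below are assumptions, not part of simpleapi itself.
def _example_call():
    client = Client('http://localhost:8888/api/', access_key='secret',
                    transport_type='json', timeout=5)
    try:
        # any attribute access is proxied to the Route as a remote call via __getattr__
        return client.get_time(format='iso')
    except RemoteException:
        return None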
| 33.908163
| 83
| 0.585615
|
019e9cf9297194b84f5986b87bf1bbb2b6a09538
| 3,487
|
py
|
Python
|
Lib/site-packages/botocore/__init__.py
|
nilknarfgnow/mypython
|
6966e149aa50d6477ecfdd7490efd63d7c217dc8
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/botocore/__init__.py
|
nilknarfgnow/mypython
|
6966e149aa50d6477ecfdd7490efd63d7c217dc8
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/botocore/__init__.py
|
nilknarfgnow/mypython
|
6966e149aa50d6477ecfdd7490efd63d7c217dc8
|
[
"bzip2-1.0.6"
] | null | null | null |
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import re
import logging
__version__ = '1.10.66'
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Configure default logger to do nothing
log = logging.getLogger('botocore')
log.addHandler(NullHandler())
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# The regex below handles the special case where some acronym
# name is pluralized, e.g. GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{3,}s$')
# Prepopulate the cache with special cases that don't match
# our regular transformation.
_xform_cache = {
('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
('ListHITsForQualificationType', '_'): 'list_hits_for_qualification_type',
('ListHITsForQualificationType', '-'): 'list-hits-for-qualification-type',
}
# The items in this dict represent partial renames to apply globally to all
# services which might have a matching argument or operation. This way a
# common mis-translation can be fixed without having to call out each
# individual case.
ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')
BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))
# Used to specify anonymous (unsigned) request signature
class UNSIGNED(object):
def __copy__(self):
return self
def __deepcopy__(self, memodict):
return self
UNSIGNED = UNSIGNED()
def xform_name(name, sep='_', _xform_cache=_xform_cache):
"""Convert camel case to a "pythonic" name.
If the name contains the ``sep`` character, then it is
returned unchanged.
"""
if sep in name:
# If the sep is in the name, assume that it's already
# transformed and return the string unchanged.
return name
key = (name, sep)
if key not in _xform_cache:
if _special_case_transform.search(name) is not None:
is_special = _special_case_transform.search(name)
matched = is_special.group()
# Replace something like ARNs, ACLs with _arns, _acls.
name = name[:-len(matched)] + sep + matched.lower()
s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s1).lower()
_xform_cache[key] = transformed
return _xform_cache[key]
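# Editor's sketch (not part of botocore): a few concrete transformations,
# including a pluralized-acronym case and one of the prepopulated special
# cases from _xform_cache above.
def _xform_name_examples():
    assert xform_name('DescribeInstances') == 'describe_instances'
    assert xform_name('ListWebACLs') == 'list_web_acls'
    assert xform_name('CreateCachediSCSIVolume') == 'create_cached_iscsi_volume'
    # names already containing the separator are returned unchanged
    assert xform_name('already_transformed') == 'already_transformed'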
| 37.494624
| 78
| 0.704044
|
bd7df0b18b89b6b4712cf8a513802bb4c35f8178
| 6,456
|
py
|
Python
|
rastervision2/core/data/label_store/semantic_segmentation_label_store.py
|
csaybar/raster-vision
|
617ca15f64e3b8a391432306a743f7d0dfff352f
|
[
"Apache-2.0"
] | 1
|
2020-10-10T12:32:43.000Z
|
2020-10-10T12:32:43.000Z
|
rastervision2/core/data/label_store/semantic_segmentation_label_store.py
|
alvintuitoek/raster-vision
|
ec6c8309f89c404513862369bb93dd9e6a70b455
|
[
"Apache-2.0"
] | null | null | null |
rastervision2/core/data/label_store/semantic_segmentation_label_store.py
|
alvintuitoek/raster-vision
|
ec6c8309f89c404513862369bb93dd9e6a70b455
|
[
"Apache-2.0"
] | 1
|
2021-12-02T08:07:21.000Z
|
2021-12-02T08:07:21.000Z
|
import numpy as np
import rasterio
from rastervision2.pipeline.filesystem import (get_local_path, make_dir,
upload_or_copy, file_exists)
from rastervision2.core.data.label import SemanticSegmentationLabels
from rastervision2.core.data.label_store import LabelStore
from rastervision2.core.data.label_source import SegmentationClassTransformer
from rastervision2.core.data.raster_source import RasterioSourceConfig
class SemanticSegmentationLabelStore(LabelStore):
def __init__(self,
uri,
extent,
crs_transformer,
tmp_dir,
vector_output=None,
class_config=None):
"""Constructor.
Args:
uri: (str) URI of GeoTIFF file used for storing predictions as RGB values
extent: (Box) The extent of the scene
crs_transformer: (CRSTransformer)
tmp_dir: (str) temp directory to use
            vector_output: (None or array of dicts) containing vectorification
configuration information
class_config: (ClassConfig) with color values used to convert
class ids to RGB value
"""
self.uri = uri
self.vector_output = vector_output
self.extent = extent
self.crs_transformer = crs_transformer
self.tmp_dir = tmp_dir
# Note: can't name this class_transformer due to Python using that attribute
if class_config:
self.class_trans = SegmentationClassTransformer(class_config)
else:
self.class_trans = None
self.source = None
if file_exists(uri):
self.source = RasterioSourceConfig(uris=[uri]).build(tmp_dir)
def _subcomponents_to_activate(self):
if self.source is not None:
return [self.source]
return []
def get_labels(self, chip_size=1000):
"""Get all labels.
Returns:
SemanticSegmentationLabels with windows of size chip_size covering the
scene with no overlap.
"""
if self.source is None:
raise Exception('Raster source at {} does not exist'.format(
self.uri))
labels = SemanticSegmentationLabels()
extent = self.source.get_extent()
windows = extent.get_windows(chip_size, chip_size)
for w in windows:
raw_labels = self.source.get_raw_chip(w)
label_arr = (np.squeeze(raw_labels) if self.class_trans is None
else self.class_trans.rgb_to_class(raw_labels))
labels.set_label_arr(w, label_arr)
return labels
def save(self, labels):
"""Save.
Args:
labels - (SemanticSegmentationLabels) labels to be saved
"""
local_path = get_local_path(self.uri, self.tmp_dir)
make_dir(local_path, use_dirname=True)
transform = self.crs_transformer.get_affine_transform()
crs = self.crs_transformer.get_image_crs()
band_count = 1
dtype = np.uint8
if self.class_trans:
band_count = 3
mask = (np.zeros((self.extent.ymax, self.extent.xmax), dtype=np.uint8)
if self.vector_output else None)
# https://github.com/mapbox/rasterio/blob/master/docs/quickstart.rst
# https://rasterio.readthedocs.io/en/latest/topics/windowed-rw.html
with rasterio.open(
local_path,
'w',
driver='GTiff',
height=self.extent.ymax,
width=self.extent.xmax,
count=band_count,
dtype=dtype,
transform=transform,
crs=crs) as dataset:
for window in labels.get_windows():
label_arr = labels.get_label_arr(window)
window = window.intersection(self.extent)
label_arr = label_arr[0:window.get_height(), 0:
window.get_width()]
if mask is not None:
mask[window.ymin:window.ymax, window.xmin:
window.xmax] = label_arr
window = window.rasterio_format()
if self.class_trans:
rgb_labels = self.class_trans.class_to_rgb(label_arr)
for chan in range(3):
dataset.write_band(
chan + 1, rgb_labels[:, :, chan], window=window)
else:
img = label_arr.astype(dtype)
dataset.write_band(1, img, window=window)
upload_or_copy(local_path, self.uri)
if self.vector_output:
import mask_to_polygons.vectorification as vectorification
import mask_to_polygons.processing.denoise as denoise
for vo in self.vector_output:
denoise_radius = vo.denoise
uri = vo.uri
mode = vo.get_mode()
class_id = vo.class_id
class_mask = np.array(mask == class_id, dtype=np.uint8)
local_geojson_path = get_local_path(uri, self.tmp_dir)
def transform(x, y):
return self.crs_transformer.pixel_to_map((x, y))
if denoise_radius > 0:
class_mask = denoise.denoise(class_mask, denoise_radius)
if uri and mode == 'buildings':
geojson = vectorification.geojson_from_mask(
mask=class_mask,
transform=transform,
mode=mode,
min_aspect_ratio=vo.min_aspect_ratio,
min_area=vo.min_area,
width_factor=vo.element_width_factor,
thickness=vo.element_thickness)
elif uri and mode == 'polygons':
geojson = vectorification.geojson_from_mask(
mask=class_mask, transform=transform, mode=mode)
if local_geojson_path:
with open(local_geojson_path, 'w') as file_out:
file_out.write(geojson)
upload_or_copy(local_geojson_path, uri)
def empty_labels(self):
"""Returns an empty SemanticSegmentationLabels object."""
return SemanticSegmentationLabels()
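# Editor's sketch (illustrative only, not part of raster-vision): a typical
# read/modify/write round trip; `label_store` and `new_labels` are assumed to
# be provided by the caller.
def _example_roundtrip(label_store, new_labels):
    # read existing predictions back as SemanticSegmentationLabels
    labels = label_store.get_labels(chip_size=1000)
    for window in new_labels.get_windows():
        labels.set_label_arr(window, new_labels.get_label_arr(window))
    # re-encodes to RGB (if a class_config was given) and uploads to label_store.uri
    label_store.save(labels)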
| 38.891566
| 85
| 0.571871
|
28d86f5049d625a5a5178537c34d8c5b236f25b1
| 36,126
|
py
|
Python
|
tornado/ioloop.py
|
bgerrity/tornado
|
208672f3bf6cbb7e37f54c356e02a71ca29f1e02
|
[
"Apache-2.0"
] | 15,056
|
2015-01-01T03:08:16.000Z
|
2022-03-31T14:44:56.000Z
|
tornado/ioloop.py
|
bgerrity/tornado
|
208672f3bf6cbb7e37f54c356e02a71ca29f1e02
|
[
"Apache-2.0"
] | 1,645
|
2015-01-05T08:15:32.000Z
|
2022-03-24T20:30:10.000Z
|
tornado/ioloop.py
|
bgerrity/tornado
|
208672f3bf6cbb7e37f54c356e02a71ca29f1e02
|
[
"Apache-2.0"
] | 5,098
|
2015-01-02T15:43:36.000Z
|
2022-03-30T06:04:43.000Z
|
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
In Tornado 6.0, `.IOLoop` is a wrapper around the `asyncio` event
loop, with a slightly different interface for historical reasons.
Applications can use either the `.IOLoop` interface or the underlying
`asyncio` event loop directly (unless compatibility with older
versions of Tornado is desired, in which case `.IOLoop` must be used).
Typical applications will use a single `IOLoop` object, accessed via
`IOLoop.current` class method. The `IOLoop.start` method (or
equivalently, `asyncio.AbstractEventLoop.run_forever`) should usually
be called at the end of the ``main()`` function. Atypical applications
may use more than one `IOLoop`, such as one `IOLoop` per thread, or
per `unittest` case.
"""
import asyncio
import concurrent.futures
import datetime
import functools
import logging
import numbers
import os
import sys
import time
import math
import random
from inspect import isawaitable
from tornado.concurrent import (
Future,
is_future,
chain_future,
future_set_exc_info,
future_add_done_callback,
)
from tornado.log import app_log
from tornado.util import Configurable, TimeoutError, import_object
import typing
from typing import Union, Any, Type, Optional, Callable, TypeVar, Tuple, Awaitable
if typing.TYPE_CHECKING:
from typing import Dict, List # noqa: F401
from typing_extensions import Protocol
else:
Protocol = object
class _Selectable(Protocol):
def fileno(self) -> int:
pass
def close(self) -> None:
pass
_T = TypeVar("_T")
_S = TypeVar("_S", bound=_Selectable)
class IOLoop(Configurable):
"""An I/O event loop.
As of Tornado 6.0, `IOLoop` is a wrapper around the `asyncio` event
loop.
Example usage for a simple TCP server:
.. testcode::
import errno
import functools
import socket
import tornado.ioloop
from tornado.iostream import IOStream
async def handle_connection(connection, address):
stream = IOStream(connection)
message = await stream.read_until_close()
print("message from client:", message.decode().strip())
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except BlockingIOError:
return
connection.setblocking(0)
io_loop = tornado.ioloop.IOLoop.current()
io_loop.spawn_callback(handle_connection, connection, address)
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", 8888))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
.. testoutput::
:hide:
By default, a newly-constructed `IOLoop` becomes the thread's current
`IOLoop`, unless there already is a current `IOLoop`. This behavior
can be controlled with the ``make_current`` argument to the `IOLoop`
constructor: if ``make_current=True``, the new `IOLoop` will always
try to become current and it raises an error if there is already a
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
In general, an `IOLoop` cannot survive a fork or be shared across
processes in any way. When multiple processes are being used, each
process should create its own `IOLoop`, which also implies that
any objects which depend on the `IOLoop` (such as
`.AsyncHTTPClient`) must also be created in the child processes.
As a guideline, anything that starts processes (including the
`tornado.process` and `multiprocessing` modules) should do so as
early as possible, ideally the first thing the application does
after loading its configuration in ``main()``.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
.. versionchanged:: 5.0
Uses the `asyncio` event loop by default. The
``IOLoop.configure`` method cannot be used on Python 3 except
to redundantly specify the `asyncio` event loop.
"""
# These constants were originally based on constants from the epoll module.
NONE = 0
READ = 0x001
WRITE = 0x004
ERROR = 0x018
# In Python 3, _ioloop_for_asyncio maps from asyncio loops to IOLoops.
_ioloop_for_asyncio = dict() # type: Dict[asyncio.AbstractEventLoop, IOLoop]
@classmethod
def configure(
cls, impl: "Union[None, str, Type[Configurable]]", **kwargs: Any
) -> None:
if asyncio is not None:
from tornado.platform.asyncio import BaseAsyncIOLoop
if isinstance(impl, str):
impl = import_object(impl)
if isinstance(impl, type) and not issubclass(impl, BaseAsyncIOLoop):
raise RuntimeError(
"only AsyncIOLoop is allowed when asyncio is available"
)
super(IOLoop, cls).configure(impl, **kwargs)
@staticmethod
def instance() -> "IOLoop":
"""Deprecated alias for `IOLoop.current()`.
.. versionchanged:: 5.0
Previously, this method returned a global singleton
`IOLoop`, in contrast with the per-thread `IOLoop` returned
by `current()`. In nearly all cases the two were the same
(when they differed, it was generally used from non-Tornado
threads to communicate back to the main thread's `IOLoop`).
This distinction is not present in `asyncio`, so in order
to facilitate integration with that package `instance()`
was changed to be an alias to `current()`. Applications
using the cross-thread communications aspect of
`instance()` should instead set their own global variable
to point to the `IOLoop` they want to use.
.. deprecated:: 5.0
"""
return IOLoop.current()
def install(self) -> None:
"""Deprecated alias for `make_current()`.
.. versionchanged:: 5.0
Previously, this method would set this `IOLoop` as the
global singleton used by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`, `install()`
is an alias for `make_current()`.
.. deprecated:: 5.0
"""
self.make_current()
@staticmethod
def clear_instance() -> None:
"""Deprecated alias for `clear_current()`.
.. versionchanged:: 5.0
Previously, this method would clear the `IOLoop` used as
the global singleton by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`,
`clear_instance()` is an alias for `clear_current()`.
.. deprecated:: 5.0
"""
IOLoop.clear_current()
@typing.overload
@staticmethod
def current() -> "IOLoop":
pass
@typing.overload
@staticmethod
def current(instance: bool = True) -> Optional["IOLoop"]: # noqa: F811
pass
@staticmethod
def current(instance: bool = True) -> Optional["IOLoop"]: # noqa: F811
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop` and ``instance`` is true, creates one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
.. versionchanged:: 5.0
On Python 3, control of the current `IOLoop` is delegated
to `asyncio`, with this and other methods as pass-through accessors.
The ``instance`` argument now controls whether an `IOLoop`
is created automatically when there is none, instead of
whether we fall back to `IOLoop.instance()` (which is now
an alias for this method). ``instance=False`` is deprecated,
since even if we do not create an `IOLoop`, this method
may initialize the asyncio loop.
"""
try:
loop = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
if not instance:
return None
raise
try:
return IOLoop._ioloop_for_asyncio[loop]
except KeyError:
if instance:
from tornado.platform.asyncio import AsyncIOMainLoop
current = AsyncIOMainLoop(make_current=True) # type: Optional[IOLoop]
else:
current = None
return current
def make_current(self) -> None:
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
.. versionchanged:: 5.0
This method also sets the current `asyncio` event loop.
"""
# The asyncio event loops override this method.
raise NotImplementedError()
@staticmethod
def clear_current() -> None:
"""Clears the `IOLoop` for the current thread.
Intended primarily for use by test frameworks in between tests.
.. versionchanged:: 5.0
This method also clears the current `asyncio` event loop.
"""
old = IOLoop.current(instance=False)
if old is not None:
old._clear_current_hook()
if asyncio is None:
IOLoop._current.instance = None
def _clear_current_hook(self) -> None:
"""Instance method called when an IOLoop ceases to be current.
May be overridden by subclasses as a counterpart to make_current.
"""
pass
@classmethod
def configurable_base(cls) -> Type[Configurable]:
return IOLoop
@classmethod
def configurable_default(cls) -> Type[Configurable]:
from tornado.platform.asyncio import AsyncIOLoop
return AsyncIOLoop
def initialize(self, make_current: Optional[bool] = None) -> None:
if make_current is None:
if IOLoop.current(instance=False) is None:
self.make_current()
elif make_current:
current = IOLoop.current(instance=False)
# AsyncIO loops can already be current by this point.
if current is not None and current is not self:
raise RuntimeError("current IOLoop already exists")
self.make_current()
def close(self, all_fds: bool = False) -> None:
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
            ``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
@typing.overload
def add_handler(
self, fd: int, handler: Callable[[int, int], None], events: int
) -> None:
pass
@typing.overload # noqa: F811
def add_handler(
self, fd: _S, handler: Callable[[_S, int], None], events: int
) -> None:
pass
def add_handler( # noqa: F811
self, fd: Union[int, _Selectable], handler: Callable[..., None], events: int
) -> None:
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` and ``close()`` method.
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd: Union[int, _Selectable], events: int) -> None:
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd: Union[int, _Selectable]) -> None:
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def start(self) -> None:
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self) -> None:
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any(
[
logging.getLogger().handlers,
logging.getLogger("tornado").handlers,
logging.getLogger("tornado.application").handlers,
]
):
logging.basicConfig()
def stop(self) -> None:
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func: Callable, timeout: Optional[float] = None) -> Any:
"""Starts the `IOLoop`, runs the given function, and stops the loop.
The function must return either an awaitable object or
``None``. If the function returns an awaitable object, the
`IOLoop` will run until the awaitable is resolved (and
`run_sync()` will return the awaitable's result). If it raises
an exception, the `IOLoop` will stop and the exception will be
re-raised to the caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `tornado.util.TimeoutError` is raised.
This method is useful to allow asynchronous calls in a
``main()`` function::
async def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
.. versionchanged:: 4.3
Returning a non-``None``, non-awaitable value is now an error.
.. versionchanged:: 5.0
If a timeout occurs, the ``func`` coroutine will be cancelled.
"""
future_cell = [None] # type: List[Optional[Future]]
def run() -> None:
try:
result = func()
if result is not None:
from tornado.gen import convert_yielded
result = convert_yielded(result)
except Exception:
fut = Future() # type: Future[Any]
future_cell[0] = fut
future_set_exc_info(fut, sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
fut = Future()
future_cell[0] = fut
fut.set_result(result)
assert future_cell[0] is not None
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
def timeout_callback() -> None:
# If we can cancel the future, do so and wait on it. If not,
# Just stop the loop and return with the task still pending.
# (If we neither cancel nor wait for the task, a warning
# will be logged).
assert future_cell[0] is not None
if not future_cell[0].cancel():
self.stop()
timeout_handle = self.add_timeout(self.time() + timeout, timeout_callback)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
assert future_cell[0] is not None
if future_cell[0].cancelled() or not future_cell[0].done():
raise TimeoutError("Operation timed out after %s seconds" % timeout)
return future_cell[0].result()
def time(self) -> float:
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
Historically, the IOLoop could be customized to use e.g.
`time.monotonic` instead of `time.time`, but this is not
currently supported and so this method is equivalent to
`time.time`.
"""
return time.time()
def add_timeout(
self,
deadline: Union[float, datetime.timedelta],
callback: Callable[..., Optional[Awaitable]],
*args: Any,
**kwargs: Any
) -> object:
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(
self.time() + deadline.total_seconds(), callback, *args, **kwargs
)
else:
raise TypeError("Unsupported deadline %r" % deadline)
def call_later(
self, delay: float, callback: Callable, *args: Any, **kwargs: Any
) -> object:
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(
self, when: float, callback: Callable, *args: Any, **kwargs: Any
) -> object:
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout: object) -> None:
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback: Callable, *args: Any, **kwargs: Any) -> None:
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(
self, callback: Callable, *args: Any, **kwargs: Any
) -> None:
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
"""
raise NotImplementedError()
def spawn_callback(self, callback: Callable, *args: Any, **kwargs: Any) -> None:
"""Calls the given callback on the next IOLoop iteration.
As of Tornado 6.0, this method is equivalent to `add_callback`.
.. versionadded:: 4.0
"""
self.add_callback(callback, *args, **kwargs)
def add_future(
self,
future: "Union[Future[_T], concurrent.futures.Future[_T]]",
callback: Callable[["Future[_T]"], None],
) -> None:
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
This method only accepts `.Future` objects and not other
awaitables (unlike most of Tornado where the two are
interchangeable).
"""
if isinstance(future, Future):
# Note that we specifically do not want the inline behavior of
# tornado.concurrent.future_add_done_callback. We always want
# this callback scheduled on the next IOLoop iteration (which
# asyncio.Future always does).
#
# Wrap the callback in self._run_callback so we control
# the error logging (i.e. it goes to tornado.log.app_log
# instead of asyncio's log).
future.add_done_callback(
lambda f: self._run_callback(functools.partial(callback, future))
)
else:
assert is_future(future)
# For concurrent futures, we use self.add_callback, so
# it's fine if future_add_done_callback inlines that call.
future_add_done_callback(
future, lambda f: self.add_callback(callback, future)
)
def run_in_executor(
self,
executor: Optional[concurrent.futures.Executor],
func: Callable[..., _T],
*args: Any
) -> Awaitable[_T]:
"""Runs a function in a ``concurrent.futures.Executor``. If
``executor`` is ``None``, the IO loop's default executor will be used.
Use `functools.partial` to pass keyword arguments to ``func``.
.. versionadded:: 5.0
"""
if executor is None:
if not hasattr(self, "_executor"):
from tornado.process import cpu_count
self._executor = concurrent.futures.ThreadPoolExecutor(
max_workers=(cpu_count() * 5)
) # type: concurrent.futures.Executor
executor = self._executor
c_future = executor.submit(func, *args)
# Concurrent Futures are not usable with await. Wrap this in a
# Tornado Future instead, using self.add_future for thread-safety.
t_future = Future() # type: Future[_T]
self.add_future(c_future, lambda f: chain_future(f, t_future))
return t_future
def set_default_executor(self, executor: concurrent.futures.Executor) -> None:
"""Sets the default executor to use with :meth:`run_in_executor`.
.. versionadded:: 5.0
"""
self._executor = executor
def _run_callback(self, callback: Callable[[], Any]) -> None:
"""Runs a callback with error handling.
.. versionchanged:: 6.0
CancelledErrors are no longer logged.
"""
try:
ret = callback()
if ret is not None:
from tornado import gen
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
try:
ret = gen.convert_yielded(ret)
except gen.BadYieldError:
# It's not unusual for add_callback to be used with
# methods returning a non-None and non-yieldable
# result, which should just be ignored.
pass
else:
self.add_future(ret, self._discard_future_result)
except asyncio.CancelledError:
pass
except Exception:
app_log.error("Exception in callback %r", callback, exc_info=True)
def _discard_future_result(self, future: Future) -> None:
"""Avoid unhandled-exception warnings from spawned coroutines."""
future.result()
def split_fd(
self, fd: Union[int, _Selectable]
) -> Tuple[int, Union[int, _Selectable]]:
# """Returns an (fd, obj) pair from an ``fd`` parameter.
# We accept both raw file descriptors and file-like objects as
# input to `add_handler` and related methods. When a file-like
# object is passed, we must retain the object itself so we can
# close it correctly when the `IOLoop` shuts down, but the
# poller interfaces favor file descriptors (they will accept
# file-like objects and call ``fileno()`` for you, but they
# always return the descriptor itself).
# This method is provided for use by `IOLoop` subclasses and should
# not generally be used by application code.
# .. versionadded:: 4.0
# """
if isinstance(fd, int):
return fd, fd
return fd.fileno(), fd
def close_fd(self, fd: Union[int, _Selectable]) -> None:
# """Utility method to close an ``fd``.
# If ``fd`` is a file-like object, we close it directly; otherwise
# we use `os.close`.
# This method is provided for use by `IOLoop` subclasses (in
# implementations of ``IOLoop.close(all_fds=True)`` and should
# not generally be used by application code.
# .. versionadded:: 4.0
# """
try:
if isinstance(fd, int):
os.close(fd)
else:
fd.close()
except OSError:
pass
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ["deadline", "callback", "tdeadline"]
def __init__(
self, deadline: float, callback: Callable[[], None], io_loop: IOLoop
) -> None:
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tdeadline = (
deadline,
next(io_loop._timeout_counter),
) # type: Tuple[float, int]
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other: "_Timeout") -> bool:
return self.tdeadline < other.tdeadline
def __le__(self, other: "_Timeout") -> bool:
return self.tdeadline <= other.tdeadline
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds when
``callback_time`` is a float. Note that the timeout is given in
milliseconds, while most other time-related functions in Tornado use
seconds. ``callback_time`` may alternatively be given as a
`datetime.timedelta` object.
If ``jitter`` is specified, each callback time will be randomly selected
within a window of ``jitter * callback_time`` milliseconds.
Jitter can be used to reduce alignment of events with similar periods.
A jitter of 0.1 means allowing a 10% variation in callback time.
The window is centered on ``callback_time`` so the total number of calls
within a given interval should not be significantly affected by adding
jitter.
If the callback runs for longer than ``callback_time`` milliseconds,
subsequent invocations will be skipped to get back on schedule.
`start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. versionchanged:: 5.1
The ``jitter`` argument is added.
.. versionchanged:: 6.2
If the ``callback`` argument is a coroutine, and a callback runs for
longer than ``callback_time``, subsequent invocations will be skipped.
Previously this was only true for regular functions, not coroutines,
which were "fire-and-forget" for `PeriodicCallback`.
"""
def __init__(
self,
callback: Callable[[], Optional[Awaitable]],
callback_time: Union[datetime.timedelta, float],
jitter: float = 0,
) -> None:
self.callback = callback
if isinstance(callback_time, datetime.timedelta):
self.callback_time = callback_time / datetime.timedelta(milliseconds=1)
else:
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.jitter = jitter
self._running = False
self._timeout = None # type: object
def start(self) -> None:
"""Starts the timer."""
        # Looking up the IOLoop here makes it possible to first instantiate
        # the PeriodicCallback in another thread, then start it using
        # IOLoop.add_callback().
self.io_loop = IOLoop.current()
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self) -> None:
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def is_running(self) -> bool:
"""Returns ``True`` if this `.PeriodicCallback` has been started.
.. versionadded:: 4.1
"""
return self._running
async def _run(self) -> None:
if not self._running:
return
try:
val = self.callback()
if val is not None and isawaitable(val):
await val
except Exception:
app_log.error("Exception in callback %r", self.callback, exc_info=True)
finally:
self._schedule_next()
def _schedule_next(self) -> None:
if self._running:
self._update_next(self.io_loop.time())
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
def _update_next(self, current_time: float) -> None:
callback_time_sec = self.callback_time / 1000.0
if self.jitter:
# apply jitter fraction
callback_time_sec *= 1 + (self.jitter * (random.random() - 0.5))
if self._next_timeout <= current_time:
# The period should be measured from the start of one call
# to the start of the next. If one call takes too long,
# skip cycles to get back to a multiple of the original
# schedule.
self._next_timeout += (
math.floor((current_time - self._next_timeout) / callback_time_sec) + 1
) * callback_time_sec
else:
# If the clock moved backwards, ensure we advance the next
# timeout instead of recomputing the same value again.
# This may result in long gaps between callbacks if the
# clock jumps backwards by a lot, but the far more common
# scenario is a small NTP adjustment that should just be
# ignored.
#
# Note that on some systems if time.time() runs slower
# than time.monotonic() (most common on windows), we
# effectively experience a small backwards time jump on
# every iteration because PeriodicCallback uses
# time.time() while asyncio schedules callbacks using
# time.monotonic().
# https://github.com/tornadoweb/tornado/issues/2333
self._next_timeout += callback_time_sec
| 37.553015
| 88
| 0.619748
|
410190765f55813e843e0b84bfaa8dbe8ec44353
| 865
|
py
|
Python
|
frc_characterization/elevator_characterization/templates/Talon/robotconfig.py
|
prateekma/frc-characterization
|
cfb9af35a2bf1f1e3dc8a743a9482997034305e2
|
[
"Apache-2.0"
] | null | null | null |
frc_characterization/elevator_characterization/templates/Talon/robotconfig.py
|
prateekma/frc-characterization
|
cfb9af35a2bf1f1e3dc8a743a9482997034305e2
|
[
"Apache-2.0"
] | null | null | null |
frc_characterization/elevator_characterization/templates/Talon/robotconfig.py
|
prateekma/frc-characterization
|
cfb9af35a2bf1f1e3dc8a743a9482997034305e2
|
[
"Apache-2.0"
] | null | null | null |
{
# Class names of motor controllers used.
# Options:
# 'WPI_TalonSRX'
# 'WPI_TalonFX'
# 'WPI_VictorSPX'
# Note: The first motor should always be a TalonSRX/FX, as the VictorSPX
# does not support encoder connections.
"controllerTypes": ["WPI_TalonSRX"],
# Ports for the motors
"motorPorts": [0],
# Inversions for the motors
"motorsInverted": [False],
# Unit of analysis
# Options:
# 'Degrees'
# 'Radians'
# 'Rotations'
"units": "Degrees",
# Pulley diameter (in units of your choice - will dictate units of analysis)
"pulleyDiameter": 0.333,
# This value should be the edges per revolution *of the pulley*, and so
# should take into account gearing between the encoder and the pulley
"encoderEPR": 512,
# Whether the encoder is inverted
"encoderInverted": False,
}
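# Illustrative note (not part of the template): when the encoder is geared to the
# pulley, encoderEPR must be scaled by the gear ratio. For example, a 128-EPR
# encoder driving the pulley through a hypothetical 4:1 reduction would give
#
#     encoderEPR = 128 * 4  # = 512 edges per pulley revolution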
| 30.892857
| 80
| 0.650867
|
54474b375635b0f06bfdc62fdc3b61b522b431b7
| 3,109
|
py
|
Python
|
frontera/contrib/backends/remote/messagebus.py
|
TeamHG-Memex/frontera
|
06ab4002428528a2d8b67c1e82368cc5988b2228
|
[
"BSD-3-Clause"
] | 3
|
2015-11-11T19:37:16.000Z
|
2017-03-15T13:33:54.000Z
|
frontera/contrib/backends/remote/messagebus.py
|
TeamHG-Memex/frontera
|
06ab4002428528a2d8b67c1e82368cc5988b2228
|
[
"BSD-3-Clause"
] | null | null | null |
frontera/contrib/backends/remote/messagebus.py
|
TeamHG-Memex/frontera
|
06ab4002428528a2d8b67c1e82368cc5988b2228
|
[
"BSD-3-Clause"
] | 2
|
2016-09-08T08:30:24.000Z
|
2018-10-02T22:00:47.000Z
|
# -*- coding: utf-8 -*-
from frontera import Backend
from frontera.core import OverusedBuffer
from codecs.msgpack import Encoder, Decoder
from frontera.utils.misc import load_object
import logging
class MessageBusBackend(Backend):
def __init__(self, manager):
settings = manager.settings
messagebus = load_object(settings.get('MESSAGE_BUS'))
self.mb = messagebus(settings)
store_content = settings.get('STORE_CONTENT')
self._encoder = Encoder(manager.request_model, send_body=store_content)
self._decoder = Decoder(manager.request_model, manager.response_model)
self.spider_log_producer = self.mb.spider_log().producer()
spider_feed = self.mb.spider_feed()
self.partition_id = int(settings.get('SPIDER_PARTITION_ID'))
if self.partition_id < 0 or self.partition_id >= settings.get('SPIDER_FEED_PARTITIONS'):
raise ValueError("Spider partition id cannot be less than 0 or more than SPIDER_FEED_PARTITIONS.")
self.consumer = spider_feed.consumer(partition_id=self.partition_id)
self._get_timeout = float(settings.get('KAFKA_GET_TIMEOUT'))
self._logger = logging.getLogger("messagebus-backend")
self._buffer = OverusedBuffer(self._get_next_requests,
self._logger.debug)
self._logger.info("Consuming from partition id %d", self.partition_id)
@classmethod
    def from_manager(cls, manager):
        return cls(manager)
def frontier_start(self):
pass
def frontier_stop(self):
self.spider_log_producer.flush()
def add_seeds(self, seeds):
self.spider_log_producer.send(seeds[0].meta['fingerprint'], self._encoder.encode_add_seeds(seeds))
def page_crawled(self, response, links):
self.spider_log_producer.send(response.meta['fingerprint'], self._encoder.encode_page_crawled(response, links))
def request_error(self, page, error):
self.spider_log_producer.send(page.meta['fingerprint'], self._encoder.encode_request_error(page, error))
def _get_next_requests(self, max_n_requests, **kwargs):
requests = []
for encoded in self.consumer.get_messages(count=max_n_requests, timeout=self._get_timeout):
try:
request = self._decoder.decode_request(encoded)
            except Exception as exc:
self._logger.warning("Could not decode message: {0}, error {1}".format(encoded, str(exc)))
else:
requests.append(request)
self.spider_log_producer.send('0123456789abcdef0123456789abcdef012345678',
self._encoder.encode_offset(self.partition_id, self.consumer.get_offset()))
return requests
def get_next_requests(self, max_n_requests, **kwargs):
return self._buffer.get_next_requests(max_n_requests, **kwargs)
def finished(self):
return False
@property
def metadata(self):
return None
@property
def queue(self):
return None
@property
def states(self):
return None
| 40.376623
| 119
| 0.681891
|
93fc6743a2c3d354b72e7b889f4a8a320320221a
| 25
|
py
|
Python
|
cd2h_repo_project/modules/contact_us/__init__.py
|
galterlibrary/InvenioRDM-at-NU
|
5aff6ac7c428c9a61bdf221627bfc05f2280d1a3
|
[
"MIT"
] | 6
|
2019-09-02T00:01:50.000Z
|
2021-11-04T08:23:40.000Z
|
cd2h_repo_project/modules/contact_us/__init__.py
|
galterlibrary/InvenioRDM-at-NU
|
5aff6ac7c428c9a61bdf221627bfc05f2280d1a3
|
[
"MIT"
] | 72
|
2019-09-04T18:52:35.000Z
|
2020-07-21T19:58:15.000Z
|
cd2h_repo_project/modules/contact_us/__init__.py
|
galterlibrary/InvenioRDM-at-NU
|
5aff6ac7c428c9a61bdf221627bfc05f2280d1a3
|
[
"MIT"
] | null | null | null |
"""Contact us module."""
| 12.5
| 24
| 0.6
|
dccefe230b7d1ccfe05b97e0b8790d1820fc7a92
| 1,514
|
py
|
Python
|
backend/admingym/users/models/users.py
|
ManuelRivera98/AdminGym
|
caf2b6f5e9a0ed9e98567a036bec9a34b44ecf13
|
[
"MIT"
] | 1
|
2020-09-14T04:23:07.000Z
|
2020-09-14T04:23:07.000Z
|
backend/admingym/users/models/users.py
|
ManuelRivera98/AdminGym
|
caf2b6f5e9a0ed9e98567a036bec9a34b44ecf13
|
[
"MIT"
] | null | null | null |
backend/admingym/users/models/users.py
|
ManuelRivera98/AdminGym
|
caf2b6f5e9a0ed9e98567a036bec9a34b44ecf13
|
[
"MIT"
] | null | null | null |
"""Owner model."""
# Django
from django.db import models
from django.contrib.auth.models import AbstractUser
# Validators => Allows us to validate a field against a regular expression.
from django.core.validators import RegexValidator
# Utilities
from admingym.utils.models import BaseModel
class User(BaseModel, AbstractUser):
"""Owner gym model."""
# Regex validations
cc_regex = RegexValidator(
regex=r'\d{8,10}$',
        message='Identification number must be 8 to 10 digits.',
)
email = models.EmailField(
'email address',
unique=True,
error_messages={
'unique': 'A user with that email already exits.'
}
)
cc = models.CharField(
validators=[cc_regex, ],
max_length=10,
unique=True
)
is_verified = models.BooleanField(
'verified client',
default=False,
        help_text='Set to true when the user has verified their email address.'
)
already_owns = models.BooleanField(
'active user gym',
default=False,
        help_text='The user must own a gym in order to use the app; this field is set to true when a gym object is created.'
)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', 'cc', 'first_name', 'last_name']
def __str__(self):
"""Return name's gym"""
return self.username
def __get_short_name(self):
"""Return name as identifier on the db."""
return self.username
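# Illustrative sketch (not part of the app): creating a user that satisfies the
# cc validator (8 to 10 digits). All field values below are made up.
#
#     user = User.objects.create_user(
#         username='owner1', email='owner@example.com', password='a-strong-password',
#         cc='12345678', first_name='Ana', last_name='Rivera',
#     )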
| 25.661017
| 137
| 0.638705
|
8329f86c406aa9559f7a4e0e902ff53373e4797e
| 803
|
py
|
Python
|
treeqn/models/encoding.py
|
botcs/treeqn
|
e2ebc46a99483fa188dd229b1a5828459167ccae
|
[
"MIT"
] | 107
|
2018-03-09T14:28:21.000Z
|
2022-03-23T08:24:48.000Z
|
treeqn/models/encoding.py
|
botcs/treeqn
|
e2ebc46a99483fa188dd229b1a5828459167ccae
|
[
"MIT"
] | 3
|
2018-05-01T16:31:41.000Z
|
2018-10-04T15:57:41.000Z
|
treeqn/models/encoding.py
|
botcs/treeqn
|
e2ebc46a99483fa188dd229b1a5828459167ccae
|
[
"MIT"
] | 22
|
2018-03-09T23:17:27.000Z
|
2020-12-11T20:53:05.000Z
|
import torch.nn as nn
import numpy as np
from treeqn.utils.pytorch_utils import nn_init
def atari_encoder(in_channels):
encoder = nn.Sequential(
nn_init(nn.Conv2d(in_channels, 16, kernel_size=8, stride=4), w_scale=np.sqrt(2)),
nn.ReLU(True),
nn_init(nn.Conv2d(16, 32, kernel_size=4, stride=2), w_scale=np.sqrt(2)),
nn.ReLU(True),
)
return encoder
def push_encoder(in_channels):
encoder = nn.Sequential(
nn_init(nn.Conv2d(in_channels, 24, kernel_size=3, stride=1), w_scale=1.0),
nn.ReLU(inplace=True),
nn_init(nn.Conv2d(24, 24, kernel_size=3, stride=1), w_scale=1.0),
nn.ReLU(inplace=True),
nn_init(nn.Conv2d(24, 48, kernel_size=4, stride=2), w_scale=1.0),
nn.ReLU(inplace=True),
)
return encoder
| 32.12
| 89
| 0.652553
|
b9cc7431174769b9f85de4daa2809d1eadcb547e
| 729
|
py
|
Python
|
algorithms/bfs-dfs/BFS.py
|
1tanwang/graph-and-graph-algorithms
|
0bf7d54630eaec56ccef9c224bbe86d4a2a63ebe
|
[
"Apache-2.0"
] | null | null | null |
algorithms/bfs-dfs/BFS.py
|
1tanwang/graph-and-graph-algorithms
|
0bf7d54630eaec56ccef9c224bbe86d4a2a63ebe
|
[
"Apache-2.0"
] | null | null | null |
algorithms/bfs-dfs/BFS.py
|
1tanwang/graph-and-graph-algorithms
|
0bf7d54630eaec56ccef9c224bbe86d4a2a63ebe
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from graph import Graph, read_adj_list
def bfs_shortest_path(graph, start, goal):
queue = [(start, [start])]
while queue:
(vertex, path) = queue.pop(0)
for next in set(graph.get_neighbors(vertex)) - set(path):
if next == goal:
return path + [next]
else:
queue.append((next, path + [next]))
return "no path from {} to {}".format(start, goal)
def bfs_connected_components(graph, start):
queue, visited = [start], set()
while queue:
        vertex = queue.pop(0)
if vertex not in visited:
visited.add(vertex)
            queue += set(graph.get_neighbors(vertex)) - visited
return visited
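# Illustrative sketch (not part of the module): the construction API of the local
# ``graph`` module is assumed here; only get_neighbors() is used above.
#
#     if __name__ == "__main__":
#         g = read_adj_list("graph.txt")  # hypothetical adjacency-list file
#         print(bfs_shortest_path(g, "A", "F"))
#         print(bfs_connected_components(g, "A"))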
| 27
| 65
| 0.580247
|
fd470b3868a4c1df77263111f04d0a1b0f6edb3e
| 669
|
py
|
Python
|
aiogram/types/animation.py
|
victorusachev/aiogram
|
9571669ca4b06165031d8f9830130f3c638b60d8
|
[
"MIT"
] | 3
|
2020-12-06T16:55:53.000Z
|
2021-11-19T19:25:57.000Z
|
aiogram/types/animation.py
|
Kylmakalle/aiogram
|
550c41e1752aa08c493d7cb4ec5fec402d8e849c
|
[
"MIT"
] | 1
|
2019-10-18T19:33:20.000Z
|
2019-10-18T19:33:20.000Z
|
aiogram/types/animation.py
|
Kylmakalle/aiogram
|
550c41e1752aa08c493d7cb4ec5fec402d8e849c
|
[
"MIT"
] | 2
|
2020-12-30T09:51:30.000Z
|
2021-11-10T16:50:28.000Z
|
from . import base
from . import fields
from . import mixins
from .photo_size import PhotoSize
class Animation(base.TelegramObject, mixins.Downloadable):
"""
You can provide an animation for your game so that it looks stylish in chats
(check out Lumberjack for an example).
This object represents an animation file to be displayed in the message containing a game.
https://core.telegram.org/bots/api#animation
"""
file_id: base.String = fields.Field()
thumb: PhotoSize = fields.Field(base=PhotoSize)
file_name: base.String = fields.Field()
mime_type: base.String = fields.Field()
file_size: base.Integer = fields.Field()
| 31.857143
| 94
| 0.724963
|
f9af4dd11f586d2ee77855be8be05bfd044b05d3
| 1,314
|
py
|
Python
|
app/migrations/0011_notifica_notifica_vista.py
|
mapoetto/group2_CTFLab
|
5b492ce46875ea37a57701686897bd9613e2dd13
|
[
"MIT"
] | 1
|
2021-10-15T14:37:33.000Z
|
2021-10-15T14:37:33.000Z
|
app/migrations/0011_notifica_notifica_vista.py
|
mapoetto/group2_CTFLab
|
5b492ce46875ea37a57701686897bd9613e2dd13
|
[
"MIT"
] | null | null | null |
app/migrations/0011_notifica_notifica_vista.py
|
mapoetto/group2_CTFLab
|
5b492ce46875ea37a57701686897bd9613e2dd13
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2020-09-02 09:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0010_auto_20200901_0951'),
]
operations = [
migrations.CreateModel(
name='Notifica',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('testo', models.CharField(max_length=120)),
('link', models.CharField(max_length=220)),
('destinatario', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Notifica_vista',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stato', models.CharField(max_length=120)),
('notifica_id', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='notifica_id', to='app.Notifica')),
('user_id', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_id', to='app.User')),
],
),
]
| 39.818182
| 180
| 0.606545
|
a73be8d0f3ce4711660c1034776079c4d23aa2dd
| 16,516
|
py
|
Python
|
merlion/models/automl/autosarima.py
|
jimgoo/Merlion
|
2239f4ba6fc4fc08b3f88be842908851ee17ddbf
|
[
"BSD-3-Clause"
] | 1
|
2022-01-16T01:18:31.000Z
|
2022-01-16T01:18:31.000Z
|
merlion/models/automl/autosarima.py
|
jimgoo/Merlion
|
2239f4ba6fc4fc08b3f88be842908851ee17ddbf
|
[
"BSD-3-Clause"
] | null | null | null |
merlion/models/automl/autosarima.py
|
jimgoo/Merlion
|
2239f4ba6fc4fc08b3f88be842908851ee17ddbf
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import logging
import warnings
from collections.abc import Iterator
from typing import Tuple, Any, Optional
import numpy as np
from merlion.models.automl.forecasting_layer_base import ForecasterAutoMLBase
from merlion.models.forecast.base import ForecasterBase
from merlion.models.forecast.sarima import SarimaConfig, Sarima
from merlion.transform.resample import TemporalResample
from merlion.utils import TimeSeries, autosarima_utils, UnivariateTimeSeries
from copy import deepcopy
logger = logging.getLogger(__name__)
class AutoSarimaConfig(SarimaConfig):
"""
Configuration class for `AutoSarima`.
"""
_default_transform = TemporalResample()
def __init__(
self,
max_forecast_steps: int = None,
target_seq_index: int = None,
order=("auto", "auto", "auto"),
seasonal_order=("auto", "auto", "auto", "auto"),
periodicity_strategy: str = "max",
maxiter: int = None,
max_k: int = 100,
max_dur: float = 3600,
approximation: bool = None,
approx_iter: int = None,
**kwargs,
):
"""
        For ``order`` and ``seasonal_order``, 'auto' indicates that the parameter
        should be selected automatically. AutoSarima currently supports automatic
        selection of the differencing order, the length of the seasonality cycle,
        the seasonal differencing order, and the remaining AR, MA, seasonal AR and
        seasonal MA parameters. Note that selection of the AR, MA, seasonal AR and
        seasonal MA parameters is implemented in a coupled way: the automatic
        search is only skipped when all of these parameters are specified explicitly.
:param max_forecast_steps: Max number of steps we aim to forecast
:param target_seq_index: The index of the univariate (amongst all
univariates in a general multivariate time series) whose value we
would like to forecast.
:param order: Order is (p, d, q) for an ARIMA(p, d, q) process. d must
be an integer indicating the integration order of the process, while
p and q must be integers indicating the AR and MA orders (so that
all lags up to those orders are included).
:param seasonal_order: Seasonal order is (P, D, Q, S) for seasonal ARIMA
process, where s is the length of the seasonality cycle (e.g. s=24
for 24 hours on hourly granularity). P, D, Q are as for ARIMA.
:param periodicity_strategy: selection strategy when detecting multiple
            periods. 'min' selects the smallest detected period, while 'max'
            selects the largest detected period.
:param maxiter: The maximum number of iterations to perform
:param max_k: Maximum number of models considered in the stepwise search
:param max_dur: Maximum training time considered in the stepwise search
:param approximation: Whether to use ``approx_iter`` iterations (instead
of ``maxiter``) to speed up computation. If ``None``, we use
approximation mode when the training data is too long (>150), or when
            the length of the period is too high (``periodicity > 12``).
:param approx_iter: The number of iterations to perform in approximation mode
"""
super().__init__(max_forecast_steps=max_forecast_steps, target_seq_index=target_seq_index, **kwargs)
self.order = order
self.seasonal_order = seasonal_order
self.periodicity_strategy = periodicity_strategy
self.maxiter = maxiter
self.max_k = max_k
self.max_dur = max_dur
self.approximation = approximation
self.approx_iter = approx_iter
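# Illustrative sketch (not part of Merlion): constructing an AutoSarima model that
# fixes the differencing orders and a 24-step seasonality while searching the
# remaining (S)ARIMA parameters. The train/forecast calls are assumed to follow
# the usual Merlion forecaster API.
#
#     model = AutoSarima(
#         max_forecast_steps=24,
#         order=("auto", 1, "auto"),
#         seasonal_order=("auto", 0, "auto", 24),
#         maxiter=5,
#     )
#     model.train(train_data)              # train_data: a merlion TimeSeries
#     forecast, stderr = model.forecast(24)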
class AutoSarima(ForecasterAutoMLBase):
config_class = AutoSarimaConfig
def __init__(self, model: ForecasterBase = None, **kwargs):
if model is None:
model = {}
if isinstance(model, dict):
model = Sarima(AutoSarimaConfig.from_dict({**model, **kwargs}))
super().__init__(model)
def _generate_sarima_parameters(self, train_data: TimeSeries) -> dict:
y = train_data.univariates[self.target_name].np_values
X = None
order = list(self.config.order)
seasonal_order = list(self.config.seasonal_order)
approximation = self.config.approximation
maxiter = self.config.maxiter
approx_iter = self.config.approx_iter
max_k = self.config.max_k
max_dur = self.config.max_dur
# These should be set in config
periodicity_strategy = "min"
stationary = False
seasonal_test = "seas"
method = "lbfgs"
test = "kpss"
stepwise = True
max_d = 2
start_p = 2
max_p = 5
start_q = 2
max_q = 5
max_D = 1
start_P = 1
max_P = 2
start_Q = 1
max_Q = 2
relative_improve = 0
trend = None
information_criterion = "aic"
n_samples = y.shape[0]
if n_samples <= 3:
information_criterion = "aic"
# check y
if y.ndim > 1:
raise ValueError("auto_sarima can only handle univariate time series")
if any(np.isnan(y)):
raise ValueError("there exists missing values in observed time series")
# detect seasonality
m = seasonal_order[-1]
if not isinstance(m, (int, float)):
m = 1
warnings.warn(
"Set periodicity to 1, use the SeasonalityLayer()" "wrapper to automatically detect seasonality."
)
# adjust max p,q,P,Q start p,q,P,Q
max_p = int(min(max_p, np.floor(n_samples / 3)))
max_q = int(min(max_q, np.floor(n_samples / 3)))
max_P = int(min(max_P, np.floor(n_samples / 3 / m))) if m != 1 else 0
max_Q = int(min(max_Q, np.floor(n_samples / 3 / m))) if m != 1 else 0
start_p = min(start_p, max_p)
start_q = min(start_q, max_q)
        start_P = min(start_P, max_P)
start_Q = min(start_Q, max_Q)
# set the seasonal differencing order with statistical test
D = seasonal_order[1] if seasonal_order[1] != "auto" else None
D = 0 if m == 1 else D
xx = y.copy()
if stationary:
D = 0
elif D is None:
D = autosarima_utils.nsdiffs(xx, m=m, max_D=max_D, test=seasonal_test)
if D > 0:
dx = autosarima_utils.diff(xx, differences=D, lag=m)
if dx.shape[0] == 0:
D = D - 1
dx = autosarima_utils.diff(xx, differences=D, lag=m) if D > 0 else xx
logger.info(f"Seasonal difference order is {str(D)}")
# set the differencing order by estimating the number of orders
# it would take in order to make the time series stationary
d = order[1] if order[1] != "auto" else autosarima_utils.ndiffs(dx, alpha=0.05, max_d=max_d, test=test)
if stationary:
d = 0
if d > 0:
dx = autosarima_utils.diff(dx, differences=d, lag=1)
logger.info(f"Difference order is {str(d)}")
# pqPQ is an indicator about whether need to automatically select
# AR, MA, seasonal AR and seasonal MA parameters
pqPQ = None
if order[0] != "auto" and order[2] != "auto" and seasonal_order[0] != "auto" and seasonal_order[2] != "auto":
pqPQ = True
# automatically detect whether to use approximation method and the periodicity
if approximation is None:
approximation = (y.shape[0] > 150) or (m > 12)
# check the size of y
n_samples = y.shape[0]
if n_samples <= 3:
information_criterion = "aic"
if m > 1:
if max_P > 0:
max_p = min(max_p, m - 1)
if max_Q > 0:
max_q = min(max_q, m - 1)
if (d + D) in (0, 1):
trend = "c"
if n_samples < 10:
start_p = min(start_p, 1)
start_q = min(start_q, 1)
start_P = start_Q = 0
# seed p, q, P, Q vals
p = min(start_p, max_p)
q = min(start_q, max_q)
P = min(start_P, max_P)
Q = min(start_Q, max_Q)
refititer = maxiter
return_dict = dict(
y=y,
X=X,
p=p,
d=d,
q=q,
P=P,
D=D,
Q=Q,
m=m,
dx=dx,
pqPQ=pqPQ,
max_p=max_p,
max_d=max_d,
max_q=max_q,
max_P=max_P,
max_D=max_D,
max_Q=max_Q,
trend=trend,
method=method,
maxiter=maxiter,
information_criterion=information_criterion,
relative_improve=relative_improve,
approximation=approximation,
max_k=max_k,
max_dur=max_dur,
approx_iter=approx_iter,
refititer=refititer,
stepwise=stepwise,
order=order,
seasonal_order=seasonal_order,
)
return return_dict
def generate_theta(self, train_data: TimeSeries) -> Iterator:
"""
        Generate [action, theta]. ``action`` indicates whether to run a stepwise
        search ("stepwise") over the p, q, P, Q and trend parameters, or to use a
        predefined parameter combination ("pqPQ"). ``theta`` is a list of
        parameters [order, seasonal_order, trend].
"""
val_dict = self._generate_sarima_parameters(train_data)
y = val_dict["y"]
pqPQ = val_dict["pqPQ"]
order = val_dict["order"]
seasonal_order = val_dict["seasonal_order"]
d = val_dict["d"]
D = val_dict["D"]
m = val_dict["m"]
dx = val_dict["dx"]
stepwise = val_dict["stepwise"]
action = None
trend = None
# input time-series is completely constant
if np.max(y) == np.min(y):
order = [0, 0, 0]
seasonal_order = [0, 0, 0, 0]
elif pqPQ is not None:
action = "pqPQ"
order[1] = d
seasonal_order[1] = D
seasonal_order[3] = m
if m == 1:
seasonal_order = [0, 0, 0, m]
elif np.max(dx) == np.min(dx):
order = [0, 0, 0]
seasonal_order = (0, 0, 0, m) if m == 1 else (0, D, 0, m)
elif stepwise:
action = "stepwise"
return iter([{"action": action, "theta": [order, seasonal_order, trend]}])
def evaluate_theta(
self, thetas: Iterator, train_data: TimeSeries, train_config=None
) -> Tuple[Any, Optional[ForecasterBase], Optional[Tuple[TimeSeries, Optional[TimeSeries]]]]:
theta_value = thetas.__next__()
# preprocess
train_config = train_config if train_config is not None else {}
if "enforce_stationarity" not in train_config:
train_config["enforce_stationarity"] = False
if "enforce_invertibility" not in train_config:
train_config["enforce_invertibility"] = False
val_dict = self._generate_sarima_parameters(train_data)
y = val_dict["y"]
X = val_dict["X"]
p = val_dict["p"]
d = val_dict["d"]
q = val_dict["q"]
P = val_dict["P"]
D = val_dict["D"]
Q = val_dict["Q"]
m = val_dict["m"]
max_p = val_dict["max_p"]
max_q = val_dict["max_q"]
max_P = val_dict["max_P"]
max_Q = val_dict["max_Q"]
trend = val_dict["trend"]
method = val_dict["method"]
maxiter = val_dict["maxiter"]
information_criterion = val_dict["information_criterion"]
approximation = val_dict["approximation"]
refititer = val_dict["refititer"]
relative_improve = val_dict["relative_improve"]
max_k = val_dict["max_k"]
max_dur = val_dict["max_dur"]
approx_iter = val_dict["approx_iter"]
# use zero model to automatically detect the optimal maxiter
if maxiter is None:
maxiter = autosarima_utils.detect_maxiter_sarima_model(
y=y, X=X, d=d, D=D, m=m, method=method, information_criterion=information_criterion
)
if theta_value["action"] == "stepwise":
refititer = maxiter
if approximation:
if approx_iter is None:
maxiter = max(int(maxiter / 5), 1)
else:
maxiter = approx_iter
logger.info(f"Fitting models using approximations(approx_iter is {str(maxiter)}) to speed things up")
# stepwise search
stepwise_search = autosarima_utils._StepwiseFitWrapper(
y=y,
X=X,
p=p,
d=d,
q=q,
P=P,
D=D,
Q=Q,
m=m,
max_p=max_p,
max_q=max_q,
max_P=max_P,
max_Q=max_Q,
trend=trend,
method=method,
maxiter=maxiter,
information_criterion=information_criterion,
relative_improve=relative_improve,
max_k=max_k,
max_dur=max_dur,
**train_config,
)
filtered_models_ics = stepwise_search.stepwisesearch()
if approximation:
logger.debug(f"Now re-fitting the best model(s) without approximations...")
if len(filtered_models_ics) > 0:
best_model_theta = filtered_models_ics[0][1]
best_model_fit = autosarima_utils._refit_sarima_model(
filtered_models_ics[0][0],
filtered_models_ics[0][2],
method,
maxiter,
refititer,
information_criterion,
)
logger.info(f"Best model: {autosarima_utils._model_name(best_model_fit.model)}")
else:
raise ValueError("Could not successfully fit a viable SARIMA model")
else:
if len(filtered_models_ics) > 0:
best_model_fit = filtered_models_ics[0][0]
best_model_theta = filtered_models_ics[0][1]
logger.info(f"Best model: {autosarima_utils._model_name(best_model_fit.model)}")
else:
raise ValueError("Could not successfully fit a viable SARIMA model")
elif theta_value["action"] == "pqPQ":
best_model_theta = theta_value["theta"]
order = theta_value["theta"][0]
seasonal_order = theta_value["theta"][1]
trend = theta_value["theta"][2]
if seasonal_order[3] == 1:
seasonal_order = [0, 0, 0, 0]
best_model_fit, fit_time, ic = autosarima_utils._fit_sarima_model(
y=y,
X=X,
order=order,
seasonal_order=seasonal_order,
trend=trend,
method=method,
maxiter=maxiter,
information_criterion=information_criterion,
**train_config,
)
else:
return theta_value, None, None
model = deepcopy(self.model)
model.reset()
self.set_theta(model, best_model_theta, train_data)
model.train_pre_process(train_data, require_even_sampling=True, require_univariate=False)
model.model = best_model_fit
name = model.target_name
train_data = train_data.univariates[name].to_pd()
times = train_data.index
yhat = model.model.fittedvalues
err = [np.sqrt(model.model.params[-1])] * len(train_data)
train_result = (
UnivariateTimeSeries(times, yhat, name).to_ts(),
UnivariateTimeSeries(times, err, f"{name}_err").to_ts(),
)
return best_model_theta, model, train_result
def set_theta(self, model, theta, train_data: TimeSeries = None):
order, seasonal_order, trend = theta
model.config.order = order
model.config.seasonal_order = seasonal_order
| 37.79405
| 117
| 0.577077
|
65eb35b294406a618f86393a1fed6ce207422438
| 38,442
|
py
|
Python
|
dns/zone.py
|
mbakke/dnspython
|
2d6630c50663e9cd55d7485642bac2eda02e96a0
|
[
"ISC"
] | null | null | null |
dns/zone.py
|
mbakke/dnspython
|
2d6630c50663e9cd55d7485642bac2eda02e96a0
|
[
"ISC"
] | null | null | null |
dns/zone.py
|
mbakke/dnspython
|
2d6630c50663e9cd55d7485642bac2eda02e96a0
|
[
"ISC"
] | null | null | null |
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Zones."""
import contextlib
import hashlib
import io
import os
import struct
import dns.exception
import dns.name
import dns.node
import dns.rdataclass
import dns.rdatatype
import dns.rdata
import dns.rdtypes.ANY.SOA
import dns.rdtypes.ANY.ZONEMD
import dns.rrset
import dns.tokenizer
import dns.transaction
import dns.ttl
import dns.grange
import dns.zonefile
class BadZone(dns.exception.DNSException):
"""The DNS zone is malformed."""
class NoSOA(BadZone):
"""The DNS zone has no SOA RR at its origin."""
class NoNS(BadZone):
"""The DNS zone has no NS RRset at its origin."""
class UnknownOrigin(BadZone):
"""The DNS zone's origin is unknown."""
class UnsupportedDigestScheme(dns.exception.DNSException):
"""The zone digest's scheme is unsupported."""
class UnsupportedDigestHashAlgorithm(dns.exception.DNSException):
"""The zone digest's origin is unsupported."""
class NoDigest(dns.exception.DNSException):
"""The DNS zone has no ZONEMD RRset at its origin."""
class DigestVerificationFailure(dns.exception.DNSException):
"""The ZONEMD digest failed to verify."""
class DigestScheme(dns.enum.IntEnum):
"""ZONEMD Scheme"""
SIMPLE = 1
@classmethod
def _maximum(cls):
return 255
class DigestHashAlgorithm(dns.enum.IntEnum):
"""ZONEMD Hash Algorithm"""
SHA384 = 1
SHA512 = 2
@classmethod
def _maximum(cls):
return 255
_digest_hashers = {
DigestHashAlgorithm.SHA384: hashlib.sha384,
    DigestHashAlgorithm.SHA512: hashlib.sha512
}
class Zone(dns.transaction.TransactionManager):
"""A DNS zone.
A ``Zone`` is a mapping from names to nodes. The zone object may be
treated like a Python dictionary, e.g. ``zone[name]`` will retrieve
the node associated with that name. The *name* may be a
``dns.name.Name object``, or it may be a string. In either case,
if the name is relative it is treated as relative to the origin of
the zone.
"""
node_factory = dns.node.Node
__slots__ = ['rdclass', 'origin', 'nodes', 'relativize']
def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True):
"""Initialize a zone object.
*origin* is the origin of the zone. It may be a ``dns.name.Name``,
a ``str``, or ``None``. If ``None``, then the zone's origin will
be set by the first ``$ORIGIN`` line in a zone file.
*rdclass*, an ``int``, the zone's rdata class; the default is class IN.
        *relativize*, a ``bool``, determines whether domain names are
relativized to the zone's origin. The default is ``True``.
"""
if origin is not None:
if isinstance(origin, str):
origin = dns.name.from_text(origin)
elif not isinstance(origin, dns.name.Name):
raise ValueError("origin parameter must be convertible to a "
"DNS name")
if not origin.is_absolute():
raise ValueError("origin parameter must be an absolute name")
self.origin = origin
self.rdclass = rdclass
self.nodes = {}
self.relativize = relativize
def __eq__(self, other):
"""Two zones are equal if they have the same origin, class, and
nodes.
Returns a ``bool``.
"""
if not isinstance(other, Zone):
return False
if self.rdclass != other.rdclass or \
self.origin != other.origin or \
self.nodes != other.nodes:
return False
return True
def __ne__(self, other):
"""Are two zones not equal?
Returns a ``bool``.
"""
return not self.__eq__(other)
def _validate_name(self, name):
if isinstance(name, str):
name = dns.name.from_text(name, None)
elif not isinstance(name, dns.name.Name):
raise KeyError("name parameter must be convertible to a DNS name")
if name.is_absolute():
if not name.is_subdomain(self.origin):
raise KeyError(
"name parameter must be a subdomain of the zone origin")
if self.relativize:
name = name.relativize(self.origin)
return name
def __getitem__(self, key):
key = self._validate_name(key)
return self.nodes[key]
def __setitem__(self, key, value):
key = self._validate_name(key)
self.nodes[key] = value
def __delitem__(self, key):
key = self._validate_name(key)
del self.nodes[key]
def __iter__(self):
return self.nodes.__iter__()
def keys(self):
return self.nodes.keys() # pylint: disable=dict-keys-not-iterating
def values(self):
return self.nodes.values() # pylint: disable=dict-values-not-iterating
def items(self):
return self.nodes.items() # pylint: disable=dict-items-not-iterating
def get(self, key):
key = self._validate_name(key)
return self.nodes.get(key)
def __contains__(self, key):
key = self._validate_name(key)
return key in self.nodes
def find_node(self, name, create=False):
"""Find a node in the zone, possibly creating it.
*name*: the name of the node to find.
The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
name must be a subdomain of the zone's origin. If ``zone.relativize``
is ``True``, then the name will be relativized.
*create*, a ``bool``. If true, the node will be created if it does
not exist.
Raises ``KeyError`` if the name is not known and create was
not specified, or if the name was not a subdomain of the origin.
Returns a ``dns.node.Node``.
"""
name = self._validate_name(name)
node = self.nodes.get(name)
if node is None:
if not create:
raise KeyError
node = self.node_factory()
self.nodes[name] = node
return node
def get_node(self, name, create=False):
"""Get a node in the zone, possibly creating it.
This method is like ``find_node()``, except it returns None instead
of raising an exception if the node does not exist and creation
has not been requested.
*name*: the name of the node to find.
The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
name must be a subdomain of the zone's origin. If ``zone.relativize``
is ``True``, then the name will be relativized.
*create*, a ``bool``. If true, the node will be created if it does
not exist.
Raises ``KeyError`` if the name is not known and create was
not specified, or if the name was not a subdomain of the origin.
Returns a ``dns.node.Node`` or ``None``.
"""
try:
node = self.find_node(name, create)
except KeyError:
node = None
return node
def delete_node(self, name):
"""Delete the specified node if it exists.
*name*: the name of the node to find.
The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
name must be a subdomain of the zone's origin. If ``zone.relativize``
is ``True``, then the name will be relativized.
It is not an error if the node does not exist.
"""
name = self._validate_name(name)
if name in self.nodes:
del self.nodes[name]
def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Look for an rdataset with the specified name and type in the zone,
and return an rdataset encapsulating it.
The rdataset returned is not a copy; changes to it will change
the zone.
KeyError is raised if the name or type are not found.
*name*: the name of the node to find.
The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
name must be a subdomain of the zone's origin. If ``zone.relativize``
is ``True``, then the name will be relativized.
*rdtype*, an ``int`` or ``str``, the rdata type desired.
*covers*, an ``int`` or ``str`` or ``None``, the covered type.
Usually this value is ``dns.rdatatype.NONE``, but if the
rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``,
then the covers value will be the rdata type the SIG/RRSIG
covers. The library treats the SIG and RRSIG types as if they
were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).
This makes RRSIGs much easier to work with than if RRSIGs
covering different rdata types were aggregated into a single
RRSIG rdataset.
*create*, a ``bool``. If true, the node will be created if it does
not exist.
Raises ``KeyError`` if the name is not known and create was
not specified, or if the name was not a subdomain of the origin.
Returns a ``dns.rdataset.Rdataset``.
"""
name = self._validate_name(name)
rdtype = dns.rdatatype.RdataType.make(rdtype)
if covers is not None:
covers = dns.rdatatype.RdataType.make(covers)
node = self.find_node(name, create)
return node.find_rdataset(self.rdclass, rdtype, covers, create)
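    # Illustrative sketch (not part of dnspython): the *covers* parameter in use.
    # RRSIGs covering different types live in separate rdatasets, e.g.:
    #
    #     soa_sigs = zone.find_rdataset('@', 'RRSIG', covers=dns.rdatatype.SOA)
    #     a_rdataset = zone.find_rdataset('www', 'A', create=True)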
def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Look for an rdataset with the specified name and type in the zone.
This method is like ``find_rdataset()``, except it returns None instead
of raising an exception if the rdataset does not exist and creation
has not been requested.
The rdataset returned is not a copy; changes to it will change
the zone.
*name*: the name of the node to find.
The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
name must be a subdomain of the zone's origin. If ``zone.relativize``
is ``True``, then the name will be relativized.
*rdtype*, an ``int`` or ``str``, the rdata type desired.
*covers*, an ``int`` or ``str`` or ``None``, the covered type.
Usually this value is ``dns.rdatatype.NONE``, but if the
rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``,
then the covers value will be the rdata type the SIG/RRSIG
covers. The library treats the SIG and RRSIG types as if they
were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).
This makes RRSIGs much easier to work with than if RRSIGs
covering different rdata types were aggregated into a single
RRSIG rdataset.
*create*, a ``bool``. If true, the node will be created if it does
not exist.
Raises ``KeyError`` if the name is not known and create was
not specified, or if the name was not a subdomain of the origin.
Returns a ``dns.rdataset.Rdataset`` or ``None``.
"""
try:
rdataset = self.find_rdataset(name, rdtype, covers, create)
except KeyError:
rdataset = None
return rdataset
def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):
"""Delete the rdataset matching *rdtype* and *covers*, if it
exists at the node specified by *name*.
It is not an error if the node does not exist, or if there is no
matching rdataset at the node.
If the node has no rdatasets after the deletion, it will itself
be deleted.
*name*: the name of the node to find.
The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
name must be a subdomain of the zone's origin. If ``zone.relativize``
is ``True``, then the name will be relativized.
*rdtype*, an ``int`` or ``str``, the rdata type desired.
*covers*, an ``int`` or ``str`` or ``None``, the covered type.
Usually this value is ``dns.rdatatype.NONE``, but if the
rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``,
then the covers value will be the rdata type the SIG/RRSIG
covers. The library treats the SIG and RRSIG types as if they
were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).
This makes RRSIGs much easier to work with than if RRSIGs
covering different rdata types were aggregated into a single
RRSIG rdataset.
"""
name = self._validate_name(name)
rdtype = dns.rdatatype.RdataType.make(rdtype)
if covers is not None:
covers = dns.rdatatype.RdataType.make(covers)
node = self.get_node(name)
if node is not None:
node.delete_rdataset(self.rdclass, rdtype, covers)
if len(node) == 0:
self.delete_node(name)
def replace_rdataset(self, name, replacement):
"""Replace an rdataset at name.
It is not an error if there is no rdataset matching I{replacement}.
Ownership of the *replacement* object is transferred to the zone;
in other words, this method does not store a copy of *replacement*
at the node, it stores *replacement* itself.
If the node does not exist, it is created.
*name*: the name of the node to find.
The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
name must be a subdomain of the zone's origin. If ``zone.relativize``
is ``True``, then the name will be relativized.
*replacement*, a ``dns.rdataset.Rdataset``, the replacement rdataset.
"""
if replacement.rdclass != self.rdclass:
raise ValueError('replacement.rdclass != zone.rdclass')
node = self.find_node(name, True)
node.replace_rdataset(replacement)
def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
"""Look for an rdataset with the specified name and type in the zone,
and return an RRset encapsulating it.
This method is less efficient than the similar
``find_rdataset()`` because it creates an RRset instead of
returning the matching rdataset. It may be more convenient
for some uses since it returns an object which binds the owner
name to the rdataset.
This method may not be used to create new nodes or rdatasets;
use ``find_rdataset`` instead.
*name*: the name of the node to find.
The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
name must be a subdomain of the zone's origin. If ``zone.relativize``
is ``True``, then the name will be relativized.
*rdtype*, an ``int`` or ``str``, the rdata type desired.
*covers*, an ``int`` or ``str`` or ``None``, the covered type.
Usually this value is ``dns.rdatatype.NONE``, but if the
rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``,
then the covers value will be the rdata type the SIG/RRSIG
covers. The library treats the SIG and RRSIG types as if they
were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).
This makes RRSIGs much easier to work with than if RRSIGs
covering different rdata types were aggregated into a single
RRSIG rdataset.
*create*, a ``bool``. If true, the node will be created if it does
not exist.
Raises ``KeyError`` if the name is not known and create was
not specified, or if the name was not a subdomain of the origin.
Returns a ``dns.rrset.RRset`` or ``None``.
"""
name = self._validate_name(name)
rdtype = dns.rdatatype.RdataType.make(rdtype)
if covers is not None:
covers = dns.rdatatype.RdataType.make(covers)
rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers)
rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers)
rrset.update(rdataset)
return rrset
def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
"""Look for an rdataset with the specified name and type in the zone,
and return an RRset encapsulating it.
This method is less efficient than the similar ``get_rdataset()``
because it creates an RRset instead of returning the matching
rdataset. It may be more convenient for some uses since it
returns an object which binds the owner name to the rdataset.
This method may not be used to create new nodes or rdatasets;
use ``get_rdataset()`` instead.
*name*: the name of the node to find.
The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
name must be a subdomain of the zone's origin. If ``zone.relativize``
is ``True``, then the name will be relativized.
*rdtype*, an ``int`` or ``str``, the rdata type desired.
*covers*, an ``int`` or ``str`` or ``None``, the covered type.
Usually this value is ``dns.rdatatype.NONE``, but if the
rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``,
then the covers value will be the rdata type the SIG/RRSIG
covers. The library treats the SIG and RRSIG types as if they
were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).
This makes RRSIGs much easier to work with than if RRSIGs
covering different rdata types were aggregated into a single
RRSIG rdataset.
*create*, a ``bool``. If true, the node will be created if it does
not exist.
Raises ``KeyError`` if the name is not known and create was
not specified, or if the name was not a subdomain of the origin.
Returns a ``dns.rrset.RRset`` or ``None``.
"""
try:
rrset = self.find_rrset(name, rdtype, covers)
except KeyError:
rrset = None
return rrset
def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY,
covers=dns.rdatatype.NONE):
"""Return a generator which yields (name, rdataset) tuples for
all rdatasets in the zone which have the specified *rdtype*
and *covers*. If *rdtype* is ``dns.rdatatype.ANY``, the default,
then all rdatasets will be matched.
*rdtype*, an ``int`` or ``str``, the rdata type desired.
*covers*, an ``int`` or ``str`` or ``None``, the covered type.
Usually this value is ``dns.rdatatype.NONE``, but if the
rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``,
then the covers value will be the rdata type the SIG/RRSIG
covers. The library treats the SIG and RRSIG types as if they
were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).
This makes RRSIGs much easier to work with than if RRSIGs
covering different rdata types were aggregated into a single
RRSIG rdataset.
"""
rdtype = dns.rdatatype.RdataType.make(rdtype)
if covers is not None:
covers = dns.rdatatype.RdataType.make(covers)
for (name, node) in self.items():
for rds in node:
if rdtype == dns.rdatatype.ANY or \
(rds.rdtype == rdtype and rds.covers == covers):
yield (name, rds)
def iterate_rdatas(self, rdtype=dns.rdatatype.ANY,
covers=dns.rdatatype.NONE):
"""Return a generator which yields (name, ttl, rdata) tuples for
all rdatas in the zone which have the specified *rdtype*
and *covers*. If *rdtype* is ``dns.rdatatype.ANY``, the default,
then all rdatas will be matched.
*rdtype*, an ``int`` or ``str``, the rdata type desired.
*covers*, an ``int`` or ``str`` or ``None``, the covered type.
Usually this value is ``dns.rdatatype.NONE``, but if the
rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``,
then the covers value will be the rdata type the SIG/RRSIG
covers. The library treats the SIG and RRSIG types as if they
were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).
This makes RRSIGs much easier to work with than if RRSIGs
covering different rdata types were aggregated into a single
RRSIG rdataset.
"""
rdtype = dns.rdatatype.RdataType.make(rdtype)
if covers is not None:
covers = dns.rdatatype.RdataType.make(covers)
for (name, node) in self.items():
for rds in node:
if rdtype == dns.rdatatype.ANY or \
(rds.rdtype == rdtype and rds.covers == covers):
for rdata in rds:
yield (name, rds.ttl, rdata)
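    # Illustrative sketch (not part of dnspython): iterating over all A records.
    #
    #     for name, ttl, rdata in zone.iterate_rdatas('A'):
    #         print(name, ttl, rdata.address)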
def to_file(self, f, sorted=True, relativize=True, nl=None,
want_comments=False):
"""Write a zone to a file.
*f*, a file or `str`. If *f* is a string, it is treated
as the name of a file to open.
*sorted*, a ``bool``. If True, the default, then the file
will be written with the names sorted in DNSSEC order from
least to greatest. Otherwise the names will be written in
whatever order they happen to have in the zone's dictionary.
*relativize*, a ``bool``. If True, the default, then domain
names in the output will be relativized to the zone's origin
if possible.
        *nl*, a ``str`` or None. The end of line string. If
        ``None``, the output will use the platform's native
        end-of-line marker (i.e. LF on POSIX, CRLF on Windows).
*want_comments*, a ``bool``. If ``True``, emit end-of-line comments
as part of writing the file. If ``False``, the default, do not
emit them.
"""
with contextlib.ExitStack() as stack:
if isinstance(f, str):
f = stack.enter_context(open(f, 'wb'))
# must be in this way, f.encoding may contain None, or even
# attribute may not be there
file_enc = getattr(f, 'encoding', None)
if file_enc is None:
file_enc = 'utf-8'
if nl is None:
# binary mode, '\n' is not enough
nl_b = os.linesep.encode(file_enc)
nl = '\n'
elif isinstance(nl, str):
nl_b = nl.encode(file_enc)
else:
nl_b = nl
nl = nl.decode()
if sorted:
names = list(self.keys())
names.sort()
else:
names = self.keys()
for n in names:
l = self[n].to_text(n, origin=self.origin,
relativize=relativize,
want_comments=want_comments)
l_b = l.encode(file_enc)
try:
f.write(l_b)
f.write(nl_b)
except TypeError: # textual mode
f.write(l)
f.write(nl)
def to_text(self, sorted=True, relativize=True, nl=None,
want_comments=False):
"""Return a zone's text as though it were written to a file.
*sorted*, a ``bool``. If True, the default, then the file
will be written with the names sorted in DNSSEC order from
least to greatest. Otherwise the names will be written in
whatever order they happen to have in the zone's dictionary.
*relativize*, a ``bool``. If True, the default, then domain
names in the output will be relativized to the zone's origin
if possible.
        *nl*, a ``str`` or None. The end of line string. If
        ``None``, the output will use the platform's native
        end-of-line marker (i.e. LF on POSIX, CRLF on Windows).
*want_comments*, a ``bool``. If ``True``, emit end-of-line comments
as part of writing the file. If ``False``, the default, do not
emit them.
Returns a ``str``.
"""
temp_buffer = io.StringIO()
self.to_file(temp_buffer, sorted, relativize, nl, want_comments)
return_value = temp_buffer.getvalue()
temp_buffer.close()
return return_value
def check_origin(self):
"""Do some simple checking of the zone's origin.
Raises ``dns.zone.NoSOA`` if there is no SOA RRset.
Raises ``dns.zone.NoNS`` if there is no NS RRset.
Raises ``KeyError`` if there is no origin node.
"""
if self.relativize:
name = dns.name.empty
else:
name = self.origin
if self.get_rdataset(name, dns.rdatatype.SOA) is None:
raise NoSOA
if self.get_rdataset(name, dns.rdatatype.NS) is None:
raise NoNS
def _compute_digest(self, hash_algorithm, scheme=DigestScheme.SIMPLE):
hashinfo = _digest_hashers.get(hash_algorithm)
if not hashinfo:
raise UnsupportedDigestHashAlgorithm
if scheme != DigestScheme.SIMPLE:
raise UnsupportedDigestScheme
if self.relativize:
origin_name = dns.name.empty
else:
origin_name = self.origin
hasher = hashinfo()
for (name, node) in sorted(self.items()):
rrnamebuf = name.to_digestable(self.origin)
for rdataset in sorted(node,
key=lambda rds: (rds.rdtype, rds.covers)):
if name == origin_name and \
dns.rdatatype.ZONEMD in (rdataset.rdtype, rdataset.covers):
continue
rrfixed = struct.pack('!HHI', rdataset.rdtype,
rdataset.rdclass, rdataset.ttl)
for rr in sorted(rdataset):
rrdata = rr.to_digestable(self.origin)
rrlen = struct.pack('!H', len(rrdata))
hasher.update(rrnamebuf + rrfixed + rrlen + rrdata)
return hasher.digest()
def compute_digest(self, hash_algorithm, scheme=DigestScheme.SIMPLE):
if self.relativize:
origin_name = dns.name.empty
else:
origin_name = self.origin
serial = self.get_rdataset(origin_name, dns.rdatatype.SOA)[0].serial
digest = self._compute_digest(hash_algorithm, scheme)
return dns.rdtypes.ANY.ZONEMD.ZONEMD(self.rdclass,
dns.rdatatype.ZONEMD,
serial, scheme, hash_algorithm,
digest)
def verify_digest(self, zonemd=None):
if zonemd:
digests = [zonemd]
else:
digests = self.get_rdataset(self.origin, dns.rdatatype.ZONEMD)
if digests is None:
raise NoDigest
for digest in digests:
try:
computed = self._compute_digest(digest.hash_algorithm,
digest.scheme)
if computed == digest.digest:
return
except Exception:
pass
raise DigestVerificationFailure
# TransactionManager methods
def reader(self):
return Transaction(self, False, True)
def writer(self, replacement=False):
return Transaction(self, replacement, False)
def origin_information(self):
if self.relativize:
effective = dns.name.empty
else:
effective = self.origin
return (self.origin, self.relativize, effective)
def get_class(self):
return self.rdclass
class Transaction(dns.transaction.Transaction):
_deleted_rdataset = dns.rdataset.Rdataset(dns.rdataclass.ANY,
dns.rdatatype.ANY)
def __init__(self, zone, replacement, read_only):
super().__init__(zone, replacement, read_only)
self.rdatasets = {}
@property
def zone(self):
return self.manager
def _get_rdataset(self, name, rdtype, covers):
rdataset = self.rdatasets.get((name, rdtype, covers))
if rdataset is self._deleted_rdataset:
return None
elif rdataset is None:
rdataset = self.zone.get_rdataset(name, rdtype, covers)
return rdataset
def _put_rdataset(self, name, rdataset):
assert not self.read_only
self.zone._validate_name(name)
self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset
def _delete_name(self, name):
assert not self.read_only
# First remove any changes involving the name
remove = []
for key in self.rdatasets:
if key[0] == name:
remove.append(key)
if len(remove) > 0:
for key in remove:
del self.rdatasets[key]
# Next add deletion records for any rdatasets matching the
# name in the zone
node = self.zone.get_node(name)
if node is not None:
for rdataset in node.rdatasets:
self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = \
self._deleted_rdataset
def _delete_rdataset(self, name, rdtype, covers):
assert not self.read_only
try:
del self.rdatasets[(name, rdtype, covers)]
except KeyError:
pass
rdataset = self.zone.get_rdataset(name, rdtype, covers)
if rdataset is not None:
self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = \
self._deleted_rdataset
def _name_exists(self, name):
for key, rdataset in self.rdatasets.items():
if key[0] == name:
if rdataset != self._deleted_rdataset:
return True
else:
return None
self.zone._validate_name(name)
if self.zone.get_node(name):
return True
return False
def _changed(self):
if self.read_only:
return False
else:
return len(self.rdatasets) > 0
def _end_transaction(self, commit):
if commit and self._changed():
for (name, rdtype, covers), rdataset in \
self.rdatasets.items():
if rdataset is self._deleted_rdataset:
self.zone.delete_rdataset(name, rdtype, covers)
else:
self.zone.replace_rdataset(name, rdataset)
def _set_origin(self, origin):
if self.zone.origin is None:
self.zone.origin = origin
def _iterate_rdatasets(self):
# Expensive but simple! Use a versioned zone for efficient txn
# iteration.
rdatasets = {}
for (name, rdataset) in self.zone.iterate_rdatasets():
rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset
rdatasets.update(self.rdatasets)
for (name, _, _), rdataset in rdatasets.items():
yield (name, rdataset)
def from_text(text, origin=None, rdclass=dns.rdataclass.IN,
relativize=True, zone_factory=Zone, filename=None,
allow_include=False, check_origin=True, idna_codec=None):
"""Build a zone object from a zone file format string.
*text*, a ``str``, the zone file format input.
*origin*, a ``dns.name.Name``, a ``str``, or ``None``. The origin
of the zone; if not specified, the first ``$ORIGIN`` statement in the
zone file will determine the origin of the zone.
*rdclass*, an ``int``, the zone's rdata class; the default is class IN.
    *relativize*, a ``bool``, determines whether domain names are
relativized to the zone's origin. The default is ``True``.
*zone_factory*, the zone factory to use or ``None``. If ``None``, then
``dns.zone.Zone`` will be used. The value may be any class or callable
that returns a subclass of ``dns.zone.Zone``.
*filename*, a ``str`` or ``None``, the filename to emit when
describing where an error occurred; the default is ``'<string>'``.
*allow_include*, a ``bool``. If ``True``, the default, then ``$INCLUDE``
    directives are permitted. If ``False``, then encountering a ``$INCLUDE``
will raise a ``SyntaxError`` exception.
*check_origin*, a ``bool``. If ``True``, the default, then sanity
checks of the origin node will be made by calling the zone's
``check_origin()`` method.
*idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
is used.
Raises ``dns.zone.NoSOA`` if there is no SOA RRset.
Raises ``dns.zone.NoNS`` if there is no NS RRset.
Raises ``KeyError`` if there is no origin node.
Returns a subclass of ``dns.zone.Zone``.
"""
# 'text' can also be a file, but we don't publish that fact
# since it's an implementation detail. The official file
# interface is from_file().
if filename is None:
filename = '<string>'
zone = zone_factory(origin, rdclass, relativize=relativize)
with zone.writer(True) as txn:
tok = dns.tokenizer.Tokenizer(text, filename, idna_codec=idna_codec)
reader = dns.zonefile.Reader(tok, rdclass, txn,
allow_include=allow_include)
try:
reader.read()
except dns.zonefile.UnknownOrigin:
# for backwards compatibility
raise dns.zone.UnknownOrigin
# Now that we're done reading, do some basic checking of the zone.
if check_origin:
zone.check_origin()
return zone
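# A minimal usage sketch of from_text(); the zone data below is invented for
# illustration and assumes the standard dnspython imports at the top of this
# module.
def _example_from_text():
    import dns.rdatatype
    zone = from_text(
        "example. 3600 IN SOA ns1.example. admin.example. 1 7200 900 1209600 86400\n"
        "example. 3600 IN NS ns1.example.\n"
        "ns1 3600 IN A 192.0.2.1\n",
        origin="example.")
    # Names are relativized to the origin by default, so "ns1" finds the A record.
    return zone.get_rdataset("ns1", dns.rdatatype.A)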
def from_file(f, origin=None, rdclass=dns.rdataclass.IN,
relativize=True, zone_factory=Zone, filename=None,
allow_include=True, check_origin=True):
"""Read a zone file and build a zone object.
*f*, a file or ``str``. If *f* is a string, it is treated
as the name of a file to open.
*origin*, a ``dns.name.Name``, a ``str``, or ``None``. The origin
of the zone; if not specified, the first ``$ORIGIN`` statement in the
zone file will determine the origin of the zone.
*rdclass*, an ``int``, the zone's rdata class; the default is class IN.
    *relativize*, a ``bool``, determines whether domain names are
relativized to the zone's origin. The default is ``True``.
*zone_factory*, the zone factory to use or ``None``. If ``None``, then
``dns.zone.Zone`` will be used. The value may be any class or callable
that returns a subclass of ``dns.zone.Zone``.
*filename*, a ``str`` or ``None``, the filename to emit when
describing where an error occurred; the default is ``'<string>'``.
*allow_include*, a ``bool``. If ``True``, the default, then ``$INCLUDE``
    directives are permitted. If ``False``, then encountering a ``$INCLUDE``
will raise a ``SyntaxError`` exception.
*check_origin*, a ``bool``. If ``True``, the default, then sanity
checks of the origin node will be made by calling the zone's
``check_origin()`` method.
*idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
is used.
Raises ``dns.zone.NoSOA`` if there is no SOA RRset.
Raises ``dns.zone.NoNS`` if there is no NS RRset.
Raises ``KeyError`` if there is no origin node.
Returns a subclass of ``dns.zone.Zone``.
"""
with contextlib.ExitStack() as stack:
if isinstance(f, str):
if filename is None:
filename = f
f = stack.enter_context(open(f))
return from_text(f, origin, rdclass, relativize, zone_factory,
filename, allow_include, check_origin)
def from_xfr(xfr, zone_factory=Zone, relativize=True, check_origin=True):
"""Convert the output of a zone transfer generator into a zone object.
*xfr*, a generator of ``dns.message.Message`` objects, typically
``dns.query.xfr()``.
    *relativize*, a ``bool``, determines whether domain names are
relativized to the zone's origin. The default is ``True``.
It is essential that the relativize setting matches the one specified
to the generator.
*check_origin*, a ``bool``. If ``True``, the default, then sanity
checks of the origin node will be made by calling the zone's
``check_origin()`` method.
Raises ``dns.zone.NoSOA`` if there is no SOA RRset.
Raises ``dns.zone.NoNS`` if there is no NS RRset.
Raises ``KeyError`` if there is no origin node.
Returns a subclass of ``dns.zone.Zone``.
"""
z = None
for r in xfr:
if z is None:
if relativize:
origin = r.origin
else:
origin = r.answer[0].name
rdclass = r.answer[0].rdclass
z = zone_factory(origin, rdclass, relativize=relativize)
for rrset in r.answer:
znode = z.nodes.get(rrset.name)
if not znode:
znode = z.node_factory()
z.nodes[rrset.name] = znode
zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype,
rrset.covers, True)
zrds.update_ttl(rrset.ttl)
for rd in rrset:
zrds.add(rd)
if check_origin:
z.check_origin()
return z
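# A minimal usage sketch of from_xfr(); the server address and zone name are
# invented for illustration.
def _example_from_xfr():
    import dns.query
    return from_xfr(dns.query.xfr("192.0.2.53", "example."))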
| 37.17795
| 79
| 0.606654
|
0d0ce87fa8ac7eaa3d9abf7849044d59d1524871
| 6,889
|
py
|
Python
|
tests/h/views/admin/organizations_test.py
|
bibliotechie/h
|
16e275f79ef7d1086971bd30ef403501c6b93beb
|
[
"BSD-2-Clause"
] | null | null | null |
tests/h/views/admin/organizations_test.py
|
bibliotechie/h
|
16e275f79ef7d1086971bd30ef403501c6b93beb
|
[
"BSD-2-Clause"
] | null | null | null |
tests/h/views/admin/organizations_test.py
|
bibliotechie/h
|
16e275f79ef7d1086971bd30ef403501c6b93beb
|
[
"BSD-2-Clause"
] | null | null | null |
from unittest.mock import Mock
import pytest
from h_matchers import Any
from h.models import Organization
from h.traversal import OrganizationContext
from h.views.admin.organizations import (
OrganizationCreateController,
OrganizationEditController,
index,
)
class FakeForm:
def set_appstruct(self, appstruct):
self.appstruct = appstruct
def render(self):
return self.appstruct
class TestIndex:
@pytest.mark.parametrize(
"query,expected_orgs",
[
# With no query, all orgs are returned, including the default
# "Hypothesis" organization.
(None, ["BioPub", "ChemPub", "Hypothesis"]),
# With a query, only matching orgs are returned.
("bio", ["BioPub"]),
],
)
def test_it_returns_filtered_orgs(
self, orgs, pyramid_request, query, expected_orgs
):
if query:
pyramid_request.GET["q"] = query
response = index(None, pyramid_request)
filtered_orgs = sorted([org.name for org in response["results"]])
assert filtered_orgs == expected_orgs
@pytest.fixture
def orgs(self, factories):
return [
factories.Organization(name="BioPub"),
factories.Organization(name="ChemPub"),
]
@pytest.mark.usefixtures("routes")
class TestOrganizationCreateController:
def test_get_sets_default_values(self, pyramid_request):
controller = OrganizationCreateController(pyramid_request)
response = controller.get()
assert response["form"] == {"authority": pyramid_request.default_authority}
def test_post_creates_org(self, pyramid_request, handle_form_submission):
def call_on_success(request, form, on_success, on_failure):
return on_success(
{
"name": "New organization",
"authority": "example.organization",
"logo": "<svg>a logo</svg>",
}
)
handle_form_submission.side_effect = call_on_success
controller = OrganizationCreateController(pyramid_request)
controller.post()
org = (
pyramid_request.db.query(Organization)
.filter_by(name="New organization")
.one()
)
assert org.authority == "example.organization"
assert org.logo == "<svg>a logo</svg>"
def test_post_redirects_to_list_view(
self, pyramid_request, handle_form_submission, matchers
):
def call_on_success(request, form, on_success, on_failure):
return on_success(
{
"name": "New organization",
"authority": "example.organization",
"logo": "<svg>a logo</svg>",
}
)
handle_form_submission.side_effect = call_on_success
controller = OrganizationCreateController(pyramid_request)
response = controller.post()
list_url = pyramid_request.route_url("admin.organizations")
assert response == matchers.Redirect302To(list_url)
@pytest.mark.usefixtures("routes")
class TestOrganizationEditController:
def test_read(self, get_controller, pyramid_request, organization):
response = get_controller().read()
expected_delete_url = pyramid_request.route_url(
"admin.organizations_delete", pubid=organization.pubid
)
assert response == {
"form": self._expected_form(organization),
"delete_url": expected_delete_url,
}
def test_logo_is_empty_if_not_set(self, get_controller, organization):
organization.logo = None
response = get_controller().read()
assert response["form"]["logo"] == ""
def test_read_does_not_show_delete_button_for_default_org(
self, get_controller, organization
):
organization.pubid = Organization.DEFAULT_PUBID
response = get_controller().read()
assert response["delete_url"] is None
def test_update_saves_org(
self, get_controller, organization, handle_form_submission
):
def call_on_success(request, form, on_success, on_failure):
return on_success(
{
"name": "Updated name",
"authority": organization.authority,
"logo": "<svg>new logo</svg>",
}
)
handle_form_submission.side_effect = call_on_success
response = get_controller().update()
assert organization.name == "Updated name"
assert organization.logo == "<svg>new logo</svg>"
assert response["form"] == self._expected_form(organization)
def test_delete(self, get_controller, organization, pyramid_request, matchers):
response = get_controller().delete()
assert organization in pyramid_request.db.deleted
list_url = pyramid_request.route_path("admin.organizations")
assert response == matchers.Redirect302To(list_url)
def test_delete_fails_if_org_has_groups(
self, get_controller, organization, pyramid_request, factories
):
factories.Group(name="Test", organization=organization)
response = get_controller().delete()
assert organization not in pyramid_request.db.deleted
assert pyramid_request.response.status_int == 400
pyramid_request.session.flash.assert_called_with(
Any.string.matching(".*Cannot delete.*1 groups"), "error"
)
assert response["form"] == self._expected_form(organization)
def _expected_form(self, organization):
return {
"authority": organization.authority,
"logo": organization.logo,
"name": organization.name,
}
@pytest.fixture
def get_controller(self, organization, pyramid_request):
def get_controller():
organization_context = OrganizationContext(
request=pyramid_request, organization=organization
)
return OrganizationEditController(organization_context, pyramid_request)
return get_controller
@pytest.fixture
def organization(self, factories):
return factories.Organization(logo="<svg></svg>")
@pytest.fixture
def pyramid_request(pyramid_request):
pyramid_request.session = Mock(spec_set=["flash", "get_csrf_token"])
pyramid_request.create_form.return_value = FakeForm()
return pyramid_request
@pytest.fixture
def handle_form_submission(patch):
return patch("h.views.admin.groups.form.handle_form_submission")
@pytest.fixture
def routes(pyramid_config):
pyramid_config.add_route("admin.organizations", "/admin/organizations")
pyramid_config.add_route(
"admin.organizations_delete", "/admin/organizations/delete/{pubid}"
)
| 31.893519
| 84
| 0.647119
|
df1dc2a9c474994e706aefd2e38fd8d99926282d
| 777
|
py
|
Python
|
ws2122-lspm/Lib/site-packages/pm4py/algo/analysis/marking_equation/__init__.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2022-01-19T04:02:46.000Z
|
2022-01-19T04:02:46.000Z
|
ws2122-lspm/Lib/site-packages/pm4py/algo/analysis/marking_equation/__init__.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2021-11-19T07:21:48.000Z
|
2021-11-19T07:21:48.000Z
|
ws2122-lspm/Lib/site-packages/pm4py/algo/analysis/marking_equation/__init__.py
|
Malekhy/ws2122-lspm
|
e4dc8b801d12f862b8ef536a0f125f346f085a00
|
[
"MIT"
] | 1
|
2022-01-14T17:15:38.000Z
|
2022-01-14T17:15:38.000Z
|
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py.algo.analysis.marking_equation import algorithm
| 43.166667
| 76
| 0.740026
|
dcdf07dba04956cdb01779f370cc4d37efdedb90
| 1,473
|
py
|
Python
|
Chapter13/Example13-1.py
|
liloganle/Reinforcement-Learning
|
29ffb74a1c8e506c544245c9aff37e958e503f26
|
[
"MIT"
] | 1
|
2018-08-27T10:09:06.000Z
|
2018-08-27T10:09:06.000Z
|
Chapter13/Example13-1.py
|
liloganle/Reinforcement-Learning
|
29ffb74a1c8e506c544245c9aff37e958e503f26
|
[
"MIT"
] | null | null | null |
Chapter13/Example13-1.py
|
liloganle/Reinforcement-Learning
|
29ffb74a1c8e506c544245c9aff37e958e503f26
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def true_state_value(probability):
"""
compute the true value of the first state according to dynamics
:param probability: the probability of right action
:return: the true value
V(S1)=p*1*(-1+V(S2))+(1-p)*1*(-1+V(S1))
V(S2)=p*1*(-1+V(S1))+(1-p)*1*(-1+V(S3))
V(S3)=p*1*(-1+0)+(1-p)*1*(-1+V(S2))
p is the probability of right action
===> V(S1) = 2*(p-2)/(p*(1-p))
"""
return 2*(probability-2)/(probability*(1-probability))
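def state_values_by_linear_solve(probability):
    """Cross-check of the closed form above: solve the three Bellman
    equations from the docstring as a linear system (illustrative helper,
    not used by the plotting code below).
    e.g. state_values_by_linear_solve(0.5)[0] == true_state_value(0.5) == -12.0
    """
    p = probability
    # Rearranged to A @ V = b with V = [V(S1), V(S2), V(S3)]:
    #    p*V1 - p*V2            = -1
    #   -p*V1 +   V2 - (1-p)*V3 = -1
    #         -(1-p)*V2 +   V3  = -1
    A = np.array([[p, -p, 0.0],
                  [-p, 1.0, -(1.0 - p)],
                  [0.0, -(1.0 - p), 1.0]])
    b = np.array([-1.0, -1.0, -1.0])
    return np.linalg.solve(A, b)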
if __name__ == "__main__":
epsilon = 0.1 # the epsilon-greedy
probability_range = np.linspace(0.03, 0.98, 100) # set the probability of right action
state_value = true_state_value(probability_range) # get the true state value
idx_max = np.argmax(state_value) # get the index of the maximum of state value
plt.figure(1)
plt.plot(probability_range, state_value, "k")
plt.plot(probability_range[idx_max], state_value[idx_max], "ro", label=r"optimal stochastic policy")
plt.plot(epsilon/2, true_state_value(epsilon/2), "bo", label=r"$\epsilon$-greedy left")
plt.plot(1-epsilon/2, true_state_value(1-epsilon/2), "go", label=r"$\epsilon$-greedy right")
plt.ylim(-100)
plt.xlabel("Probability of right action")
plt.ylabel("True Value of First State")
plt.legend()
plt.savefig("./images/Example13-1.png")
plt.show()
plt.close()
| 35.071429
| 104
| 0.638153
|
da6700b3cd59dccf7d4018e54d8275e7a115d795
| 7,449
|
py
|
Python
|
consumers/venv/lib/python3.7/site-packages/faust/transport/producer.py
|
spencerpomme/Public-Transit-Status-with-Apache-Kafka
|
2c85d7daadf4614fe7ce2eabcd13ff87236b1c7e
|
[
"MIT"
] | null | null | null |
consumers/venv/lib/python3.7/site-packages/faust/transport/producer.py
|
spencerpomme/Public-Transit-Status-with-Apache-Kafka
|
2c85d7daadf4614fe7ce2eabcd13ff87236b1c7e
|
[
"MIT"
] | null | null | null |
consumers/venv/lib/python3.7/site-packages/faust/transport/producer.py
|
spencerpomme/Public-Transit-Status-with-Apache-Kafka
|
2c85d7daadf4614fe7ce2eabcd13ff87236b1c7e
|
[
"MIT"
] | null | null | null |
"""Producer.
The Producer is responsible for:
- Holds reference to the transport that created it
- ... and the app via ``self.transport.app``.
- Sending messages.
"""
import asyncio
from asyncio import QueueEmpty
from typing import Any, Awaitable, Mapping, Optional, cast
from mode import Seconds, Service
from faust.types import AppT, HeadersArg
from faust.types.tuples import FutureMessage, RecordMetadata, TP
from faust.types.transports import ProducerT, TransportT
__all__ = ['Producer']
class ProducerBuffer(Service):
max_messages: int = 100
pending: asyncio.Queue
def __post_init__(self) -> None:
self.pending = asyncio.Queue()
def put(self, fut: FutureMessage) -> None:
"""Add message to buffer.
The message will be eventually produced, you can await
the future to wait for that to happen.
"""
self.pending.put_nowait(fut)
async def on_stop(self) -> None:
await self.flush()
async def flush(self) -> None:
"""Flush all messages (draining the buffer)."""
get_pending = self.pending.get_nowait
send_pending = self._send_pending
if self.size:
while True:
try:
msg = get_pending()
except QueueEmpty:
break
else:
await send_pending(msg)
async def flush_atmost(self, n: int) -> int:
"""Flush at most ``n`` messages."""
get_pending = self.pending.get_nowait
send_pending = self._send_pending
if self.size:
for i in range(n):
try:
msg = get_pending()
except QueueEmpty:
return i
else:
await send_pending(msg)
return 0
async def _send_pending(self, fut: FutureMessage) -> None:
await fut.message.channel.publish_message(fut, wait=False)
async def wait_until_ebb(self) -> None:
"""Wait until buffer is of an acceptable size.
Modifying a table key is using the Python dictionary API,
and as ``__getitem__`` is synchronous we have to add
pending messages to a buffer.
The ``__getitem__`` method cannot drain the buffer as doing
so requires trampolining into the event loop.
To solve this, we have the conductor wait until the buffer
is of an acceptable size before resuming stream processing flow.
"""
if self.size > self.max_messages:
await self.flush_atmost(self.max_messages)
@Service.task
async def _handle_pending(self) -> None:
get_pending = self.pending.get
send_pending = self._send_pending
while not self.should_stop:
msg = await get_pending()
await send_pending(msg)
@property
def size(self) -> int:
"""Current buffer size (messages waiting to be produced)."""
queue_items = self.pending._queue # type: ignore
queue_items = cast(list, queue_items)
return len(queue_items)
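# A hypothetical sketch of how ProducerBuffer is meant to be driven (method
# names follow the class above; ``fut`` stands for a FutureMessage obtained
# from a channel):
#
#   buffer.put(fut)                  # queued for the _handle_pending task
#   await buffer.wait_until_ebb()    # bound the backlog to ~max_messages
#   await buffer.flush()             # drain everything, e.g. on shutdown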
class Producer(Service, ProducerT):
"""Base Producer."""
app: AppT
_api_version: str
def __init__(self, transport: TransportT,
loop: asyncio.AbstractEventLoop = None,
**kwargs: Any) -> None:
self.transport = transport
self.app = self.transport.app
conf = self.transport.app.conf
self.client_id = conf.broker_client_id
self.linger_ms = conf.producer_linger_ms
self.max_batch_size = conf.producer_max_batch_size
self.acks = conf.producer_acks
self.max_request_size = conf.producer_max_request_size
self.compression_type = conf.producer_compression_type
self.request_timeout = conf.producer_request_timeout
self.ssl_context = conf.ssl_context
self.credentials = conf.broker_credentials
self.partitioner = conf.producer_partitioner
api_version = self._api_version = conf.producer_api_version
assert api_version is not None
super().__init__(loop=loop or self.transport.loop, **kwargs)
self.buffer = ProducerBuffer(loop=self.loop, beacon=self.beacon)
async def on_start(self) -> None:
await self.add_runtime_dependency(self.buffer)
async def send(self, topic: str, key: Optional[bytes],
value: Optional[bytes],
partition: Optional[int],
timestamp: Optional[float],
headers: Optional[HeadersArg],
*,
transactional_id: str = None) -> Awaitable[RecordMetadata]:
"""Schedule message to be sent by producer."""
raise NotImplementedError()
def send_soon(self, fut: FutureMessage) -> None:
self.buffer.put(fut)
async def send_and_wait(self, topic: str, key: Optional[bytes],
value: Optional[bytes],
partition: Optional[int],
timestamp: Optional[float],
headers: Optional[HeadersArg],
*,
transactional_id: str = None) -> RecordMetadata:
"""Send message and wait for it to be transmitted."""
raise NotImplementedError()
async def flush(self) -> None:
"""Flush all in-flight messages."""
# XXX subclasses must call self.buffer.flush() here.
...
async def create_topic(self,
topic: str,
partitions: int,
replication: int,
*,
config: Mapping[str, Any] = None,
timeout: Seconds = 1000.0,
retention: Seconds = None,
compacting: bool = None,
deleting: bool = None,
ensure_created: bool = False) -> None:
"""Create/declare topic on server."""
raise NotImplementedError()
def key_partition(self, topic: str, key: bytes) -> TP:
"""Hash key to determine partition."""
raise NotImplementedError()
async def begin_transaction(self, transactional_id: str) -> None:
"""Begin transaction by id."""
raise NotImplementedError()
async def commit_transaction(self, transactional_id: str) -> None:
"""Commit transaction by id."""
raise NotImplementedError()
async def abort_transaction(self, transactional_id: str) -> None:
"""Abort and rollback transaction by id."""
raise NotImplementedError()
async def stop_transaction(self, transactional_id: str) -> None:
"""Stop transaction by id."""
raise NotImplementedError()
async def maybe_begin_transaction(self, transactional_id: str) -> None:
"""Begin transaction by id, if not already started."""
raise NotImplementedError()
async def commit_transactions(
self,
tid_to_offset_map: Mapping[str, Mapping[TP, int]],
group_id: str,
start_new_transaction: bool = True) -> None:
"""Commit transactions."""
raise NotImplementedError()
def supports_headers(self) -> bool:
"""Return :const:`True` if headers are supported by this transport."""
return False
| 34.971831
| 78
| 0.595113
|
17ca26e36bf0e3956bdf1f0aeaba467bf8ba1ccb
| 6,792
|
py
|
Python
|
tests/unit/utils/test_dpb.py
|
urban48/debpackager
|
08efda2ffcb89286d802893c25ada35eed7f432d
|
[
"MIT"
] | 66
|
2016-07-09T15:02:26.000Z
|
2020-06-15T07:01:36.000Z
|
tests/unit/utils/test_dpb.py
|
urban48/debpackager
|
08efda2ffcb89286d802893c25ada35eed7f432d
|
[
"MIT"
] | 2
|
2016-07-10T06:46:45.000Z
|
2021-09-08T19:20:53.000Z
|
tests/unit/utils/test_dpb.py
|
urban48/debpackager
|
08efda2ffcb89286d802893c25ada35eed7f432d
|
[
"MIT"
] | 7
|
2016-07-10T06:17:26.000Z
|
2021-02-27T01:20:18.000Z
|
import os
import tempfile
import shutil
import pytest
from debpackager.utils.debain_package_manager import Dpm
@pytest.mark.unit
class TestDpb(object):
"""
unit tests for debian package builder module
"""
def setup_method(self, method):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
self.tmpdir = tempfile.mkdtemp()
self.dpb = Dpm(self.tmpdir, 'test-proj',
'1.0.0', self.tmpdir + '/install')
def teardown_method(self, method):
""" teardown any state that was previously setup with a setup_method
call.
"""
if os.path.exists(self.tmpdir):
shutil.rmtree(self.tmpdir)
def test_dh_make(self):
""" Test if method created debian folder with default files"""
assert self.dpb._dh_make() == 0
assert os.path.exists(self.dpb.debian_package_path + '/debian') is True
def test_create_install_file(self):
""" test that install file is created"""
self.dpb._dh_make()
self.dpb._create_install_file()
install_file_path = self.dpb.debian_package_path + \
'/debian/test-proj.install'
assert os.path.exists(install_file_path) is True
install_content = '../{}/* {}\n'.format(os.path.basename(self.tmpdir),
self.tmpdir + '/install')
with open(install_file_path, 'r') as inst_file:
assert install_content in inst_file.read()
def test_add_deb_dependencies(self):
""" make sure deb dependencies are parsed and added properly"""
self.dpb._dh_make()
self.dpb.dependencies = ['deb1>=0.1.2', 'deb2']
self.dpb._add_deb_dependencies()
control_file_path = self.dpb.debian_package_path + '/debian/control'
dependencies_string = 'Depends: ${misc:Depends}, deb1(>=0.1.2), deb2'
with open(control_file_path, 'r') as inst_file:
assert dependencies_string in inst_file.read()
def test_set_exclude(self):
""" verifies that debian excludes are added"""
self.dpb._dh_make()
self.dpb.excludes = ['.pyc', 'doc', 'specs.cfg']
self.dpb._set_exclude()
rules_file_path = self.dpb.debian_package_path + '/debian/rules'
dependencies_string = '\tdh_install -X .pyc -X doc -X specs.cfg'
with open(rules_file_path, 'r') as inst_file:
assert dependencies_string in inst_file.read()
def test_add_maintainer_scripts(self):
""" verify maintainern scripts if exists added to debian"""
os.mkdir(self.tmpdir + '/debian')
with open(self.tmpdir + '/debian/postinst', 'w') as post_file:
post_file.write('echo "im a custom postinst script"')
self.dpb._dh_make()
self.dpb._add_maintainer_scripts()
postinst_file_path = self.dpb.debian_package_path + \
'/debian/test-proj.postinst'
postinst_content = 'echo "im a custom postinst script"'
with open(postinst_file_path, 'r') as inst_file:
assert postinst_content in inst_file.read()
def test_add_maintainer_scripts_no_debian_dir(self):
""" check that maintainer scripts not created, and no errors thrown
if no debian dir exists"""
self.dpb._dh_make()
self.dpb._add_maintainer_scripts()
postinst_file_path = self.dpb.debian_package_path + \
'/debian/test-proj.postinst'
assert os.path.exists(postinst_file_path) is False
def test_add_startup_script_upstart(self):
""" verify that upstart script is added correctly"""
os.mkdir(self.tmpdir + '/debian')
with open(self.tmpdir + '/debian/ad-server.upstart', 'w') as up_file:
up_file.write('echo "im a upstart script"')
self.dpb._dh_make()
self.dpb._add_startup_script()
upstart_file_path = self.dpb.debian_package_path + \
'/debian/ad-server.upstart'
upstart_content = 'echo "im a upstart script"'
with open(upstart_file_path, 'r') as inst_file:
assert upstart_content in inst_file.read()
def test_add_startup_script_initd(self):
""" verify that init.d script is added correctly"""
os.mkdir(self.tmpdir + '/debian')
with open(self.tmpdir + '/debian/ad-server.init', 'w') as init_file:
init_file.write('echo "im a init script"')
self.dpb._dh_make()
self.dpb._add_startup_script()
init_file_path = self.dpb.debian_package_path + \
'/debian/ad-server.init'
init_content = 'echo "im a init script"'
with open(init_file_path, 'r') as inst_file:
assert init_content in inst_file.read()
def test_add_startup_script_no_deb_dir(self):
""" check that no errors thrown if no deb dir exists"""
self.dpb._dh_make()
self.dpb._add_startup_script()
upstart_file_path = self.dpb.debian_package_path + \
'/debian/ad-server.upstart'
assert os.path.exists(upstart_file_path) is False
init_file_path = self.dpb.debian_package_path + \
'/debian/ad-server.init'
assert os.path.exists(init_file_path) is False
def test_add_default_description(self):
""" test that description is added"""
self.dpb._dh_make()
self.dpb._add_description()
control_file_path = self.dpb.debian_package_path + '/debian/control'
description_string = 'Description: test-proj Package'
with open(control_file_path, 'r') as inst_file:
assert description_string in inst_file.read()
def test_add_too_long_description(self):
""" test that too long description is discarded"""
self.dpb._dh_make()
self.dpb.description = 'very long description' * 100
self.dpb._add_description()
control_file_path = self.dpb.debian_package_path + '/debian/control'
description_string = 'Description: test-proj Package'
with open(control_file_path, 'r') as inst_file:
assert description_string in inst_file.read()
def test_custom_description(self):
""" test that custom description is added"""
self.dpb._dh_make()
self.dpb.description = 'custom description'
self.dpb._add_description()
control_file_path = self.dpb.debian_package_path + '/debian/control'
description_string = 'custom description'
with open(control_file_path, 'r') as inst_file:
assert description_string in inst_file.read()
| 36.713514
| 79
| 0.632509
|
9b1fe320ed633b49b2c2634868cafc289eac3b95
| 190
|
py
|
Python
|
tests/test_api.py
|
eoranged/squash
|
9fb088e419ab0b0a16dbde126b12051ea2dc4bab
|
[
"MIT"
] | null | null | null |
tests/test_api.py
|
eoranged/squash
|
9fb088e419ab0b0a16dbde126b12051ea2dc4bab
|
[
"MIT"
] | 13
|
2019-08-06T00:39:44.000Z
|
2019-10-28T21:55:16.000Z
|
tests/test_api.py
|
eoranged/squash
|
9fb088e419ab0b0a16dbde126b12051ea2dc4bab
|
[
"MIT"
] | 1
|
2019-08-21T17:20:22.000Z
|
2019-08-21T17:20:22.000Z
|
async def test_api_hello(test_cli):
resp = await test_cli.get('/api/v1/hello')
assert resp.status == 200
json_data = await resp.json()
assert json_data == {'hello': 'world'}
| 31.666667
| 46
| 0.663158
|
dcb85a555a8e65cb3a8028a5043f5ddd82d2df24
| 243
|
py
|
Python
|
tests/stats/test_rv_base.py
|
EUB-LE/domain-adaptation
|
8e548cf99663b12b9fac322ee1c7668118cb1ade
|
[
"MIT"
] | null | null | null |
tests/stats/test_rv_base.py
|
EUB-LE/domain-adaptation
|
8e548cf99663b12b9fac322ee1c7668118cb1ade
|
[
"MIT"
] | 5
|
2021-05-22T07:58:28.000Z
|
2021-05-23T10:58:37.000Z
|
tests/stats/test_rv_base.py
|
EUB-LE/domain-adaptation
|
8e548cf99663b12b9fac322ee1c7668118cb1ade
|
[
"MIT"
] | null | null | null |
import unittest
from daproperties.stats.rv_base import rv_base
class TestRVBase(unittest.TestCase):
# TODO: Implement
def test_divergence_from_distribution(self):
class rv_subclass(rv_base):
pass
pass
| 18.692308
| 49
| 0.699588
|
6ff4d235b410cecb74e5e0e00ced025dab1d0a2a
| 1,473
|
py
|
Python
|
ha-na-ma-nextstrain/scripts/filter_ha_ma.py
|
blab/cartography
|
36ef6408e7bdb73b59003166ad7725662fd8fbe6
|
[
"MIT"
] | 1
|
2021-11-19T14:23:29.000Z
|
2021-11-19T14:23:29.000Z
|
ha-na-nextstrain/scripts/filter_ha_ma.py
|
blab/cartography
|
36ef6408e7bdb73b59003166ad7725662fd8fbe6
|
[
"MIT"
] | 17
|
2021-07-14T17:44:02.000Z
|
2022-02-11T18:43:46.000Z
|
ha-na-ma-nextstrain/scripts/filter_ha_ma.py
|
blab/cartography
|
36ef6408e7bdb73b59003166ad7725662fd8fbe6
|
[
"MIT"
] | 1
|
2020-04-11T15:23:09.000Z
|
2020-04-11T15:23:09.000Z
|
import argparse
import numpy as np
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import re
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--sequence", nargs=3, help="sequences to filter, the one to filter first and one to filter by second")
parser.add_argument("--output_fasta", nargs=3, help="FASTA files of split genomes")
args = parser.parse_args()
# Index sequences without loading them into memory. This gives us access to
# names of strains in both files that we can cross-check.
sequences_a = SeqIO.index(args.sequence[0], "fasta")
sequences_b = SeqIO.index(args.sequence[1], "fasta")
sequences_c = SeqIO.index(args.sequence[2], "fasta")
# Identify shared sequences between the two sets.
    shared_strains = set(sequences_a.keys()) & set(sequences_b.keys()) & set(sequences_c.keys())
    print(f"Found {len(shared_strains)} shared strains between the input sequence files.")
# Write out shared strains for sequence set a.
SeqIO.write(
(sequences_a[strain] for strain in shared_strains),
args.output_fasta[0],
"fasta"
)
# Write out shared strains for sequence set b.
SeqIO.write(
(sequences_b[strain] for strain in shared_strains),
args.output_fasta[1],
"fasta"
)
SeqIO.write(
(sequences_c[strain] for strain in shared_strains),
args.output_fasta[2],
"fasta"
)
| 32.733333
| 127
| 0.68296
|
603f769847f843d4609ce5cbaeb8eec9f635d354
| 10,154
|
py
|
Python
|
examples/BigBoy_refactor/players/MediumQPlayer.py
|
attraylor/poke-env
|
05eb57800c16229ec683762e628aacb0b6dd9cc3
|
[
"MIT"
] | 4
|
2020-09-15T15:24:57.000Z
|
2021-03-02T19:48:24.000Z
|
examples/BigBoy_refactor/players/MediumQPlayer.py
|
attraylor/poke-env
|
05eb57800c16229ec683762e628aacb0b6dd9cc3
|
[
"MIT"
] | 10
|
2021-11-01T10:20:30.000Z
|
2022-03-29T10:27:25.000Z
|
examples/BigBoy_refactor/players/MediumQPlayer.py
|
attraylor/poke-env
|
05eb57800c16229ec683762e628aacb0b6dd9cc3
|
[
"MIT"
] | 1
|
2021-03-08T16:02:46.000Z
|
2021-03-08T16:02:46.000Z
|
from poke_env.player.env_player import (
Gen8EnvSinglePlayer,
)
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import torch
import torch.optim as optim
from players.BigBoyEncoderPlayer import BigBoyEncoderPlayer
class MediumQPlayer(BigBoyEncoderPlayer):
def __init__(self,
name,
shortname,
team,
battle_format="gen8ou",
log_level = 0,
server_configuration=None,
save_replays=False,
args = {}):
super().__init__(
name = name,
shortname=shortname,
team=team,
battle_format=battle_format,
log_level = log_level,
server_configuration=server_configuration,
save_replays=save_replays)
self.name = name
self.num_batches = args.num_fit_batches #TODO: DUMMY!
self.batch_size = args.fit_batch_size #TODO: DUMMY!
self.rb_beta = args.rb_beta #TODO: DUMMY!
self.gamma = args.gamma #TODO: DUMMY!
self.num_layers = args.num_layers
self.eps_start = args.eps_start
self.eps_end = args.eps_end
self.eps_decay = args.eps_decay#25000
self.learning_rate = args.learning_rate
self.nstep = args.nstep_returns
self.input_dim_size = 301
self.hidden_dim_size = 64
self.output_dim_size = 22
self.policy_net_theta = self.make_model()
self.target_net_theta = self.make_model()
self.target_net_theta.load_state_dict(self.policy_net_theta.state_dict())
self.optimizer_theta = optim.Adam(self.policy_net_theta.parameters(), lr=self.learning_rate)
self.use_pokemon_encoder = True
if self.use_pokemon_encoder == True:
self.pokemon_encoder_size = 32
self.pokemon_encoder_input_size = 19 + 7 + 1
self.pokemon_encoder = []
self.pokemon_encoder.append(nn.Linear(self.pokemon_encoder_input_size, self.pokemon_encoder_size))#self.pokemon_encoder_size)
self.pokemon_encoder.append(nn.ReLU())
self.pokemon_encoder.append(nn.Linear(self.pokemon_encoder_size, self.pokemon_encoder_input_size))#self.pokemon_encoder_size)
self.pokemon_encoder = nn.Sequential(*self.pokemon_encoder)
self.type_embedding = nn.Embedding(19, 19)
self.type_embedding.weight.data = torch.FloatTensor(np.eye(19))
self.type_embedding.weight.requires_grad = False
self.status_embedding = nn.Embedding(7, 7)
self.status_embedding.weight.data = torch.FloatTensor(np.eye(7))
self.status_embedding.weight.requires_grad = False
self.weather_embedding = nn.Embedding(8, 8)
self.weather_embedding.weight.data = torch.FloatTensor(np.eye(8))
self.weather_embedding.weight.requires_grad = False
def make_model(self):
input_layer = nn.Linear(self.input_dim_size, self.hidden_dim_size)
layers = [nn.LayerNorm(self.input_dim_size), input_layer]
layers.append(nn.ReLU())
for i in range(0, self.num_layers):
layers.append(nn.LayerNorm(self.hidden_dim_size))
layers.append(nn.Linear(self.hidden_dim_size, self.hidden_dim_size))
layers.append(nn.ReLU())
last_layer = nn.Linear(self.hidden_dim_size, self.output_dim_size)
last_layer.weight.data.fill_(0)
last_layer.bias.data.fill_(0)
layers.append(last_layer)
return nn.Sequential(*layers)
def forward(self, model, batch):
if len(batch.shape) == 1:
batch_size = 1
batch = batch.unsqueeze(0)
else:
batch_size = batch.shape[0]
features = []
features.append(torch.FloatTensor(batch[:,self.field_to_idx["our_pokemon_1_move_powers"]]))
move_type_ids = self.type_embedding(batch[:,self.field_to_idx["our_pokemon_1_move_type_ids"]].long())
features.append(move_type_ids.reshape(batch_size, move_type_ids.shape[1] * move_type_ids.shape[2]))
features.append(torch.FloatTensor(batch[:,self.field_to_idx["our_pokemon_1_boosts"]]))
for i in range(1, 7):
pokemon_object = []
pokemon_object.append(self.type_embedding(batch[:,self.field_to_idx["our_pokemon_{}_type_ids".format(i)][0]].long()) + self.type_embedding(batch[:,self.field_to_idx["our_pokemon_{}_type_ids".format(i)][1]].long()))
pokemon_object.append(torch.FloatTensor(batch[:,self.field_to_idx["our_pokemon_{}_hp_percentage".format(i)]]))
pokemon_object.append(self.status_embedding(batch[:,self.field_to_idx["our_pokemon_{}_status_id".format(i)]].long()))
pokemon_object = torch.cat(pokemon_object,dim=1)
if self.use_pokemon_encoder == True:
features.append(self.pokemon_encoder(pokemon_object))
else:
features.append(pokemon_object)
features.append(self.type_embedding(batch[:,self.field_to_idx["opponent_pokemon_active_type_ids"][0]].long()) + self.type_embedding(batch[:,self.field_to_idx["opponent_pokemon_active_type_ids"][1]].long()))
features.append(torch.FloatTensor(batch[:,self.field_to_idx["opponent_pokemon_active_boosts"]]))
features.append(torch.FloatTensor(batch[:,self.field_to_idx["opponent_pokemon_active_hp_percentage"]]))
#STATUSED?
features.append(self.status_embedding(batch[:,self.field_to_idx["opponent_pokemon_active_status_id"]].long()))
#WEATHER?
features.append(self.weather_embedding(batch[:,self.field_to_idx["weather"]].long()))
#TERRAIN, GRAVITY, TRICK ROOM?
features.append(torch.FloatTensor(batch[:,self.field_to_idx["fields"]]))
#TAUNTED?
features.append(torch.FloatTensor(batch[:,self.field_to_idx["our_pokemon_1_volatiles"][-3]]).unsqueeze(1))
features.append(torch.FloatTensor(batch[:,self.field_to_idx["opponent_pokemon_active_volatiles"][-3]]).unsqueeze(1))
#ROCKS?
features.append(torch.FloatTensor(batch[:,self.field_to_idx["our_side_conditions"][7]]).unsqueeze(1))
features.append(torch.FloatTensor(batch[:,self.field_to_idx["opponent_side_conditions"][7]]).unsqueeze(1))
#TODO: knock off, move IDs, opponent team backup pokemon, trapped?, uturn
features = torch.cat(features,dim=1)
state_embedding = model(features)
return state_embedding
def fit_target(self):
self.target_net_theta.load_state_dict(self.policy_net_theta.state_dict())
def fit(self, replay_buffer):
num_batches = self.num_batches
batch_loss_theta = 0
for batch_idx in range(0, num_batches):
batch = replay_buffer.sample(self.batch_size, beta = self.rb_beta)
state_batch = torch.FloatTensor(batch["obs"].squeeze(2))
action_batch = torch.LongTensor(batch["act"])
next_state = torch.FloatTensor(batch["next_obs"].squeeze(2))
reward_batch = torch.FloatTensor(batch["rew"])
#print(state_batch.shape, action_batch.shape, next_state.shape, reward_batch.shape)
q_values_theta = self.forward(self.policy_net_theta, state_batch)
q_values_ns_theta = self.forward(self.target_net_theta, next_state)
max_action_indices_theta = q_values_ns_theta.argmax(dim=1) #TODO: doublecheck
# Compute Q(s_t, a) - the model computes Q(s_t), then we select the
# columns of actions taken. These are the actions which would've been taken
# for each batch state according to policy_net
            # q_values_theta has shape (batch_size, num_actions); select the
            # Q-value of the action actually taken in each sampled transition.
            # (The original special case for batch_size == 1 left
            # state_action_values_theta undefined, so it is computed here
            # unconditionally.)
            state_action_values_theta = q_values_theta.gather(1, action_batch)
#state_action_values torch.FloatTensor([q_values[i][action_batch[i]] for i in range(q_values.shape[0])])
# Compute V(s_{t+1}) for all next states.
# Expected values of actions for non_final_next_states are computed based
# on the "older" target_net; selecting their best reward with max(1)[0].
# This is merged based on the mask, such that we'll have either the expected
# state value or 0 in case the state was final.
next_state_values_theta = q_values_ns_theta.gather(1, max_action_indices_theta.unsqueeze(1))
# Compute a mask of non-final states and concatenate the batch elements
# (a final state would've been the one after which simulation ended)
done_batch = torch.BoolTensor(batch["done"])
next_state_values_theta[done_batch == True] = 0
# Compute the expected Q values
expected_state_action_values_theta = (next_state_values_theta * (self.gamma ** self.nstep)) + reward_batch
actions = action_batch.float()
loss_theta = F.smooth_l1_loss(state_action_values_theta, expected_state_action_values_theta)
# Optimize the model
self.optimizer_theta.zero_grad()
batch_loss_theta += loss_theta
loss_theta.backward()
for name, param in self.policy_net_theta.named_parameters():
if param.grad is not None:
param.grad.data.clamp_(-1, 1)
self.optimizer_theta.step()
#wandb.log({"loss_theta": batch_loss_theta})
return
def select_action(self, state, action_mask = None, test= False, current_step = 0):
#Epsilon greedy action selection with action mask from environment
with torch.no_grad():
q_values = self.forward(self.policy_net_theta, state)
q_values = q_values.squeeze(0)
assert len(q_values.shape) == 1
nb_actions = q_values.shape[0]
if test == False:
current_eps = self.eps_end + (self.eps_start - self.eps_end) * \
np.exp(-1 * current_step / self.eps_decay)
if action_mask != None:
#Mask out to only actions that are legal within the state space.
#action_mask_neg_infinity = [float("-inf") if action_mask[i] == 0 else 1 for i in range(0, len(action_mask))]
action_mask_neg_infinity = [-1000000 if action_mask[i] == 0 else 0 for i in range(0, len(action_mask))]
action_mask_neg_infinity = torch.autograd.Variable(torch.FloatTensor(action_mask_neg_infinity), requires_grad=False)
legal_actions = [i for i in range(0, len(action_mask)) if action_mask[i] == 1]#np.where(action_mask == 1)
if len(legal_actions) == 0:
print("no actions legal! unclear why this happens-- potentially trapped and disabled? Maybe bug?", action_mask)
return torch.LongTensor([[0]])
if test == False and np.random.uniform() < current_eps:
action = np.random.choice(legal_actions)#np.random.randint(0, nb_actions)
else:
action = np.argmax(q_values + action_mask_neg_infinity)
if test == True:
pass
else: #This shouldnt be called
if test == False and np.random.uniform() < current_eps:
action = np.random.randint(0, nb_actions)
else:
action = np.argmax(q_values)
print("\n\n\nhmmmm\n\n\n")
#q_stuff = q_values + action_mask_neg_infinity
#print("move_actions", q_stuff[0:4])
#print("switch actions", q_stuff[-6:])
return torch.LongTensor([action])
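# The exploration schedule used inside select_action(), factored out as an
# illustrative helper (hypothetical; not referenced elsewhere in this file).
def epsilon_at_step(current_step, eps_start, eps_end, eps_decay):
    # Exponentially anneal epsilon from eps_start toward eps_end.
    return eps_end + (eps_start - eps_end) * np.exp(-1 * current_step / eps_decay)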
| 39.509728
| 217
| 0.749163
|
88a900407ac4ce72bd69838788d22714a9c77bab
| 27,799
|
py
|
Python
|
mindspore/ops/composite/multitype_ops/_constexpr_utils.py
|
i4oolish/mindspore
|
dac3be31d0f2c0a3516200f47af30980e566601b
|
[
"Apache-2.0"
] | 2
|
2020-08-12T16:14:40.000Z
|
2020-12-04T03:05:57.000Z
|
mindspore/ops/composite/multitype_ops/_constexpr_utils.py
|
dilingsong/mindspore
|
4276050f2494cfbf8682560a1647576f859991e8
|
[
"Apache-2.0"
] | null | null | null |
mindspore/ops/composite/multitype_ops/_constexpr_utils.py
|
dilingsong/mindspore
|
4276050f2494cfbf8682560a1647576f859991e8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""constexpr util"""
from functools import reduce
import numpy as np
from ...primitive import constexpr
from .... import log as logger
from ....common import dtype as mstype
from ....common.tensor import Tensor
from ....ops import _utils as op_utils
ALL_TENSOR = 0
NO_TENSOR = 1
CONTAIN_TENSOR = 2
ALL_SCALAR = 3
INT_ = 0
BOOL_ = 1
UNSUPPORTED_DTYPE = 2
TENSOR_SETITEM = "tensor setitem"
TENSOR_GETITEM = "tensor getitem"
SET_ITEM_BY_ONE_TENSOR = 0
SET_ITEM_BY_TUPLE_OF_TENSOR = 1
@constexpr
def raise_index_error(msg):
raise IndexError(msg)
@constexpr
def raise_type_error(msg):
raise TypeError(msg)
@constexpr
def check_equal(param1, param2, msg="{},{}"):
"""Checks whether the two parameters are equal or not."""
if param1 != param2:
raise ValueError(msg.format(param1, param2))
return param1
@constexpr
def check_ellipsis_shape_size(data_shape, value_shape, data_size, value_size):
"""Checks the shape and size of the sensor and value."""
if data_shape == value_shape or data_size == value_size or value_size == 1:
return True
raise ValueError("The value(shape={}), can not assign to tensor(shape={}).".format(
value_shape, data_shape))
@constexpr
def check_tensor_setitem_index(index, element_type=None):
"""Checks tuple index type of tensor assignment."""
if index is None:
raise IndexError("Tensor's index cannot be None.")
# eg. Tensor[Slice] = u
if isinstance(index, slice):
return True
# eg. Tensor[tuple] = u
if isinstance(index, tuple):
if not index:
raise IndexError("Tensor's index cannot be empty.")
# eg. Tensor[tuple(Slice,...)] = u
for item in index:
if not isinstance(item, (slice, type(...), int)):
raise IndexError(
"Index of type '{}' is not supported yet.".format(type(item)))
return True
# eg. Tensor[Tensor[dtype=bool]] = u
if isinstance(index, mstype.tensor_type):
if element_type is None or element_type != mstype.bool_:
raise TypeError(
"The index of tensor should be a bool type tensor. "
"{} type is not supported yet.".format(element_type))
return True
raise IndexError(
"Index of type '{}' is not supported yet.".format(type(index)))
@constexpr
def is_same_type(inst, type_):
"""
    Checks whether the inspected type is the same as the target type.
Inputs:
inst (mindspore.dtype): Inspected type.
type_ (mindspore.dtype): Target type.
Outputs:
bool, the check result.
"""
return inst == type_
def slice_expand(input_slices, shape):
"""
Converts slice to indices.
Inputs:
slices (Union[Slice, tuple[Slice]]): Slice tuple or slice.
        shape (tuple): The shape of a tensor is an integer element tuple.
Outputs:
tuple[list], This is expressed as (begins, ends, strides).
"""
begin = []
end = []
strides = []
index = 0
slices = None
# Slice or tuple(Slice...)
if isinstance(input_slices, slice):
slices = (input_slices,)
elif isinstance(input_slices, (tuple, list)) and input_slices and isinstance(input_slices[0], (slice, type(...))):
is_have_ellipsis = False
for _, element in enumerate(input_slices):
if isinstance(element, type(...)):
is_have_ellipsis = True
break
if is_have_ellipsis:
slices = ellipsis2slice(input_slices, shape)
else:
slices = input_slices
else:
raise IndexError("Tensor's index type is not supported yet.")
for s in slices:
start = 0 if (s.start is None) else s.start
stop = shape[index] if (s.stop is None) else s.stop
step = 1 if (s.step is None) else s.step
begin.append(start)
end.append(stop)
strides.append(step)
index += 1
while index < len(shape):
begin.append(0)
end.append(shape[index])
strides.append(1)
index += 1
return begin, end, strides
def ellipsis2slice(input_, shape):
"""Converts ellipsis to slice."""
input_slice = input_
result = []
if isinstance(input_, type(...)):
input_slice = (input_,)
ell_count = 0
for _, element in enumerate(input_slice):
if not isinstance(element, type(...)):
result.append(element)
continue
ell_count += 1
if ell_count > 1:
raise IndexError("There cannot be more than one ellisis (...) in the index of the tensor, "
"but it is currently {}".format(input_slice))
for _ in range(len(shape) - len(input_slice) + 1):
result.append(slice(None, None, None))
return tuple(result)
@constexpr
def slice2indices(input_slices, shape):
"""
Converts slice to indices.
Inputs:
slices (Union[Slice, tuple[Slice]]): Slice tuple or slice.
shape (tuple): The shape of a tensor is an integer element tuple.
Outputs:
Tensor, the shape is (n, 1).
"""
begin, end, strides = slice_expand(input_slices, shape)
np_r = []
for i, element in enumerate(shape):
s = begin[i] if (begin[i] >= 0) else (element + begin[i])
e = end[i] if (end[i] >= 0) else (element + end[i])
np_r.append(np.r_[s:e:strides[i]])
# Reference: np.ravel_multi_index((np.ix_(np.r_[1:3:1], np.r_[0:4:1], np.r_[4:0:-1])), a.shape)
np_ix = np.ix_(*np_r)
ravel = np.ravel_multi_index(np_ix, shape)
ravel = Tensor(ravel.reshape(-1, 1), dtype=mstype.int32)
return ravel
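# Illustration: for a tensor of shape (2, 3), slice2indices(slice(0, 2, 1), (2, 3))
# expands the slice over the first axis, fills the remaining axis completely,
# and returns a (6, 1) int32 Tensor of flat indices [[0], [1], [2], [3], [4], [5]].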
@constexpr
def check_indices(indices_size, index):
"""Checks indices whether is empty."""
if indices_size < 1:
raise IndexError(
"The tensor's index is unreasonable. index:{}".format(index))
return indices_size
@constexpr
def check_indices_value_size(indices_size, value_size):
"""Checks if the sizes are already matched."""
if value_size < 1:
raise ValueError("The value assigned to tensor cannot be empty.")
if value_size > 1:
if value_size != indices_size:
raise ValueError(
"The value given to tensor does not match the index size,"
" value size:{}, indics size:{}".format(value_size, indices_size))
return value_size
@constexpr
def integer_to_indices(index, shape):
"""Converts int or tuple[int] to indices."""
size = reduce(lambda x, y: x * y, shape)
range_ = np.arange(size).reshape(shape)
value = range_[index]
value = value.reshape(-1, 1)
return Tensor(value, dtype=mstype.int32)
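# Illustration: integer_to_indices(1, (2, 3)) selects row 1 of
# np.arange(6).reshape(2, 3) and returns a (3, 1) int32 Tensor
# holding [[3], [4], [5]].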
@constexpr
def tuple_element_is_slice(indexs):
"""Judges tuple element type."""
if not indexs:
raise IndexError("Tensor's index cannot be empty.")
if isinstance(indexs, tuple):
for _, ele in enumerate(indexs):
if not isinstance(ele, slice):
return False
return True
return False
@constexpr
def tuple_element_is_int(indexs):
"""Judges tuple element type."""
if not indexs:
raise IndexError("Tensor's index cannot be empty.")
if isinstance(indexs, tuple):
for _, ele in enumerate(indexs):
if not isinstance(ele, int):
return False
return True
return False
@constexpr
def tuple_index_elements_type(types, op_name):
"""Judges the type of all elements of the tuple."""
tensors_number = 0
for ele in types:
if isinstance(ele, mstype.tensor_type):
tensors_number += 1
if tensors_number == len(types):
return ALL_TENSOR
if tensors_number == 0:
return NO_TENSOR
return CONTAIN_TENSOR
@constexpr
def check_value_elements(data_dtype, types):
"""Judges the type of all elements of the tuple."""
tensors_number = 0
scalars_number = 0
for i, ele in enumerate(types):
if isinstance(ele, mstype.tensor_type):
ele_dtype = ele.element_type()
if data_dtype == ele_dtype:
tensors_number += 1
else:
raise TypeError(f"For '{TENSOR_SETITEM}', the data type of {i}th tensor '{ele_dtype}' "
f"in value tuple is not consistent with assigned tensor data type '{data_dtype}'.")
elif mstype.dtype_to_pytype(ele) == mstype.dtype_to_pytype(data_dtype):
scalars_number += 1
else:
raise TypeError(f"For '{TENSOR_SETITEM}', the {i}th element type '{ele}' in "
f"value tuple is not consistent with assigned tensor data type '{data_dtype}'.")
if tensors_number == len(types):
return ALL_TENSOR
if scalars_number == len(types):
return ALL_SCALAR
raise TypeError(
f"For '{TENSOR_SETITEM}', the value does not support scalar and tensor mixing, but got {types}.")
@constexpr
def get_index_tensor_dtype(dtype):
"""Check a tuple of tensor data type."""
if dtype == mstype.int32:
return INT_
if dtype == mstype.bool_:
return BOOL_
raise IndexError(
f"For '{TENSOR_SETITEM}', the index tensor data type '{dtype}' is not supported.")
@constexpr
def check_index_tensors_dtype(dtypes, op_name):
"""Check a tuple of tensor data type."""
for ele in dtypes:
if not ele == mstype.int32:
raise IndexError(f"For '{op_name}', the all index tensor "
f"data types should be mstype.int32, but got {dtypes}.")
return True
@constexpr
def check_index_tensor_dtype(dtype, op_name):
"""Check a tensor data type."""
if dtype == mstype.int32:
return True
raise IndexError(
f"For '{op_name}', the index tensor data type should be mstype.int32, but got {dtype}.")
@constexpr
def check_tensors_dtype_same(data_dtype, value_dtype, op_name):
"""Check tensors data type same."""
if value_dtype == data_dtype:
return True
raise TypeError(f"For '{op_name}', the value data type '{value_dtype}' "
f"is not consistent with assigned tensor data type {data_dtype}.")
@constexpr
def generate_broadcast_shape(shapes, op_name):
"""Generate broadcast shape for a tuple of shape."""
if not shapes:
return ()
broadcast_shape = shapes[0]
for i, shape in enumerate(shapes):
logger.debug(f"Broadcasts the {i}th tensor, the shape is {shape}.")
try:
broadcast_shape = op_utils.get_broadcast_shape(
broadcast_shape, shape, op_name)
except ValueError as ex:
raise IndexError(ex)
return tuple(broadcast_shape)
@constexpr
def check_two_shapes_need_broadcast(shape_x, shape_y):
"""Check two shapes need broadcast."""
error = ValueError(f"For 'tensor setitem with tensor', the value tensor shape "
f"{shape_y} could not broadcast the required updates shape {shape_x}.")
if len(shape_y) > len(shape_x):
raise error
for i in range(-len(shape_y), 0):
if shape_y[i] > shape_x[i]:
raise error
if shape_y[i] < shape_x[i] and shape_y[i] != 1:
raise error
if shape_y == shape_x:
return False
return True
@constexpr
def compute_multiples(origin_shape, broadcast_shape):
"""Compute multiples between origin shape with broadcast shape."""
len_gap = len(broadcast_shape) - len(origin_shape)
return broadcast_shape[0:len_gap] + tuple(map(lambda x, y: x // y, broadcast_shape[len_gap:], origin_shape))
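# Illustration: compute_multiples((3, 1), (2, 3, 4)) returns (2, 1, 4) -- the
# per-axis repeat counts needed to tile a (3, 1) array up to the broadcast
# shape (2, 3, 4).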
@constexpr
def compute_new_shape(origin_shape, indexes_shapes_info):
"""Compute new shape between origin shape with final shape."""
new_shape = []
for i in indexes_shapes_info:
if i == origin_shape:
new_shape.extend(origin_shape)
else:
new_shape.append(1)
return tuple(new_shape)
@constexpr
def convert_ellipsis_to_tensors(slice_number,
ellipsis_occupied_dims,
final_shape,
indexes_shapes_info,
op_name):
"""Convert an ellipsis to a list of tensor."""
tensor_list = []
dims_dealt_count = 0
while dims_dealt_count < ellipsis_occupied_dims:
shape = []
slice_count = 0
array = None
for ele in indexes_shapes_info:
if isinstance(ele, list):
if slice_count == slice_number:
array = np.array(ele, np.int32)
shape.append(len(ele))
else:
shape.append(1)
slice_count += 1
if isinstance(ele, tuple):
shape.extend([1] * len(ele))
if array is None:
raise ValueError(
f"For '{op_name}', generate tensors from ellipsis failed.")
array = np.reshape(array, shape)
reps = compute_multiples(shape, final_shape)
tensor = Tensor(np.tile(array, reps))
tensor_list.append(tensor)
slice_number += 1
dims_dealt_count += 1
return tensor_list
@constexpr
def convert_slice_to_tensor(slice_number, final_shape, indexes_shapes_info, op_name):
"""Convert a slice to a tensor."""
shape = []
count = 0
array = None
for ele in indexes_shapes_info:
if isinstance(ele, list):
if count == slice_number:
array = np.array(ele, np.int32)
shape.append(len(ele))
else:
# When the slice is not the slice looking for, the shape is filled with 1.
shape.append(1)
count += 1
elif isinstance(ele, tuple):
shape.extend([1] * len(ele))
else:
shape.append(1)
if array is None:
raise ValueError(
f"For '{op_name}', generate tensor from 'slice' failed.")
array = np.reshape(array, shape)
reps = compute_multiples(shape, final_shape)
tensor = Tensor(np.tile(array, reps))
return tensor
@constexpr
def check_shapes_same(value_shapes, op_name):
"""Check if the shapes in the tuple are consistent."""
for i, shape in enumerate(value_shapes):
if shape != value_shapes[0]:
raise ValueError(f"For '{op_name}', the {i}th tensor shape in "
f"value tuple is not same as the first tensor shape.")
return True
@constexpr
def convert_scalar_to_tensor(data_shape, data_dtype, indices_shape, value, op_type):
"""Convert a scalar to a tensor."""
if op_type == SET_ITEM_BY_ONE_TENSOR:
updates_shape = indices_shape + data_shape[1:]
else:
updates_shape = indices_shape[:-1] + data_shape[indices_shape[-1]:]
if isinstance(value, mstype.dtype_to_pytype(data_dtype)):
return Tensor(np.full(updates_shape, value), dtype=data_dtype)
raise TypeError(f"For '{TENSOR_SETITEM}', the value type '{value.__class__.__name__}'"
f" is not consistent with the assigned tensor data type {data_dtype}.")
@constexpr
def convert_tuple_of_scalar_to_tensor(data_shape, data_dtype, index_shape, value, op_type):
"""Convert a tuple of scalar to a tensor."""
updates_shape = generate_updates_shape(data_shape, index_shape, op_type)
if len(value) != updates_shape[-1]:
raise ValueError(f"For '{TENSOR_SETITEM}', the number of elements : {len(value)} "
f"in the updates tuple does not meet the requirements: {updates_shape[-1]}.")
array = np.array(value, dtype=mstype.dtype_to_nptype(data_dtype))
reps = compute_multiples(updates_shape[-1:], updates_shape)
return Tensor(np.tile(array, reps))
@constexpr
def generate_updates_shape(data_shape, index_shape, op_type):
"""Generate updates shape for 'tensor setitem'."""
if op_type == SET_ITEM_BY_ONE_TENSOR:
updates_shape = index_shape + data_shape[1:]
else:
updates_shape = index_shape[:-1] + data_shape[index_shape[-1]:]
return updates_shape
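# Illustration: with data_shape = (5, 4, 3) and index_shape = (2, 1),
#   SET_ITEM_BY_ONE_TENSOR      -> (2, 1) + (4, 3)       = (2, 1, 4, 3)
#   SET_ITEM_BY_TUPLE_OF_TENSOR -> (2,) + data_shape[1:] = (2, 4, 3)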
@constexpr
def check_number_of_index_tensor(data_shape, tuple_len, op_name):
"""Check if the number of index tensor exceeds the dimension of the operated tensor."""
if tuple_len <= len(data_shape):
return True
raise IndexError(f"For '{op_name}', the number {tuple_len} of index tensor "
f"is greater than the dimension {len(data_shape)} of the operated tensor.")
@constexpr
def generate_index_info_from_tuple_of_mixed_tensors(data_shape,
indexes_types,
tensor_indexes_shapes,
tensor_indexes_dtypes,
slice_indexes,
op_name):
"""
    Generate index info containing the broadcast shape, the final shape,
    the indexes shapes info and the ellipsis size from a tuple of mixed tensors.
"""
check_index_tensors_dtype(tensor_indexes_dtypes, op_name)
data_rank = len(data_shape)
indexes_size = len(indexes_types)
if indexes_size > data_rank:
raise IndexError(f"For '{op_name}', the number {indexes_size} of index elements "
f"is greater than the dimension {len(data_shape)} of the operated tensor.")
indexes_info = {}
index_tensors_info = {}
ellipsis_num = 0
ellipsis_occupied_dims = 0
tensor_count = 0
slice_count = 0
for i, ele_type in enumerate(indexes_types):
if ellipsis_num == 0:
pos = i
else:
pos = i + ellipsis_occupied_dims - 1
if isinstance(ele_type, mstype.tensor_type):
indexes_info[pos] = tensor_indexes_shapes[tensor_count]
index_tensors_info[pos] = tensor_indexes_shapes[tensor_count]
tensor_count += 1
elif isinstance(ele_type, mstype.slice_type):
slice_obj = slice(slice_indexes[slice_count].start,
slice_indexes[slice_count].stop,
slice_indexes[slice_count].step)
# Use list to represent slicing result.
indexes_info[pos] = list(range(data_shape[pos]))[slice_obj]
if not indexes_info[pos]:
raise IndexError("An empty slice is not supported, got {}:{}:{}".format(
slice_indexes[slice_count].start,
slice_indexes[slice_count].stop,
slice_indexes[slice_count].step))
slice_count += 1
elif isinstance(ele_type, mstype.ellipsis_type):
if ellipsis_num != 0:
raise IndexError(
f"For '{op_name}', the index could only contain one ellipsis.")
ellipsis_occupied_dims = data_rank - indexes_size + 1
for j in range(pos, pos + ellipsis_occupied_dims):
# Use list to represent slicing result.
indexes_info[j] = list(range(data_shape[j]))
ellipsis_num += 1
else:
raise IndexError(f"For '{op_name}', the index elements only support "
f"'Tensor', 'int', 'Slice', 'Ellipsis', but got {ele_type}.")
broadcast_shape, final_shape, indexes_shapes_info = \
_derive_result_shape_info_from_tuple_of_mixed_tensors(
indexes_info, index_tensors_info, op_name)
return broadcast_shape, final_shape, indexes_shapes_info, ellipsis_occupied_dims
def _judge_tuple_of_mixed_tensors_continuous(index_tensor_info_key: list):
"""Determine whether the tensor in the index appears continuously."""
for i in range(len(index_tensor_info_key) - 1):
if index_tensor_info_key[i + 1] != index_tensor_info_key[i] + 1:
return False
return True
def _derive_result_shape_info_from_tuple_of_mixed_tensors(indexes_info, index_tensors_info, op_name):
"""Derive the resulting shape information from the a tuple index of mixed tensors."""
index_tensor_info_key = list(index_tensors_info.keys())
index_tensor_info_value = list(index_tensors_info.values())
broadcast_shape = generate_broadcast_shape(
index_tensor_info_value, op_name)
final_shape = []
indexes_shapes_info = []
mixed_tensors_continuous = _judge_tuple_of_mixed_tensors_continuous(
index_tensor_info_key)
if mixed_tensors_continuous:
tensor_shape_dealt = False
for ele in indexes_info.values():
if isinstance(ele, list):
final_shape.append(len(ele))
indexes_shapes_info.append(ele)
elif isinstance(ele, tuple):
if not tensor_shape_dealt:
final_shape.extend(broadcast_shape)
indexes_shapes_info.append(broadcast_shape)
tensor_shape_dealt = True
else:
raise IndexError(f"For '{op_name}', the index elements only support "
f"'Tensor', 'int', 'Slice', 'Ellipsis', but got {type(ele).__name__}.")
else:
final_shape.extend(broadcast_shape)
indexes_shapes_info.append(broadcast_shape)
for ele in indexes_info.values():
if isinstance(ele, list):
final_shape.append(len(ele))
indexes_shapes_info.append(ele)
elif isinstance(ele, tuple):
continue
else:
raise IndexError(f"For '{op_name}', the index elements only support "
f"'Tensor', 'int', 'Slice', 'Ellipsis', but got {type(ele).__name__}.")
return broadcast_shape, tuple(final_shape), tuple(indexes_shapes_info)
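# Illustration of the two branches above (shapes assumed for the sake of example):
# for data of shape (5, 6, 7) indexed by (tensor_a, tensor_b, 0:2) the tensor indexes
# are consecutive, so the broadcast shape is spliced in at the position of the first
# tensor index; for (tensor_a, 0:2, tensor_b) they are separated by a slice, so, as in
# NumPy advanced indexing, the broadcast shape is moved to the front of the final shape.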
@constexpr
def get_pos_of_int_index(indexes_types):
"""Get int index positions from the mixed tensors index which contains int, tensor, slice, and ellipsis."""
int_positions = []
for i, ele_type in enumerate(indexes_types):
if ele_type == mstype.int32:
int_positions.append(i)
return int_positions
@constexpr
def separate_mixed_tensors_index(indexes_types, op_name):
"""Separate the position information of tensor and slice and ellipsis from the mixed tensors index."""
tensor_positions = []
slice_positions = []
ellipsis_position = None
for i, ele_type in enumerate(indexes_types):
if isinstance(ele_type, mstype.tensor_type):
tensor_positions.append(i)
elif isinstance(ele_type, mstype.slice_type):
slice_positions.append(i)
elif isinstance(ele_type, mstype.ellipsis_type):
ellipsis_position = i
else:
raise IndexError(f"For '{op_name}', the index elements only support "
f"'Tensor', 'int32', 'Slice', 'Ellipsis', but got {ele_type}.")
return tensor_positions, slice_positions, ellipsis_position
@constexpr
def scalar_in_sequence(x, y):
"""Determine whether the scalar in the sequence."""
if x is None:
raise ValueError("Judge scalar in tuple or list require scalar and sequence should be constant, "
"but the scalar is not.")
if y is None:
raise ValueError("Judge scalar in tuple or list require scalar and sequence should be constant, "
"but the sequence is not.")
if x in y:
return True
return False
@constexpr
def get_np_eps(input_dtype):
nptype = mstype.dtype_to_nptype(input_dtype)
eps = np.finfo(nptype).eps
return float(eps)
@constexpr
def check_number_index_type(number):
"""Check if it is int or bool number"""
if isinstance(number, bool):
return BOOL_
if isinstance(number, int):
return INT_
raise IndexError("Only support integers, slices(`:`), ellipsis(`...`), None and bool, got {0} type is {1} "
.format(number, type(number)))
@constexpr
def get_stride_info_from_slice(data_shape, slice_index):
"""Get stride info from a python slice"""
begin, end, step = get_slice_stride(data_shape[0], slice_index)
begin_strides = [begin]
end_strides = [end]
step_strides = [step]
for end in data_shape[1:]:
begin_strides.append(0)
end_strides.append(end)
step_strides.append(1)
return tuple(begin_strides), tuple(end_strides), tuple(step_strides)
@constexpr
def get_stride_info_from_integer(data_shape, number):
"""Get stride info from a integer"""
begin_strides = [number]
end_strides = [number+1]
step_strides = [1]
for end in data_shape[1:]:
begin_strides.append(0)
end_strides.append(end)
step_strides.append(1)
return tuple(begin_strides), tuple(end_strides), tuple(step_strides)
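# A minimal illustration of the function above (values chosen for the example):
# for data_shape = (3, 4, 5) and number = 2 it returns
#     begin = (2, 0, 0), end = (3, 4, 5), strides = (1, 1, 1)
# which describes the strided slice data[2:3, 0:4, 0:5].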
def get_slice_stride(dim_size, index_slice):
"""Get slice stride info"""
step = 1 if index_slice.step is None else index_slice.step
start_default = 0
stop_default = dim_size
if step < 0:
start_default = -1
stop_default = -(dim_size+1)
start = start_default if index_slice.start is None else index_slice.start
stop = stop_default if index_slice.stop is None else index_slice.stop
return start, stop, step
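# A minimal illustration of get_slice_stride (values chosen for the example); the
# defaults mirror Python slicing, including the reversed defaults for negative steps:
#     get_slice_stride(10, slice(2, 8, 2))        returns (2, 8, 2)
#     get_slice_stride(10, slice(None, None, -1)) returns (-1, -11, -1)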
@constexpr
def get_stride_info_from_tuple(data_shape, index_tuple):
"""Get stride info from a tuple"""
begin_strides = []
end_strides = []
step_strides = []
index_size = len(index_tuple)
data_shape_size = len(data_shape)
shrink_axis = 0
index_count = 0
ellipsis_count = 0
for idx, item in enumerate(index_tuple):
if isinstance(item, slice):
start, stop, step = get_slice_stride(data_shape[idx], item)
begin_strides.append(start)
end_strides.append(stop)
step_strides.append(step)
index_count = index_count + 1
elif isinstance(item, int):
begin_strides.append(item)
end_strides.append(item + 1)
step_strides.append(1)
shrink_axis = shrink_axis + (1 << index_count)
index_count = index_count + 1
elif item is ...:
ellipsis_count = ellipsis_count + 1
if ellipsis_count > 1:
raise IndexError("An index can have only one ellipsis (...)")
ellipsis_range_size = data_shape_size - (index_size - 1)
begin_strides.extend([0] * (ellipsis_range_size))
end_strides.extend(
[i for i in data_shape[index_count: index_count + (ellipsis_range_size)]])
step_strides.extend([1] * (ellipsis_range_size))
index_count = index_count + ellipsis_range_size
else:
raise IndexError("Not supported index data type, got ",
item, " type is ", type(item))
for item in range(index_count, data_shape_size):
begin_strides.append(0)
end_strides.append(data_shape[item])
step_strides.append(1)
return tuple(begin_strides), tuple(end_strides), tuple(step_strides), shrink_axis
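# A minimal illustration of get_stride_info_from_tuple (values chosen for the example):
# for data_shape = (4, 5, 6) and index_tuple = (1, slice(0, 4, 2)) it returns
#     begin = (1, 0, 0), end = (2, 4, 6), strides = (1, 2, 1), shrink_axis = 0b1
# where the shrink_axis bit marks the integer-indexed dimension to be squeezed out.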
| 36.338562
| 118
| 0.628224
|
3ddf61f65f00b55f16b75b1f36c7ca9aa75a022f
| 277
|
py
|
Python
|
Python3/0492-Construct-The-Rectangle/soln.py
|
wyaadarsh/LeetCode-Solutions
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
[
"MIT"
] | 5
|
2020-07-24T17:48:59.000Z
|
2020-12-21T05:56:00.000Z
|
Python3/0492-Construct-The-Rectangle/soln.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | null | null | null |
Python3/0492-Construct-The-Rectangle/soln.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | 2
|
2020-07-24T17:49:01.000Z
|
2020-08-31T19:57:35.000Z
|
import math


class Solution:
def constructRectangle(self, area):
"""
:type area: int
:rtype: List[int]
"""
return min([(area // W, W) for W in range(1, int(math.sqrt(area)) + 1)
if area % W == 0], key=lambda pair : pair[0] - pair[1])
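# A couple of illustrative calls (inputs chosen for the example):
#     Solution().constructRectangle(4)  returns (2, 2)
#     Solution().constructRectangle(37) returns (37, 1)
# W only runs up to sqrt(area), so L >= W always holds and the candidate pair with the
# smallest difference L - W is returned.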
| 30.777778
| 78
| 0.501805
|
904125b789494129532aeb53d6a26c29bc2d0d71
| 6,428
|
py
|
Python
|
homie/models/homie_device.py
|
timpur/homie-discovery-python
|
f157e3843cae7b1ad3e4fd810b340c35c34473eb
|
[
"MIT"
] | null | null | null |
homie/models/homie_device.py
|
timpur/homie-discovery-python
|
f157e3843cae7b1ad3e4fd810b340c35c34473eb
|
[
"MIT"
] | null | null | null |
homie/models/homie_device.py
|
timpur/homie-discovery-python
|
f157e3843cae7b1ad3e4fd810b340c35c34473eb
|
[
"MIT"
] | null | null | null |
"""Homie Device module"""
import logging
from ..tools import (constants, helpers, HomieDiscoveryBase, STAGE_0, STAGE_1, STAGE_2)
from .homie_node import HomieNode
_LOGGER = logging.getLogger(__name__)
class HomieDevice(HomieDiscoveryBase):
"""A definition of a Homie Device"""
def __init__(self, base_topic: str, device_id: str):
super().__init__()
_LOGGER.info(f"Homie Device Discovered. ID: {device_id}")
self._base_topic = base_topic
self._device_id = device_id
self._prefix_topic = f'{base_topic}/{device_id}'
self._homie_nodes = dict()
self._convention_version = constants.STATE_UNKNOWN
self._online = constants.STATE_UNKNOWN
self._name = constants.STATE_UNKNOWN
self._ip = constants.STATE_UNKNOWN
self._mac = constants.STATE_UNKNOWN
self._uptime = constants.STATE_UNKNOWN
self._signal = constants.STATE_UNKNOWN
self._stats_interval = constants.STATE_UNKNOWN
self._fw_name = constants.STATE_UNKNOWN
self._fw_version = constants.STATE_UNKNOWN
self._fw_checksum = constants.STATE_UNKNOWN
self._implementation = constants.STATE_UNKNOWN
def setup(self, subscribe, publish):
"""
Setup of the Homie Device
This will start the discovery proccess of nodes
Once dicovery proccess of children has compleeted (aka. device is `STAGE_1`),
discovery of all attributes takes place
"""
self._discover_nodes(subscribe, publish)
self.add_on_discovery_stage_change(lambda _, stage: subscribe(f'{self._prefix_topic}/#', self._update), STAGE_1)
def _discover_nodes(self, subscribe, publish):
def _on_discovery_nodes(topic: str, payload: str, msg_qos: int):
for node_id in helpers.proccess_nodes(payload):
if node_id not in self._homie_nodes:
homie_node = HomieNode(self, self._prefix_topic, node_id)
homie_node.add_on_discovery_stage_change(self._check_discovery_stage)
homie_node.setup(subscribe, publish)
self._homie_nodes[node_id] = homie_node
subscribe(f'{self._prefix_topic}/$nodes', _on_discovery_nodes)
def _check_discovery_stage(self, homie_node=None, stage=None):
current_stage = self._stage_of_discovery
if current_stage == STAGE_0:
if helpers.can_advance_stage(STAGE_1, self._homie_nodes):
self._set_discovery_stage(STAGE_1)
if current_stage == STAGE_1:
if helpers.can_advance_stage(STAGE_2, self._homie_nodes) and self._online is not constants.STATE_UNKNOWN:
self._set_discovery_stage(STAGE_2)
def _update(self, topic: str, payload: str, qos: int):
if self._prefix_topic not in topic:
return None
for homie_node in self._homie_nodes.values():
homie_node._update(topic, payload, qos)
topic = topic.replace(self._prefix_topic, '')
# Load Device Properties
if topic == '/$homie':
self._convention_version = payload
if topic == '/$online':
self._online = payload
if topic == '/$name':
self._name = payload
if topic == '/$localip':
self._ip = payload
if topic == '/$mac':
self._mac = payload
# Load Device Stats Properties
if topic == '/$stats/uptime':
self._uptime = payload
if topic == '/$stats/signal':
self._signal = payload
if topic == '/$stats/interval':
self._stats_interval = payload
# Load Firmware Properties
if topic == '/$fw/name':
self._fw_name = payload
if topic == '/$fw/version':
self._fw_version = payload
if topic == '/$fw/checksum':
self._fw_checksum = payload
# Load Implementation Properties
if topic == '/$implementation':
self._implementation = payload
# Ready
if topic == '/$online':
self._check_discovery_stage()
@property
def base_topic(self):
"""Return the Base Topic of the device."""
return self._base_topic
@property
def device_id(self):
"""Return the Device ID of the device."""
return self._device_id
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def homie_version(self):
"""Return the Homie Framework Version of the device."""
return self._convention_version
@property
def online(self) -> bool:
"""Return true if the device is online."""
return helpers.string_to_bool(self._online)
@property
def ip(self):
"""Return the IP of the device."""
return self._ip
@property
def mac(self):
"""Return the MAC of the device."""
return self._mac
@property
def uptime(self):
"""Return the Uptime of the device."""
return self._uptime
@property
def signal(self):
"""Return the Signal of the device."""
return self._signal
@property
def stats_interval(self):
"""Return the Stats Interval of the device."""
return self._stats_interval
@property
def firmware_name(self):
"""Return the Firmware Name of the device."""
return self._fw_name
@property
def firmware_version(self):
"""Return the Firmware Version of the device."""
return self._fw_version
@property
def firmware_checksum(self):
"""Return the Firmware Checksum of the device."""
return self._fw_checksum
@property
def is_setup(self):
"""Return True if the Device has been setup as a component"""
return self.stage_of_discovery >= STAGE_2
@property
def nodes(self):
"""Return a List of Nodes for the device."""
return self._homie_nodes.values()
def get_node(self, node_id):
"""Return a specific Node for the device."""
return self._homie_nodes[node_id]
def has_node(self, node_id: str):
"""Return True if specific Node for the Device exists."""
return node_id in self._homie_nodes
@property
def entity_id(self):
"""Return the ID of the entity."""
return self.device_id
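# A usage sketch (my_subscribe and my_publish are hypothetical MQTT helpers with the
# subscribe(topic, handler) and publish(topic, payload) signatures expected by setup):
#
#     device = HomieDevice('homie', 'my-device-id')
#     device.setup(my_subscribe, my_publish)
#     # once discovery reaches STAGE_2, device.online and device.nodes become meaningful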
| 32.14
| 120
| 0.625078
|
02d240f6a834fda027f1f50b5866fe7b76b9c260
| 120
|
py
|
Python
|
files2md/structure_objects/__init__.py
|
KacperKotlewski/file_structure_to_markdown
|
aad0e1c80f88e0b3d079cf242d43fdc4b7a369f7
|
[
"MIT"
] | 1
|
2020-02-22T00:41:04.000Z
|
2020-02-22T00:41:04.000Z
|
files2md/structure_objects/__init__.py
|
KacperKotlewski/file_structure_to_markdown
|
aad0e1c80f88e0b3d079cf242d43fdc4b7a369f7
|
[
"MIT"
] | null | null | null |
files2md/structure_objects/__init__.py
|
KacperKotlewski/file_structure_to_markdown
|
aad0e1c80f88e0b3d079cf242d43fdc4b7a369f7
|
[
"MIT"
] | null | null | null |
from .file import FileObj
from .structureObject import StructureObject
from .structurable_directory import DirectoryObj
| 30
| 48
| 0.875
|
452282944fc40ee2a33ce6080140e8f1e27cad69
| 4,384
|
py
|
Python
|
tests/agents/test_nfsp.py
|
Res260/rlcard
|
c0fc9fe70c4ec1c726e5e66b62866086491f5dbf
|
[
"MIT"
] | 2
|
2020-08-24T21:30:44.000Z
|
2020-10-27T03:44:04.000Z
|
tests/agents/test_nfsp.py
|
Res260/rlcard
|
c0fc9fe70c4ec1c726e5e66b62866086491f5dbf
|
[
"MIT"
] | null | null | null |
tests/agents/test_nfsp.py
|
Res260/rlcard
|
c0fc9fe70c4ec1c726e5e66b62866086491f5dbf
|
[
"MIT"
] | 2
|
2020-02-23T17:26:14.000Z
|
2020-12-22T15:34:13.000Z
|
import unittest
import tensorflow as tf
import numpy as np
from rlcard.agents.nfsp_agent import NFSPAgent, ReservoirBuffer
class TestNFSP(unittest.TestCase):
def test_init(self):
sess = tf.InteractiveSession()
tf.Variable(0, name='global_step', trainable=False)
agent = NFSPAgent(sess=sess,
scope='nfsp',
action_num=10,
state_shape=[10],
hidden_layers_sizes=[10,10],
q_mlp_layers=[10,10])
self.assertEqual(agent._action_num, 10)
sess.close()
tf.reset_default_graph()
def test_train(self):
norm_step = 100
memory_init_size = 20
step_num = 1000
sess = tf.InteractiveSession()
tf.Variable(0, name='global_step', trainable=False)
agent = NFSPAgent(sess=sess,
scope='nfsp',
action_num=2,
state_shape=[2],
hidden_layers_sizes=[10,10],
reservoir_buffer_capacity=50,
batch_size=4,
min_buffer_size_to_learn=memory_init_size,
q_replay_memory_size=50,
q_replay_memory_init_size=memory_init_size,
q_batch_size=4,
q_norm_step=norm_step,
q_mlp_layers=[10,10])
sess.run(tf.global_variables_initializer())
predicted_action = agent.eval_step({'obs': np.random.random_sample((2,)), 'legal_actions': [0, 1]})
self.assertGreaterEqual(predicted_action, 0)
self.assertLessEqual(predicted_action, 1)
for step in range(step_num):
agent.sample_episode_policy()
predicted_action = agent.step({'obs': np.random.random_sample((2,)), 'legal_actions': [0, 1]})
self.assertGreaterEqual(predicted_action, 0)
self.assertLessEqual(predicted_action, 1)
ts = [{'obs': np.random.random_sample((2,)), 'legal_actions': [0, 1]}, np.random.randint(2), 0, {'obs': np.random.random_sample((2,)), 'legal_actions': [0, 1]}, True]
agent.feed(ts)
if step > norm_step + memory_init_size:
agent.train_rl()
agent.train_sl()
sess.close()
tf.reset_default_graph()
def test_reservoir_buffer(self):
buff = ReservoirBuffer(10)
for i in range(5):
buff.add(i)
sampled_data = buff.sample(3)
self.assertEqual(len(sampled_data), 3)
with self.assertRaises(ValueError):
buff.sample(100)
for i, element in enumerate(buff):
self.assertEqual(i, element)
self.assertEqual(len(buff), 5)
buff.clear()
self.assertEqual(len(buff), 0)
def test_evaluate_with(self):
# Test average policy and value error here
sess = tf.InteractiveSession()
tf.Variable(0, name='global_step', trainable=False)
agent = NFSPAgent(sess=sess,
scope='nfsp',
action_num=2,
state_shape=[2],
hidden_layers_sizes=[10,10],
q_mlp_layers=[10,10],
evaluate_with='average_policy')
sess.run(tf.global_variables_initializer())
predicted_action = agent.eval_step({'obs': np.random.random_sample((2,)), 'legal_actions': [0, 1]})
self.assertGreaterEqual(predicted_action, 0)
self.assertLessEqual(predicted_action, 1)
sess.close()
tf.reset_default_graph()
sess = tf.InteractiveSession()
tf.Variable(0, name='global_step', trainable=False)
agent = NFSPAgent(sess=sess,
scope='nfsp',
action_num=2,
state_shape=[2],
hidden_layers_sizes=[10,10],
q_mlp_layers=[10,10],
evaluate_with='random')
sess.run(tf.global_variables_initializer())
with self.assertRaises(ValueError):
predicted_action = agent.eval_step({'obs': np.random.random_sample((2,)), 'legal_actions': [0, 1]})
sess.close()
tf.reset_default_graph()
| 35.354839
| 178
| 0.547673
|
39e43f51627b2b4a362d75fa1a9ec41db4a63113
| 3,539
|
py
|
Python
|
kubernetes_asyncio/client/models/v1_node_selector.py
|
dineshsonachalam/kubernetes_asyncio
|
d57e9e9be11f6789e1ce8d5b161acb64d29acf35
|
[
"Apache-2.0"
] | 1
|
2021-02-25T04:36:18.000Z
|
2021-02-25T04:36:18.000Z
|
kubernetes_asyncio/client/models/v1_node_selector.py
|
hubo1016/kubernetes_asyncio
|
d57e9e9be11f6789e1ce8d5b161acb64d29acf35
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1_node_selector.py
|
hubo1016/kubernetes_asyncio
|
d57e9e9be11f6789e1ce8d5b161acb64d29acf35
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1NodeSelector(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'node_selector_terms': 'list[V1NodeSelectorTerm]'
}
attribute_map = {
'node_selector_terms': 'nodeSelectorTerms'
}
def __init__(self, node_selector_terms=None): # noqa: E501
"""V1NodeSelector - a model defined in Swagger""" # noqa: E501
self._node_selector_terms = None
self.discriminator = None
self.node_selector_terms = node_selector_terms
@property
def node_selector_terms(self):
"""Gets the node_selector_terms of this V1NodeSelector. # noqa: E501
Required. A list of node selector terms. The terms are ORed. # noqa: E501
:return: The node_selector_terms of this V1NodeSelector. # noqa: E501
:rtype: list[V1NodeSelectorTerm]
"""
return self._node_selector_terms
@node_selector_terms.setter
def node_selector_terms(self, node_selector_terms):
"""Sets the node_selector_terms of this V1NodeSelector.
Required. A list of node selector terms. The terms are ORed. # noqa: E501
:param node_selector_terms: The node_selector_terms of this V1NodeSelector. # noqa: E501
:type: list[V1NodeSelectorTerm]
"""
if node_selector_terms is None:
raise ValueError("Invalid value for `node_selector_terms`, must not be `None`") # noqa: E501
self._node_selector_terms = node_selector_terms
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NodeSelector):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
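# A usage sketch (assumes the sibling generated model V1NodeSelectorTerm is exported
# from the same models package):
#
#     from kubernetes_asyncio.client.models import V1NodeSelectorTerm
#     selector = V1NodeSelector(node_selector_terms=[V1NodeSelectorTerm()])
#     selector.to_dict()   # {'node_selector_terms': [{...}]}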
| 30.247863
| 119
| 0.601865
|
5f81f5c12c6060f1f3d7e3d4e85c7b6904454af0
| 1,598
|
py
|
Python
|
testing/tests/001-main/001-empty/002-authenticated/006-news.py
|
fekblom/critic
|
a6b60c9053e13d4c878d50531860d7389568626d
|
[
"Apache-2.0"
] | 216
|
2015-01-05T12:48:10.000Z
|
2022-03-08T00:12:23.000Z
|
testing/tests/001-main/001-empty/002-authenticated/006-news.py
|
fekblom/critic
|
a6b60c9053e13d4c878d50531860d7389568626d
|
[
"Apache-2.0"
] | 55
|
2015-02-28T12:10:26.000Z
|
2020-11-18T17:45:16.000Z
|
testing/tests/001-main/001-empty/002-authenticated/006-news.py
|
fekblom/critic
|
a6b60c9053e13d4c878d50531860d7389568626d
|
[
"Apache-2.0"
] | 34
|
2015-05-02T15:15:10.000Z
|
2020-06-15T19:20:37.000Z
|
with_class = testing.expect.with_class
extract_text = testing.expect.extract_text
with frontend.signin():
frontend.page(
"news",
expect={ "document_title": testing.expect.document_title(u"News"),
"content_title": testing.expect.paleyellow_title(0, u"News"),
"pageheader_links": testing.expect.pageheader_links("authenticated",
"administrator"),
"script_user": testing.expect.script_no_user() })
# Load all news items to make sure they are syntactically correct.
#
# There may not be any, and we can't easily test that the right
# set of news items are listed, since this depends on whether we
# upgraded and from what. But this testing is still somewhat
# meaningful.
document = frontend.page("news", params={ "display": "all" })
items = document.findAll(attrs=with_class("item"))
for item in items:
item_id = item["critic-item-id"]
item_title = extract_text(item.find(attrs=with_class("title")))
frontend.page(
"news",
params={ "item": item_id },
expect={ "document_title": testing.expect.document_title(item_title),
"content_title": testing.expect.paleyellow_title(0, item_title),
"pageheader_links": testing.expect.pageheader_links("authenticated",
"administrator"),
"script_user": testing.expect.script_no_user() })
| 45.657143
| 90
| 0.58761
|
686085a72f209fb7de60b7e6d8913dcd45c916f1
| 673
|
py
|
Python
|
leetcode/medium/word-break.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 8
|
2019-05-14T12:50:29.000Z
|
2022-03-01T09:08:27.000Z
|
leetcode/medium/word-break.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 46
|
2019-03-24T20:59:29.000Z
|
2019-04-09T16:28:43.000Z
|
leetcode/medium/word-break.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 1
|
2022-01-28T12:46:29.000Z
|
2022-01-28T12:46:29.000Z
|
from typing import List


class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
if not s or not wordDict:
return False
wordDict = set(wordDict)
queue = [0]
visited = [False] * len(s)
while queue:
current = queue.pop(0)
if visited[current]:
continue
start = current
while start < len(s):
if s[current: start + 1] in wordDict:
if start + 1 == len(s):
return True
queue.append(start + 1)
start += 1
visited[current] = True
return False
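# Illustrative calls (inputs taken from the classic examples of this problem):
#     Solution().wordBreak("leetcode", ["leet", "code"])                        returns True
#     Solution().wordBreak("catsandog", ["cats", "dog", "sand", "and", "cat"])  returns False
# The BFS walks segmentation start positions; the visited list keeps each position
# from being expanded more than once.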
| 21.709677
| 61
| 0.444279
|
46a34778f8420ce822d5a9c20476de6fd2d6a129
| 113
|
py
|
Python
|
SLKlib/mapper/protein/__init__.py
|
korcsmarosgroup/ARN2DataBase
|
8931cec0387e5c8b599df40d652ac5fdb5c49a8f
|
[
"MIT"
] | null | null | null |
SLKlib/mapper/protein/__init__.py
|
korcsmarosgroup/ARN2DataBase
|
8931cec0387e5c8b599df40d652ac5fdb5c49a8f
|
[
"MIT"
] | null | null | null |
SLKlib/mapper/protein/__init__.py
|
korcsmarosgroup/ARN2DataBase
|
8931cec0387e5c8b599df40d652ac5fdb5c49a8f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = 'NetBiolGroup'
__email__ = 'netbiol@netbiol.elte.hu'
__version__ = '0.1.0'
| 18.833333
| 37
| 0.663717
|
782fdf68569a57a2d81048739b768076179c22ea
| 8,450
|
py
|
Python
|
deep_sdf/mesh.py
|
Kaminyou/DeepImplicitTemplates
|
cb6b65c198cdce2851c24e181f444864c61a8689
|
[
"MIT"
] | 120
|
2020-12-01T13:25:00.000Z
|
2022-03-30T02:49:33.000Z
|
deep_sdf/mesh.py
|
Kaminyou/DeepImplicitTemplates
|
cb6b65c198cdce2851c24e181f444864c61a8689
|
[
"MIT"
] | 3
|
2021-03-11T12:33:17.000Z
|
2021-07-09T05:32:08.000Z
|
deep_sdf/mesh.py
|
Kaminyou/DeepImplicitTemplates
|
cb6b65c198cdce2851c24e181f444864c61a8689
|
[
"MIT"
] | 12
|
2020-12-20T08:15:59.000Z
|
2022-02-09T08:50:47.000Z
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import logging
import numpy as np
import plyfile
import skimage.measure
import time
import torch
import deep_sdf.utils
def create_mesh(
decoder, latent_vec, filename, N=256, max_batch=(32 ** 3 * 4), offset=None, scale=None, volume_size=2.0
):
start = time.time()
ply_filename = filename
# NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle
voxel_origin = [-volume_size/2.0, -volume_size/2.0, -volume_size/2.0]
voxel_size = volume_size / (N - 1)
overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor())
samples = torch.zeros(N ** 3, 4)
# transform first 3 columns
# to be the x, y, z index
samples[:, 2] = overall_index % N
samples[:, 1] = (overall_index.long() // N) % N
samples[:, 0] = ((overall_index.long() // N) // N) % N
# transform first 3 columns
# to be the x, y, z coordinate
samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2]
samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1]
samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0]
num_samples = N ** 3
samples.requires_grad = False
head = 0
while head < num_samples:
sample_subset = samples[head : min(head + max_batch, num_samples), 0:3].cuda()
samples[head : min(head + max_batch, num_samples), 3] = (
deep_sdf.utils.decode_sdf(decoder, latent_vec, sample_subset)
.squeeze(1)
.detach()
.cpu()
)
head += max_batch
sdf_values = samples[:, 3]
sdf_values = sdf_values.reshape(N, N, N)
end = time.time()
logging.debug("sampling takes: %f" % (end - start))
convert_sdf_samples_to_ply(
sdf_values.data.cpu(),
voxel_origin,
voxel_size,
ply_filename + ".ply",
offset,
scale,
)
def create_mesh_octree(
decoder, latent_vec, filename, N=256, max_batch=32 ** 3, offset=None, scale=None, clamp_func=None,
volume_size=2.0):
start = time.time()
ply_filename = filename
# NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle
voxel_origin = [-volume_size / 2.0, -volume_size / 2.0, -volume_size / 2.0]
voxel_size = volume_size / (N - 1)
overall_index = np.arange(0, N ** 3)
samples = np.zeros([N ** 3, 4], dtype=np.float32)
# transform first 3 columns
# to be the x, y, z index
samples[:, 2] = overall_index % N
samples[:, 1] = (overall_index // N) % N
samples[:, 0] = ((overall_index // N) // N) % N
# transform first 3 columns
# to be the x, y, z coordinate
samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2]
samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1]
samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0]
samples = samples.reshape([N, N, N, 4])
sdf_values = np.zeros([N, N, N], dtype=np.float32)
    dirty = np.ones([N, N, N], dtype=bool)
    grid_mask = np.zeros_like(dirty, dtype=bool)
init_res = 64
ignore_thres = volume_size / N / 4
reso = N // init_res
while reso > 0:
grid_mask[0:N:reso, 0:N:reso, 0:N:reso] = True
test_mask = np.logical_and(grid_mask, dirty)
samples_ = samples[test_mask]
samples_ = torch.from_numpy(samples_).cuda()
sdf_ = []
head = 0
print(samples_.shape[0])
while head < samples_.shape[0]:
query_idx = torch.arange(head, min(head + max_batch, samples_.shape[0])).long().cuda()
s = (deep_sdf.utils.decode_sdf(
decoder, latent_vec, samples_[query_idx, :3]).view([-1]).detach()
)
if clamp_func is not None:
s = clamp_func(s)
sdf_.append(s.cpu().numpy())
head += max_batch
sdf_values[test_mask] = np.concatenate(sdf_, axis=-1)
if reso <= 1:
break
N_ds = N // reso - 1
overall_index_ds = np.arange(0, N_ds ** 3)
samples_ds = np.zeros([N_ds ** 3, 4], dtype=np.int32)
# transform first 3 columns
# to be the x, y, z index
samples_ds[:, 2] = overall_index_ds % N_ds
samples_ds[:, 1] = (overall_index_ds // N_ds) % N_ds
samples_ds[:, 0] = ((overall_index_ds // N_ds) // N_ds) % N_ds
samples_ds *= reso
dirty_ds = dirty[samples_ds[:, 0] + reso // 2,
samples_ds[:, 1] + reso // 2, samples_ds[:, 2] + reso // 2]
samples_ds = samples_ds[dirty_ds]
v0 = sdf_values[samples_ds[:, 0], samples_ds[:, 1], samples_ds[:, 2]]
v1 = sdf_values[samples_ds[:, 0], samples_ds[:, 1], samples_ds[:, 2] + reso]
v2 = sdf_values[samples_ds[:, 0], samples_ds[:, 1] + reso, samples_ds[:, 2]]
v3 = sdf_values[samples_ds[:, 0], samples_ds[:, 1] + reso, samples_ds[:, 2] + reso]
v4 = sdf_values[samples_ds[:, 0] + reso, samples_ds[:, 1], samples_ds[:, 2]]
v5 = sdf_values[samples_ds[:, 0] + reso, samples_ds[:, 1], samples_ds[:, 2] + reso]
v6 = sdf_values[samples_ds[:, 0] + reso, samples_ds[:, 1] + reso, samples_ds[:, 2]]
v7 = sdf_values[samples_ds[:, 0] + reso, samples_ds[:, 1] + reso, samples_ds[:, 2] + reso]
vs = np.asarray([v0, v1, v2, v3, v4, v5, v6, v7])
vmn = np.min(vs, axis=0)
vmx = np.max(vs, axis=0)
        v_ = 0.5 * (vmx + vmn)
clean_flag = (vmx - vmn) < ignore_thres
for sample, v in zip(samples_ds[clean_flag], v_[clean_flag]):
x, y, z = sample[0], sample[1], sample[2]
sdf_values[x:x+reso, y:y+reso, z:z+reso] = v
dirty[x:x + reso, y:y + reso, z:z + reso] = False
reso //= 2
end = time.time()
logging.debug("sampling takes: %f" % (end - start))
convert_sdf_samples_to_ply(
sdf_values,
voxel_origin,
voxel_size,
ply_filename + ".ply",
offset,
scale,
)
def convert_sdf_samples_to_ply(
input_3d_sdf_array,
voxel_grid_origin,
voxel_size,
ply_filename_out,
offset=None,
scale=None,
):
"""
Convert sdf samples to .ply
:param input_3d_sdf_array: a float array of shape (n,n,n)
:voxel_grid_origin: a list of three floats: the bottom, left, down origin of the voxel grid
:voxel_size: float, the size of the voxels
:ply_filename_out: string, path of the filename to save to
This function adapted from: https://github.com/RobotLocomotion/spartan
"""
start_time = time.time()
if isinstance(input_3d_sdf_array, torch.Tensor):
numpy_3d_sdf_tensor = input_3d_sdf_array.numpy()
elif isinstance(input_3d_sdf_array, np.ndarray):
numpy_3d_sdf_tensor = input_3d_sdf_array
else:
raise NotImplementedError
verts, faces, normals, values = skimage.measure.marching_cubes_lewiner(
numpy_3d_sdf_tensor, level=0.0, spacing=[voxel_size] * 3
)
# transform from voxel coordinates to camera coordinates
# note x and y are flipped in the output of marching_cubes
mesh_points = np.zeros_like(verts)
mesh_points[:, 0] = voxel_grid_origin[0] + verts[:, 0]
mesh_points[:, 1] = voxel_grid_origin[1] + verts[:, 1]
mesh_points[:, 2] = voxel_grid_origin[2] + verts[:, 2]
# apply additional offset and scale
if scale is not None:
mesh_points = mesh_points / scale
if offset is not None:
mesh_points = mesh_points - offset
# try writing to the ply file
num_verts = verts.shape[0]
num_faces = faces.shape[0]
verts_tuple = np.zeros((num_verts,), dtype=[("x", "f4"), ("y", "f4"), ("z", "f4")])
for i in range(0, num_verts):
verts_tuple[i] = tuple(mesh_points[i, :])
faces_building = []
for i in range(0, num_faces):
faces_building.append(((faces[i, :].tolist(),)))
faces_tuple = np.array(faces_building, dtype=[("vertex_indices", "i4", (3,))])
el_verts = plyfile.PlyElement.describe(verts_tuple, "vertex")
el_faces = plyfile.PlyElement.describe(faces_tuple, "face")
ply_data = plyfile.PlyData([el_verts, el_faces])
logging.debug("saving mesh to %s" % (ply_filename_out))
ply_data.write(ply_filename_out)
logging.debug(
"converting to ply format and writing to file took {} s".format(
time.time() - start_time
)
)
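# A usage sketch (assumes a trained decoder network and a latent_vec tensor that are
# already on the GPU, as required by the .cuda() calls above):
#
#     create_mesh(decoder, latent_vec, "out/shape", N=256, volume_size=2.0)
#     # samples the SDF on an N^3 grid and writes "out/shape.ply"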
| 33.665339
| 107
| 0.600828
|
c58dcb8c8b7ac791cd4b506b3cf40541b1847133
| 755
|
py
|
Python
|
tensorflow_hub/version.py
|
DEVESHTARASIA/hub
|
b203945ebcb5c29f4115d2bdcb3d21d387a59f0c
|
[
"Apache-2.0"
] | 6
|
2018-10-25T17:31:18.000Z
|
2022-02-12T15:24:12.000Z
|
tensorflow_hub/version.py
|
syed-ahmed/hub
|
ac2a47051dac4a2e9bd0c93eaa8dac550c350608
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_hub/version.py
|
syed-ahmed/hub
|
ac2a47051dac4a2e9bd0c93eaa8dac550c350608
|
[
"Apache-2.0"
] | 2
|
2019-01-24T04:58:03.000Z
|
2021-08-24T02:16:07.000Z
|
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the version string."""
__version__ = "0.2.0-dev"
| 41.944444
| 80
| 0.675497
|
36c12e88851f2517ef90365be6c6f4ad6b132c35
| 1,247
|
py
|
Python
|
test/test_target_policies.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
test/test_target_policies.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
test/test_target_policies.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.target_policies import TargetPolicies
class TestTargetPolicies(unittest.TestCase):
""" TargetPolicies unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testTargetPolicies(self):
"""
Test TargetPolicies
"""
model = swagger_client.models.target_policies.TargetPolicies()
if __name__ == '__main__':
unittest.main()
| 25.44898
| 75
| 0.729751
|
7d84a2859d11aa269fd6ad8e5c32deca34204d36
| 23,599
|
py
|
Python
|
azure-mgmt-logic/azure/mgmt/logic/operations/workflow_triggers_operations.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2018-07-23T08:59:24.000Z
|
2018-07-23T08:59:24.000Z
|
azure-mgmt-logic/azure/mgmt/logic/operations/workflow_triggers_operations.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2021-06-02T00:24:51.000Z
|
2021-06-02T00:24:51.000Z
|
azure-mgmt-logic/azure/mgmt/logic/operations/workflow_triggers_operations.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2020-07-25T20:36:02.000Z
|
2020-07-25T20:36:02.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import HttpOperationError
from .. import models
class WorkflowTriggersOperations(object):
"""WorkflowTriggersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version. Constant value: "2016-06-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-06-01"
self.config = config
def list(
self, resource_group_name, workflow_name, top=None, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of workflow triggers.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param top: The number of items to be included in the result.
:type top: int
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of WorkflowTrigger
:rtype:
~azure.mgmt.logic.models.WorkflowTriggerPaged[~azure.mgmt.logic.models.WorkflowTrigger]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.WorkflowTriggerPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.WorkflowTriggerPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/'}
def get(
self, resource_group_name, workflow_name, trigger_name, custom_headers=None, raw=False, **operation_config):
"""Gets a workflow trigger.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param trigger_name: The workflow trigger name.
:type trigger_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: WorkflowTrigger or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.logic.models.WorkflowTrigger or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WorkflowTrigger', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}'}
def reset(
self, resource_group_name, workflow_name, trigger_name, custom_headers=None, raw=False, **operation_config):
"""Resets a workflow trigger.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param trigger_name: The workflow trigger name.
:type trigger_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.reset.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}/reset'}
def run(
self, resource_group_name, workflow_name, trigger_name, custom_headers=None, raw=False, **operation_config):
"""Runs a workflow trigger.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param trigger_name: The workflow trigger name.
:type trigger_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: object or ClientRawResponse if raw=true
:rtype: object or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.run.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise HttpOperationError(self._deserialize, response, 'object')
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
run.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}/run'}
def get_schema_json(
self, resource_group_name, workflow_name, trigger_name, custom_headers=None, raw=False, **operation_config):
"""Get the trigger schema as JSON.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param trigger_name: The workflow trigger name.
:type trigger_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: JsonSchema or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.logic.models.JsonSchema or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get_schema_json.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('JsonSchema', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_schema_json.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}/schemas/json'}
def set_state(
self, resource_group_name, workflow_name, trigger_name, source, custom_headers=None, raw=False, **operation_config):
"""Sets the state of a workflow trigger.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param trigger_name: The workflow trigger name.
:type trigger_name: str
:param source:
:type source: ~azure.mgmt.logic.models.WorkflowTrigger
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
set_state1 = models.SetTriggerStateActionDefinition(source=source)
# Construct URL
url = self.set_state.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(set_state1, 'SetTriggerStateActionDefinition')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
set_state.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}/setState'}
def list_callback_url(
self, resource_group_name, workflow_name, trigger_name, custom_headers=None, raw=False, **operation_config):
"""Get the callback URL for a workflow trigger.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param trigger_name: The workflow trigger name.
:type trigger_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: WorkflowTriggerCallbackUrl or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.logic.models.WorkflowTriggerCallbackUrl or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.list_callback_url.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WorkflowTriggerCallbackUrl', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list_callback_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}/listCallbackUrl'}
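
# Hedged usage sketch (editorial addition, not part of the generated SDK module). It assumes
# that `client` is an authenticated azure.mgmt.logic.LogicManagementClient, that this
# operations class is exposed on it as `workflow_triggers`, and that a `get` operation exists
# alongside the two methods above; the resource names are placeholders.
def _demo_trigger_operations(client, resource_group='my-rg', workflow='my-workflow',
                             trigger='my-trigger'):
    """Re-apply a trigger's state and fetch its callback URL."""
    # set_state (above) wraps `source` in SetTriggerStateActionDefinition(source=source),
    # so a WorkflowTrigger resource (here, the trigger itself) is passed as the source.
    source = client.workflow_triggers.get(resource_group, workflow, trigger)
    client.workflow_triggers.set_state(resource_group, workflow, trigger, source=source)
    # list_callback_url (above) returns a WorkflowTriggerCallbackUrl; its `value` attribute
    # is the URL used to fire the trigger manually.
    callback = client.workflow_triggers.list_callback_url(resource_group, workflow, trigger)
    return callback.value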
avg_line_length: 48.063136 | max_line_length: 200 | alphanum_fraction: 0.668461

hexsha: 9c1ff9f1a5e0e35cce12a22d494fc369efc95e9f | size: 26,833 | ext: py | lang: Python
max_stars_repo_path: openmdao/core/test/test_units.py | max_stars_repo_name: colinxs/OpenMDAO | max_stars_repo_head_hexsha: a9a52be29281a23a102c64b577066ee5fc70f4b4 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: openmdao/core/test/test_units.py | max_issues_repo_name: colinxs/OpenMDAO | max_issues_repo_head_hexsha: a9a52be29281a23a102c64b577066ee5fc70f4b4 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: openmdao/core/test/test_units.py | max_forks_repo_name: colinxs/OpenMDAO | max_forks_repo_head_hexsha: a9a52be29281a23a102c64b577066ee5fc70f4b4 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
""" Tests the ins and outs of automatic unit conversion in OpenMDAO."""
import unittest
from six import iteritems
from six.moves import cStringIO
import numpy as np
from openmdao.api import IndepVarComp, Component, Group, Problem, ExecComp
from openmdao.test.util import assert_rel_error
class SrcComp(Component):
def __init__(self):
super(SrcComp, self).__init__()
self.add_param('x1', 100.0)
self.add_output('x2', 100.0, units='degC')
def solve_nonlinear(self, params, unknowns, resids):
""" No action."""
unknowns['x2'] = params['x1']
def linearize(self, params, unknowns, resids):
""" Derivative is 1.0"""
J = {}
J[('x2', 'x1')] = np.array([1.0])
return J
class TgtCompF(Component):
def __init__(self):
super(TgtCompF, self).__init__()
self.add_param('x2', 100.0, units='degF')
self.add_output('x3', 100.0)
def solve_nonlinear(self, params, unknowns, resids):
""" No action."""
unknowns['x3'] = params['x2']
def linearize(self, params, unknowns, resids):
""" Derivative is 1.0"""
J = {}
J[('x3', 'x2')] = np.array([1.0])
return J
class TgtCompFMulti(Component):
# Some extra inputs that might trip things up.
def __init__(self):
super(TgtCompFMulti, self).__init__()
self.add_param('_x2', 100.0, units='degF')
self.add_param('x2', 100.0, units='degF')
self.add_param('x2_', 100.0, units='degF')
self.add_output('_x3', 100.0)
self.add_output('x3', 100.0)
self.add_output('x3_', 100.0)
def solve_nonlinear(self, params, unknowns, resids):
""" No action."""
unknowns['x3'] = params['x2']
def linearize(self, params, unknowns, resids):
""" Derivative is 1.0"""
J = {}
J[('_x3', 'x2')] = np.array([1.0])
J[('_x3', '_x2')] = 0.0
J[('_x3', 'x2_')] = 0.0
J[('x3', 'x2')] = np.array([1.0])
J[('x3', '_x2')] = 0.0
J[('x3', 'x2_')] = 0.0
J[('x3_', 'x2')] = np.array([1.0])
J[('x3_', '_x2')] = 0.0
J[('x3_', 'x2_')] = 0.0
return J
class TgtCompC(Component):
def __init__(self):
super(TgtCompC, self).__init__()
self.add_param('x2', 100.0, units='degC')
self.add_output('x3', 100.0)
def solve_nonlinear(self, params, unknowns, resids):
""" No action."""
unknowns['x3'] = params['x2']
def linearize(self, params, unknowns, resids):
""" Derivative is 1.0"""
J = {}
J[('x3', 'x2')] = np.array([1.0])
return J
class TgtCompK(Component):
def __init__(self):
super(TgtCompK, self).__init__()
self.add_param('x2', 100.0, units='degK')
self.add_output('x3', 100.0)
def solve_nonlinear(self, params, unknowns, resids):
""" No action."""
unknowns['x3'] = params['x2']
def linearize(self, params, unknowns, resids):
""" Derivative is 1.0"""
J = {}
J[('x3', 'x2')] = np.array([1.0])
return J
class TestUnitConversion(unittest.TestCase):
""" Testing automatic unit conversion."""
def test_basic(self):
prob = Problem()
prob.root = Group()
prob.root.add('src', SrcComp())
prob.root.add('tgtF', TgtCompF())
prob.root.add('tgtC', TgtCompC())
prob.root.add('tgtK', TgtCompK())
prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
prob.root.connect('x1', 'src.x1')
prob.root.connect('src.x2', 'tgtF.x2')
prob.root.connect('src.x2', 'tgtC.x2')
prob.root.connect('src.x2', 'tgtK.x2')
prob.setup(check=False)
prob.run()
assert_rel_error(self, prob['src.x2'], 100.0, 1e-6)
assert_rel_error(self, prob['tgtF.x3'], 212.0, 1e-6)
assert_rel_error(self, prob['tgtC.x3'], 100.0, 1e-6)
assert_rel_error(self, prob['tgtK.x3'], 373.15, 1e-6)
# Make sure we don't convert equal units
self.assertEqual(prob.root.params.metadata('tgtC.x2').get('unit_conv'),
None)
indep_list = ['x1']
unknown_list = ['tgtF.x3', 'tgtC.x3', 'tgtK.x3']
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
return_format='dict')
assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
J = prob.calc_gradient(indep_list, unknown_list, mode='rev',
return_format='dict')
assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
J = prob.calc_gradient(indep_list, unknown_list, mode='fd',
return_format='dict')
assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
# Need to clean up after FD gradient call, so just rerun.
prob.run()
# Make sure check partials handles conversion
data = prob.check_partial_derivatives(out_stream=None)
for key1, val1 in iteritems(data):
for key2, val2 in iteritems(val1):
assert_rel_error(self, val2['abs error'][0], 0.0, 1e-6)
assert_rel_error(self, val2['abs error'][1], 0.0, 1e-6)
assert_rel_error(self, val2['abs error'][2], 0.0, 1e-6)
assert_rel_error(self, val2['rel error'][0], 0.0, 1e-6)
assert_rel_error(self, val2['rel error'][1], 0.0, 1e-6)
assert_rel_error(self, val2['rel error'][2], 0.0, 1e-6)
stream = cStringIO()
conv = prob.root.list_unit_conv(stream=stream)
self.assertTrue((('src.x2', 'tgtF.x2'), ('degC', 'degF')) in conv)
self.assertTrue((('src.x2', 'tgtK.x2'), ('degC', 'degK')) in conv)
def test_list_unit_conversions_no_unit(self):
prob = Problem()
prob.root = Group()
prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
prob.root.add('src', SrcComp())
prob.root.add('tgt', ExecComp('yy=xx', xx=0.0))
prob.root.connect('src.x2', 'tgt.xx')
prob.setup(check=False)
prob.run()
stream = cStringIO()
conv = prob.root.list_unit_conv(stream=stream)
self.assertTrue((('src.x2', 'tgt.xx'), ('degC', None)) in conv)
def test_basic_input_input(self):
prob = Problem()
prob.root = Group()
prob.root.add('src', SrcComp())
prob.root.add('tgtF', TgtCompF())
prob.root.add('tgtC', TgtCompC())
prob.root.add('tgtK', TgtCompK())
prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
prob.root.connect('x1', 'src.x1')
prob.root.connect('src.x2', 'tgtC.x2')
prob.root.connect('tgtC.x2', 'tgtF.x2')
prob.root.connect('tgtC.x2', 'tgtK.x2')
prob.setup(check=False)
prob.run()
assert_rel_error(self, prob['src.x2'], 100.0, 1e-6)
assert_rel_error(self, prob['tgtF.x3'], 212.0, 1e-6)
assert_rel_error(self, prob['tgtC.x3'], 100.0, 1e-6)
assert_rel_error(self, prob['tgtK.x3'], 373.15, 1e-6)
# Make sure we don't convert equal units
self.assertEqual(prob.root.params.metadata('tgtC.x2').get('unit_conv'),
None)
indep_list = ['x1']
unknown_list = ['tgtF.x3', 'tgtC.x3', 'tgtK.x3']
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
return_format='dict')
assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
J = prob.calc_gradient(indep_list, unknown_list, mode='rev',
return_format='dict')
assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
J = prob.calc_gradient(indep_list, unknown_list, mode='fd',
return_format='dict')
assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
def test_basic_implicit_conn(self):
prob = Problem()
prob.root = Group()
prob.root.add('src', SrcComp(), promotes=['x1', 'x2'])
prob.root.add('tgtF', TgtCompF(), promotes=['x2'])
prob.root.add('tgtC', TgtCompC(), promotes=['x2'])
prob.root.add('tgtK', TgtCompK(), promotes=['x2'])
prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
prob.setup(check=False)
prob.run()
assert_rel_error(self, prob['x2'], 100.0, 1e-6)
assert_rel_error(self, prob['tgtF.x3'], 212.0, 1e-6)
assert_rel_error(self, prob['tgtC.x3'], 100.0, 1e-6)
assert_rel_error(self, prob['tgtK.x3'], 373.15, 1e-6)
# Make sure we don't convert equal units
self.assertEqual(prob.root.params.metadata('tgtC.x2').get('unit_conv'),
None)
indep_list = ['x1']
unknown_list = ['tgtF.x3', 'tgtC.x3', 'tgtK.x3']
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
return_format='dict')
assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
J = prob.calc_gradient(indep_list, unknown_list, mode='rev',
return_format='dict')
assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
J = prob.calc_gradient(indep_list, unknown_list, mode='fd',
return_format='dict')
assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
def test_basic_grouped(self):
prob = Problem()
prob.root = Group()
sub1 = prob.root.add('sub1', Group())
sub2 = prob.root.add('sub2', Group())
sub1.add('src', SrcComp())
sub2.add('tgtF', TgtCompF())
sub2.add('tgtC', TgtCompC())
sub2.add('tgtK', TgtCompK())
prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
prob.root.connect('x1', 'sub1.src.x1')
prob.root.connect('sub1.src.x2', 'sub2.tgtF.x2')
prob.root.connect('sub1.src.x2', 'sub2.tgtC.x2')
prob.root.connect('sub1.src.x2', 'sub2.tgtK.x2')
prob.setup(check=False)
prob.run()
assert_rel_error(self, prob['sub1.src.x2'], 100.0, 1e-6)
assert_rel_error(self, prob['sub2.tgtF.x3'], 212.0, 1e-6)
assert_rel_error(self, prob['sub2.tgtC.x3'], 100.0, 1e-6)
assert_rel_error(self, prob['sub2.tgtK.x3'], 373.15, 1e-6)
# Make sure we don't convert equal units
self.assertEqual(prob.root.sub2.params.metadata('tgtC.x2').get('unit_conv'),
None)
indep_list = ['x1']
unknown_list = ['sub2.tgtF.x3', 'sub2.tgtC.x3', 'sub2.tgtK.x3']
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
return_format='dict')
assert_rel_error(self, J['sub2.tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['sub2.tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['sub2.tgtK.x3']['x1'][0][0], 1.0, 1e-6)
J = prob.calc_gradient(indep_list, unknown_list, mode='rev',
return_format='dict')
assert_rel_error(self, J['sub2.tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['sub2.tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['sub2.tgtK.x3']['x1'][0][0], 1.0, 1e-6)
J = prob.calc_gradient(indep_list, unknown_list, mode='fd',
return_format='dict')
assert_rel_error(self, J['sub2.tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['sub2.tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['sub2.tgtK.x3']['x1'][0][0], 1.0, 1e-6)
stream = cStringIO()
conv = prob.root.sub1.list_unit_conv(stream=stream)
self.assertTrue(len(conv) == 0)
def test_list_unit_connections_sub(self):
prob = Problem()
prob.root = Group()
sub1 = prob.root.add('sub1', Group())
sub2 = prob.root.add('sub2', Group())
sub1.add('src', SrcComp())
sub1.add('tgtF', TgtCompF())
sub2.add('tgtC', TgtCompC())
sub2.add('tgtK', TgtCompK())
prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
prob.root.connect('x1', 'sub1.src.x1')
prob.root.connect('sub1.src.x2', 'sub1.tgtF.x2')
prob.root.connect('sub1.src.x2', 'sub2.tgtC.x2')
prob.root.connect('sub1.src.x2', 'sub2.tgtK.x2')
prob.setup(check=False)
prob.run()
stream = cStringIO()
conv = prob.root.sub1.list_unit_conv(stream=stream)
self.assertTrue((('src.x2', 'tgtF.x2'), ('degC', 'degF')) in conv)
def test_basic_grouped_bug_from_pycycle(self):
prob = Problem()
root = prob.root = Group()
sub1 = prob.root.add('sub1', Group(), promotes=['x2'])
        sub1.add('src', SrcComp(), promotes=['x2'])
root.add('tgtF', TgtCompFMulti())
root.add('tgtC', TgtCompC())
root.add('tgtK', TgtCompK())
prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
prob.root.connect('x1', 'sub1.src.x1')
prob.root.connect('x2', 'tgtF.x2')
prob.root.connect('x2', 'tgtC.x2')
prob.root.connect('x2', 'tgtK.x2')
prob.setup(check=False)
prob.run()
assert_rel_error(self, prob['x2'], 100.0, 1e-6)
assert_rel_error(self, prob['tgtF.x3'], 212.0, 1e-6)
assert_rel_error(self, prob['tgtC.x3'], 100.0, 1e-6)
assert_rel_error(self, prob['tgtK.x3'], 373.15, 1e-6)
indep_list = ['x1']
unknown_list = ['tgtF.x3', 'tgtC.x3', 'tgtK.x3']
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
return_format='dict')
assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
J = prob.calc_gradient(indep_list, unknown_list, mode='rev',
return_format='dict')
assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
J = prob.calc_gradient(indep_list, unknown_list, mode='fd',
return_format='dict')
assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['tgtK.x3']['x1'][0][0], 1.0, 1e-6)
def test_basic_grouped_grouped_implicit(self):
prob = Problem()
root = prob.root = Group()
sub1 = prob.root.add('sub1', Group(), promotes=['x2'])
sub2 = prob.root.add('sub2', Group(), promotes=['x2'])
        sub1.add('src', SrcComp(), promotes=['x2'])
sub2.add('tgtF', TgtCompFMulti(), promotes=['x2'])
sub2.add('tgtC', TgtCompC(), promotes=['x2'])
sub2.add('tgtK', TgtCompK(), promotes=['x2'])
prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
prob.root.connect('x1', 'sub1.src.x1')
prob.setup(check=False)
prob.run()
assert_rel_error(self, prob['x2'], 100.0, 1e-6)
assert_rel_error(self, prob['sub2.tgtF.x3'], 212.0, 1e-6)
assert_rel_error(self, prob['sub2.tgtC.x3'], 100.0, 1e-6)
assert_rel_error(self, prob['sub2.tgtK.x3'], 373.15, 1e-6)
indep_list = ['x1']
unknown_list = ['sub2.tgtF.x3', 'sub2.tgtC.x3', 'sub2.tgtK.x3']
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
return_format='dict')
assert_rel_error(self, J['sub2.tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['sub2.tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['sub2.tgtK.x3']['x1'][0][0], 1.0, 1e-6)
J = prob.calc_gradient(indep_list, unknown_list, mode='rev',
return_format='dict')
assert_rel_error(self, J['sub2.tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['sub2.tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['sub2.tgtK.x3']['x1'][0][0], 1.0, 1e-6)
J = prob.calc_gradient(indep_list, unknown_list, mode='fd',
return_format='dict')
assert_rel_error(self, J['sub2.tgtF.x3']['x1'][0][0], 1.8, 1e-6)
assert_rel_error(self, J['sub2.tgtC.x3']['x1'][0][0], 1.0, 1e-6)
assert_rel_error(self, J['sub2.tgtK.x3']['x1'][0][0], 1.0, 1e-6)
def test_apply_linear_adjoint(self):
# Make sure we can index into dparams
class Attitude_Angular(Component):
""" Calculates angular velocity vector from the satellite's orientation
matrix and its derivative.
"""
def __init__(self, n=2):
super(Attitude_Angular, self).__init__()
self.n = n
# Inputs
self.add_param('O_BI', np.zeros((3, 3, n)), units="ft",
desc="Rotation matrix from body-fixed frame to Earth-centered "
"inertial frame over time")
self.add_param('Odot_BI', np.zeros((3, 3, n)), units="km",
desc="First derivative of O_BI over time")
# Outputs
self.add_output('w_B', np.zeros((3, n)), units="1/s",
desc="Angular velocity vector in body-fixed frame over time")
self.dw_dOdot = np.zeros((n, 3, 3, 3))
self.dw_dO = np.zeros((n, 3, 3, 3))
def solve_nonlinear(self, params, unknowns, resids):
""" Calculate output. """
O_BI = params['O_BI']
Odot_BI = params['Odot_BI']
w_B = unknowns['w_B']
for i in range(0, self.n):
w_B[0, i] = np.dot(Odot_BI[2, :, i], O_BI[1, :, i])
w_B[1, i] = np.dot(Odot_BI[0, :, i], O_BI[2, :, i])
w_B[2, i] = np.dot(Odot_BI[1, :, i], O_BI[0, :, i])
def linearize(self, params, unknowns, resids):
""" Calculate and save derivatives. (i.e., Jacobian) """
O_BI = params['O_BI']
Odot_BI = params['Odot_BI']
for i in range(0, self.n):
self.dw_dOdot[i, 0, 2, :] = O_BI[1, :, i]
self.dw_dO[i, 0, 1, :] = Odot_BI[2, :, i]
self.dw_dOdot[i, 1, 0, :] = O_BI[2, :, i]
self.dw_dO[i, 1, 2, :] = Odot_BI[0, :, i]
self.dw_dOdot[i, 2, 1, :] = O_BI[0, :, i]
self.dw_dO[i, 2, 0, :] = Odot_BI[1, :, i]
def apply_linear(self, params, unknowns, dparams, dunknowns, dresids, mode):
""" Matrix-vector product with the Jacobian. """
dw_B = dresids['w_B']
if mode == 'fwd':
for k in range(3):
for i in range(3):
for j in range(3):
if 'O_BI' in dparams:
dw_B[k, :] += self.dw_dO[:, k, i, j] * \
dparams['O_BI'][i, j, :]
if 'Odot_BI' in dparams:
dw_B[k, :] += self.dw_dOdot[:, k, i, j] * \
dparams['Odot_BI'][i, j, :]
else:
for k in range(3):
for i in range(3):
for j in range(3):
if 'O_BI' in dparams:
dparams['O_BI'][i, j, :] += self.dw_dO[:, k, i, j] * \
dw_B[k, :]
if 'Odot_BI' in dparams:
dparams['Odot_BI'][i, j, :] -= -self.dw_dOdot[:, k, i, j] * \
dw_B[k, :]
prob = Problem()
root = prob.root = Group()
prob.root.add('comp', Attitude_Angular(n=5), promotes=['*'])
prob.root.add('p1', IndepVarComp('O_BI', np.ones((3, 3, 5))), promotes=['*'])
prob.root.add('p2', IndepVarComp('Odot_BI', np.ones((3, 3, 5))), promotes=['*'])
prob.setup(check=False)
prob.run()
indep_list = ['O_BI', 'Odot_BI']
unknown_list = ['w_B']
Jf = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
return_format='dict')
indep_list = ['O_BI', 'Odot_BI']
unknown_list = ['w_B']
Jr = prob.calc_gradient(indep_list, unknown_list, mode='rev',
return_format='dict')
for key, val in iteritems(Jr):
for key2 in val:
diff = abs(Jf[key][key2] - Jr[key][key2])
assert_rel_error(self, diff, 0.0, 1e-10)
def test_incompatible_connections(self):
class BadComp(Component):
def __init__(self):
super(BadComp, self).__init__()
self.add_param('x2', 100.0, units='m')
self.add_output('x3', 100.0)
# Explicit Connection
prob = Problem()
prob.root = Group()
prob.root.add('src', SrcComp())
prob.root.add('dest', BadComp())
prob.root.connect('src.x2', 'dest.x2')
with self.assertRaises(Exception) as cm:
prob.setup(check=False)
expected_msg = "Unit 'degC' in source 'src.x2' is incompatible with unit 'm' in target 'dest.x2'."
self.assertTrue(expected_msg in str(cm.exception))
# Implicit Connection
prob = Problem()
prob.root = Group()
prob.root.add('src', SrcComp(), promotes=['x2'])
        prob.root.add('dest', BadComp(), promotes=['x2'])
with self.assertRaises(Exception) as cm:
prob.setup(check=False)
expected_msg = "Unit 'degC' in source 'src.x2' (x2) is incompatible with unit 'm' in target 'dest.x2' (x2)."
self.assertTrue(expected_msg in str(cm.exception))
class PBOSrcComp(Component):
def __init__(self):
super(PBOSrcComp, self).__init__()
self.add_param('x1', 100.0)
self.add_output('x2', 100.0, units='degC', pass_by_obj=True)
self.deriv_options['type'] = 'fd'
def solve_nonlinear(self, params, unknowns, resids):
""" No action."""
unknowns['x2'] = params['x1']
class PBOTgtCompF(Component):
def __init__(self):
super(PBOTgtCompF, self).__init__()
self.add_param('x2', 100.0, units='degF', pass_by_obj=True)
self.add_output('x3', 100.0)
self.deriv_options['type'] = 'fd'
def solve_nonlinear(self, params, unknowns, resids):
""" No action."""
unknowns['x3'] = params['x2']
class TestUnitConversionPBO(unittest.TestCase):
""" Tests support for unit conversions on pass_by_obj connections."""
def test_basic(self):
prob = Problem()
prob.root = Group()
prob.root.add('src', PBOSrcComp())
prob.root.add('tgtF', PBOTgtCompF())
prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
prob.root.connect('x1', 'src.x1')
prob.root.connect('src.x2', 'tgtF.x2')
prob.root.deriv_options['type'] = 'fd'
prob.setup(check=False)
prob.run()
assert_rel_error(self, prob['src.x2'], 100.0, 1e-6)
assert_rel_error(self, prob['tgtF.x3'], 212.0, 1e-6)
indep_list = ['x1']
unknown_list = ['tgtF.x3']
J = prob.calc_gradient(indep_list, unknown_list, mode='fwd',
return_format='dict')
assert_rel_error(self, J['tgtF.x3']['x1'][0][0], 1.8, 1e-6)
stream = cStringIO()
conv = prob.root.list_unit_conv(stream=stream)
self.assertTrue((('src.x2', 'tgtF.x2'), ('degC', 'degF')) in conv)
def test_radian_bug(self):
class Src(Component):
def __init__(self):
super(Src, self).__init__()
self.add_output('x1', 180.0, units='deg')
self.add_output('x2', np.pi, units='rad')
self.add_output('x3', 2.0, units='m')
self.deriv_options['type'] = 'fd'
def solve_nonlinear(self, params, unknowns, resids):
""" No action."""
pass
class Tgt(Component):
def __init__(self):
super(Tgt, self).__init__()
self.add_param('x1', 0.0, units='rad')
self.add_param('x2', 0.0, units='deg')
self.add_param('x3', 0.0, units='ft')
self.deriv_options['type'] = 'fd'
def solve_nonlinear(self, params, unknowns, resids):
""" No action."""
pass
top = Problem()
root = top.root = Group()
root.add('src', Src())
root.add('tgt', Tgt())
root.connect('src.x1', 'tgt.x1')
root.connect('src.x2', 'tgt.x2')
root.connect('src.x3', 'tgt.x3')
top.setup(check=False)
top.run()
assert_rel_error(self, top['tgt.x1'], np.pi, 1e-6)
assert_rel_error(self, top['tgt.x2'], 180.0, 1e-6)
assert_rel_error(self, top['tgt.x3'], 2.0/0.3048, 1e-6)
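
# Hedged editorial addition (not part of the original test suite): a minimal, standalone
# sketch of the unit-conversion pattern exercised above, using only the SrcComp/TgtCompF
# components defined in this file and the OpenMDAO 1.x API already imported here.
def _demo_unit_conversion():
    """Wire a degC output into a degF param and let the framework convert."""
    prob = Problem()
    prob.root = Group()
    prob.root.add('px1', IndepVarComp('x1', 100.0), promotes=['x1'])
    prob.root.add('src', SrcComp())
    prob.root.add('tgtF', TgtCompF())
    prob.root.connect('x1', 'src.x1')
    prob.root.connect('src.x2', 'tgtF.x2')  # degC output feeding a degF param
    prob.setup(check=False)
    prob.run()
    return prob['tgtF.x3']  # 100 degC arrives as 212 degF, so this is ~212.0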
if __name__ == "__main__":
unittest.main()
avg_line_length: 36.858516 | max_line_length: 116 | alphanum_fraction: 0.527634

hexsha: ec6e246da714c454427400b1e1ed447318783ad5 | size: 4,116 | ext: py | lang: Python
max_stars_repo_path: benchmark/startQiskit_noisy2464.py | max_stars_repo_name: UCLA-SEAL/QDiff | max_stars_repo_head_hexsha: d968cbc47fe926b7f88b4adf10490f1edd6f8819 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: benchmark/startQiskit_noisy2464.py | max_issues_repo_name: UCLA-SEAL/QDiff | max_issues_repo_head_hexsha: d968cbc47fe926b7f88b4adf10490f1edd6f8819 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: benchmark/startQiskit_noisy2464.py | max_forks_repo_name: UCLA-SEAL/QDiff | max_forks_repo_head_hexsha: d968cbc47fe926b7f88b4adf10490f1edd6f8819 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# qubit number=4
# total number=37
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
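
# Editorial note: worked examples for the two helpers above (added for clarity, not in the
# original benchmark). Both operate on bit strings:
#   bitwise_dot("111", "101") == "0"    because 1*1 + 1*0 + 1*1 = 2 and 2 % 2 == 0
#   bitwise_xor("110", "000") == "011"  note that the result is reversed by res[::-1]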
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
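
# Hedged usage sketch (editorial addition): build the oracle for a toy 2-qubit function and
# render it. Only build_oracle() above and the qiskit imports already in this file are used;
# the function name and the choice of f are illustrative assumptions.
def _demo_build_oracle():
    f = lambda rep: "1" if rep == "11" else "0"  # f marks the single input |11>
    oracle = build_oracle(2, f)                  # 2 control qubits plus 1 target qubit
    return oracle.draw()                         # text drawing of the constructed gate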
def make_circuit(n: int, f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=31
prog.cz(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.h(input_qubit[3]) # number=30
prog.x(input_qubit[3]) # number=11
prog.h(input_qubit[3]) # number=13
prog.cz(input_qubit[0],input_qubit[3]) # number=14
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[3],input_qubit[1]) # number=19
prog.z(input_qubit[3]) # number=25
prog.h(input_qubit[1]) # number=20
prog.rx(-3.141592653589793,input_qubit[3]) # number=26
prog.h(input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[2]) # number=17
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=27
prog.cz(input_qubit[1],input_qubit[0]) # number=28
prog.h(input_qubit[0]) # number=29
prog.cx(input_qubit[1],input_qubit[0]) # number=22
prog.cx(input_qubit[0],input_qubit[1]) # number=34
prog.x(input_qubit[1]) # number=35
prog.cx(input_qubit[0],input_qubit[1]) # number=36
prog.x(input_qubit[1]) # number=24
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2464.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
avg_line_length: 34.3 | max_line_length: 140 | alphanum_fraction: 0.652818