| Column | Type | Range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 – 209 |
| max_stars_repo_name | string | length 5 – 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64, nullable | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 4 – 209 |
| max_issues_repo_name | string | length 5 – 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64, nullable | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 4 – 209 |
| max_forks_repo_name | string | length 5 – 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64, nullable | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
hexsha: aa2d658bf6eecc23e7845825a913f9c0632d171d | size: 50,811 | ext: py | lang: Python
max_stars: path qiskit_metal/qlibrary/core/base.py, repo TomVethaak/qiskit-metal, head 0fd3049b16a2b28dc6890b696d67329a91da70b9, licenses ["Apache-2.0"], count null, event dates null
max_issues: path qiskit_metal/qlibrary/core/base.py, repo TomVethaak/qiskit-metal, head 0fd3049b16a2b28dc6890b696d67329a91da70b9, licenses ["Apache-2.0"], count null, event dates null
max_forks: path qiskit_metal/qlibrary/core/base.py, repo TomVethaak/qiskit-metal, head 0fd3049b16a2b28dc6890b696d67329a91da70b9, licenses ["Apache-2.0"], count 1, event dates 2022-01-26T06:21:27.000Z to 2022-01-26T06:21:27.000Z
content:
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""This is the main module that defines what a QComponent is in Qiskit Metal.
To see the docstring of QComponent in Jupyter notebook, use:
>> ?QComponent
"""
# pylint: disable=too-many-lines, too-many-public-methods
import logging
import inspect
import random
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Iterable, List, Union, Tuple, Dict as Dict_
from datetime import datetime
import pandas as pd
import numpy as np
import pprint
from inspect import signature
from qiskit_metal import draw
from qiskit_metal import is_design, logger
import qiskit_metal.qlibrary as qlibrary
from qiskit_metal import config
from qiskit_metal.draw import BaseGeometry
from qiskit_metal.toolbox_python.attr_dict import Dict
from qiskit_metal.toolbox_python.display import format_dict_ala_z
from qiskit_metal.qlibrary.core._parsed_dynamic_attrs import ParsedDynamicAttributes_Component
if not config.is_building_docs():
from ...draw import Vector
__all__ = ['QComponent']
if TYPE_CHECKING:
# For linting typechecking, import modules that can't be loaded here under normal conditions.
# For example, I can't import QDesign, because it requires QComponent first. We have the
# chicken and egg issue.
from ...designs import QDesign
import matplotlib
class QComponent():
"""`QComponent` is the core class for all Metal components and is the
central construct from which all components in Metal are derived.
The class defines the user interface for working with components.
For front-end user:
* Manipulates the dictionary of options (stored as string-string key-value
pairs) to change the geometry and properties of the component.
* The options of the class are stored in an options dictionary. These
include the geometric sizes, such as width='10um' or height='5mm', etc.
* The `make` function parses these strings and implements the logic required
to transform the dictionary of options (stored as strings) into shapes
with associated properties.
For creator user:
* The creator user implements the `make` function (see above)
* The class defines the internal representation of a component
* The class provides the interfaces for the component (creator user)
Default Options:
* pos_x/_y: '0.0um' -- The x/y position of the center of the QComponent.
* orientation: '0.0' -- The primary direction in degrees of the QComponent.
Expressed as a counter-clockwise rotation.
* chip: 'main' -- Chip holding the QComponent.
* layer: '1' -- Manufacturing layer used for the QComponent.
Nested default options can be overwritten with the update function.
The following code demonstrates how the update works.
.. code-block:: python
:linenos:
from qiskit_metal import Dict
default = Dict(
a=1,
b=2,
c=Dict(
d=3,
e=4,
f=Dict(
g=6,
h=7
)
)
)
overwrite = Dict(
a=10,
b=20,
c=Dict(
d=30,
f=Dict(
h=70
)
),
z=33
)
default.update(overwrite)
default
>> {'a': 10, 'b': 20, 'c': {'d': 30, 'e': 4, 'f': {'g': 6, 'h': 70}}, 'z': 33}
"""
# pylint: disable=too-many-instance-attributes
default_options = Dict(pos_x='0.0um',
pos_y='0.0um',
orientation='0.0',
chip='main',
layer='1')
"""Default drawing options"""
component_metadata = Dict()
"""Component metadata"""
TOOLTIP = """QComponent"""
options = {}
"""A dictionary of the component-designer-defined options.
These options are used in the make function to create the QGeometry and QPins.
All options should have string keys and preferably string values.
"""
# Dummy private attribute used to check if an instantiated object is
# indeed a QComponent class. The problem is that the `isinstance`
# built-in method fails when this module is reloaded.
# Used by `is_component` to check.
__i_am_component__ = True
def __init__(self,
design: 'QDesign',
name: str = None,
options: Dict = None,
make=True,
component_template: Dict = None) -> Union[None, str]:
"""Create a new Metal component and adds it's default_options to the
design.
Args:
design (QDesign): The parent design.
name (str): Name of the component. Auto-named if possible.
options (dict): User options that will override the defaults. Defaults to None.
make (bool): True if the make function should be called at the end of the init.
Options are used in the make function to create the geometry. Defaults to True.
component_template (dict): User can overwrite the template options for the component
that will be stored in the design, in design.template,
and used every time a new component is instantiated. Defaults to None.
Raises:
ValueError: User supplied design isn't a QDesign
Note: Information copied from QDesign class.
self._design.overwrite_enabled (bool):
When True - If the string name, used for component, already
exists in the design, the existing component will be
deleted from design, and new component will be generated
with the same name and newly generated component_id,
and then added to design.
When False - If the string name, used for component, already
exists in the design, the existing component will be
kept in the design, the current component will not be generated,
nor added to the design. The variable self.status
will still be NotBuilt, as opposed to Initialization Successful.
Either True or False - If string name, used for component, is NOT
being used in the design, a component will be generated and
added to design using the name.
"""
# Make the id be None, which means it hasn't been added to design yet.
self._id = None
self._made = False
self._component_template = component_template
# Status: used to handle building of a component and checking if it succeeded or failed.
self.status = 'Not Built'
if not is_design(design):
raise ValueError(
"Error you did not pass in a valid Metal QDesign object as a '\
'parent of this QComponent.")
self._design = design # reference to parent
# pylint: disable=literal-comparison
if self._delete_evaluation(name) == 'NameInUse':
raise ValueError(
f"{name} already exists! Please choose a different name for your new QComponent"
)
self._name = name
self._class_name = self._get_unique_class_name() # Full class name
#: A dictionary of the component-designer-defined options.
#: These options are used in the make function to create the QGeometry and QPins.
#: All options should have string keys and preferably string values.
self.options = self.get_template_options(
design=design, component_template=component_template)
if options:
self.options.update(options)
# Parser for options
self.p = ParsedDynamicAttributes_Component(self)
# Should put this earlier so could pass in other error messages?
self._error_message = ''
if self._check_pin_inputs():
self.logger.warning(self._error_message)
return
# Build the component internals
#: Dictionary of pins. Populated by component designer in make function using `add_pin`.
self.pins = Dict()
#: Metadata allows a designer to store extra information or analysis results.
self.metadata = Dict()
# Add the component to the parent design
self._id = self.design._get_new_qcomponent_id() # Create the unique id
self._add_to_design() # Do this after the pin checking?
#: Stores the latest status of the component. Values include:
#: ``Initialization Successful``, ``Build Failed``, etc.
self.status = 'Initialization Successful'
# Used for short name, and renderers adding information to tables.
self.a_metadata = self._gather_all_children_metadata()
# Auto naming - add id to component based on type
if name is None:
prefix = self.a_metadata
# limit names to 24 characters
name_trunc = 24
# if no prefix, use class name
if "short_name" not in prefix:
short_name = self.__class__.__name__[:name_trunc]
else:
short_name = prefix['short_name'][:name_trunc]
name_id = self.design._get_new_qcomponent_name_id(short_name)
# rename loop to make sure that no component manually named by the user conflicts
while self.design.rename_component(
self._id, short_name + "_" + str(name_id)) != 1:
name_id = self.design._get_new_qcomponent_name_id(short_name)
# Add keys for each type of table. add_qgeometry() will update bool if the table is used.
self.qgeometry_table_usage = Dict()
self.populate_to_track_table_usage()
# Make the component geometry
if make:
self.rebuild()
@classmethod
def _gather_all_children_options(cls) -> dict:
"""From the QComponent core class, traverse the child classes to
gather the `default_options` for each child class.
Collects the options starting with the basecomponent,
and stepping through the children.
Each child adds its options to the base options. If the
key is the same, the option of the youngest child is used.
Note: if keys are the same for child and grandchild,
grandchild will overwrite child
Init method.
Returns:
dict: options from all children
"""
options_from_children = {}
parents = inspect.getmro(cls)
# len-2: generic "object" does not have default_options.
for child in parents[len(parents) - 2::-1]:
# The template default options are in a class dict attribute `default_options`.
if hasattr(child, 'default_options'):
options_from_children = {
**options_from_children,
**child.default_options
}
if qlibrary.core.qroute.QRoute in parents:
options_from_children.pop("pos_x", None)
options_from_children.pop("pos_y", None)
options_from_children.pop("orientation", None)
return options_from_children
@classmethod
def _gather_all_children_metadata(cls) -> dict:
"""From the QComponent core class, traverse the child classes to
gather the component_metadata for each child class.
Note: if keys are the same for child and grandchild, grandchild will overwrite child
Init method.
Returns:
dict: Metadata from all children.
"""
metadata_from_children = {}
parents = inspect.getmro(cls)
# Base.py is not expected to have component_metadata dict to add to design class.
for child in parents[len(parents) - 2::-1]:
# There is a developer agreement so the defaults will be in dict named component_metadata.
if hasattr(child, 'component_metadata'):
metadata_from_children = {
**metadata_from_children,
**child.component_metadata
}
return metadata_from_children
@classmethod
def _get_unique_class_name(cls) -> str:
"""Returns unique class name based on the module.
Returns:
str: Example: 'qiskit_metal.qlibrary.qubits.transmon_pocket.TransmonPocket'
"""
return f'{cls.__module__}.{cls.__name__}'
@classmethod
def _register_class_with_design(cls, design: 'QDesign', template_key: str,
component_template: Dict):
"""Init function to register a component class with the design when
first instantiated. Registers the design template options.
Args:
design (QDesign): The parent design
template_key (str): Key to use
component_template (dict): Template of components to copy, with renderer options
"""
# do not overwrite
if template_key not in design.template_options:
# if not component_template:
# component_template = cls._gather_all_children_options()
children_options = cls._gather_all_children_options()
options_template_renderer = {
**children_options,
**component_template
}
# design.template_options[template_key] = deepcopy(
# component_template)
design.template_options[template_key] = deepcopy(
options_template_renderer)
@property
def name(self) -> str:
"""Name of the component."""
return self._name
@name.setter
def name(self, new_name: str):
"""Rename the component. Change the design dictionaries as well. handle
components. Delete and remake.
Returns:
bool: True if successful, otherwise failure code
"""
return_code = self.design.rename_component(self.id, new_name)
if return_code is not True:
logger.warning(
f'In design_base.name, the new_name={new_name} was not set. ')
return return_code
@property
def design(self) -> 'QDesign':
"""Return a reference to the parent design object.
Returns:
QDesign: design
"""
return self._design
@property
def class_name(self) -> str:
"""Return the full name of the class: the full module name with the
class name. e.g., qiskit_metal.qlibrary.qubits.TransmonPocket.
Returns:
str: Class name
"""
return self._class_name
@property
def logger(self) -> logging.Logger:
"""The Qiskit Metal Logger.
Returns:
logging.Logger: Logger
"""
return self._design.logger
@property
def pin_names(self) -> set:
"""The names of the pins.
Returns:
set: Set of pin names
"""
return set(self.pins.keys())
@property
# pylint: disable=invalid-name
def id(self) -> int:
"""The unique id of component within a design.
Returns:
int: Component id
"""
return self._id
def _add_to_design(self):
"""Add self to design objects dictionary.
Method will obtain a unique id for the component within a
design, THEN add itself to design.
"""
# pylint: disable=protected-access
self.design._components[self.id] = self
self.design.name_to_id[self.name] = self._id
@classmethod
def get_template_options(cls,
design: 'QDesign',
component_template: Dict = None,
logger_: logging.Logger = None,
template_key: str = None) -> Dict:
"""Creates template options for the Metal Component class required for
the class to function, based on the design template; i.e., be created,
made, and rendered. Provides the blank option structure required.
The options can be extended by plugins, such as renderers.
Args:
design (QDesign): Design class. Should be the class, not the instance.
component_template (Dict): Template options to overwrite the class ones (default: None)
logger_ (logging.Logger): A logger for errors. Defaults to None.
template_key (str): The template key identifier. If None, then uses
cls._get_unique_class_name(). Defaults to None.
Returns:
Dict: dictionary of default options based on design template.
"""
# get key for templates
if template_key is None:
template_key = cls._get_unique_class_name()
renderer_key_values = cls._get_table_values_from_renderers(design)
# Think
if component_template is not None:
renderer_and_component_template = {
**renderer_key_values,
**component_template
}
else:
renderer_and_component_template = renderer_key_values
if template_key not in design.template_options:
cls._register_class_with_design(design, template_key,
renderer_and_component_template)
if template_key not in design.template_options:
logger_ = logger_ or design.logger
if logger_:
logger_.error(
f'ERROR in the creating component {cls.__name__}!\nThe default '
f'options for the component class {cls.__name__} are missing'
)
# Specific object template options
template_options = deepcopy(Dict(design.template_options[template_key]))
return template_options
def _delete_evaluation(self, check_name: str = None):
"""When design.overwrite_enabled, the user is allowed to delete an
existing component within the design if the name is being used.
Args:
check_name (str, optional): Name of new component. Defaults to None.
Returns:
string: Return 'NameInUse' if overwrite flag is False and
check_name is already being used within design.
Otherwise return None.
"""
answer = self._is_name_used(check_name)
if self._design.overwrite_enabled and answer:
self._design.delete_component(check_name)
elif answer:
logger.warning(
f'The QComponent name `{check_name}` is already in use, '
f'by a component (with QComponent id={answer}).\n'
f'QComponent NOT made, nor added to the design. \n'
'To force overwrite a QComponent with an existing name '
'use the flag:\n`design.overwrite_enabled = True`.')
return 'NameInUse'
return None
def make(self):
"""The make function implements the logic that creates the geometry
(poly, path, etc.) from the qcomponent.options dictionary of
parameters, and then adds them to the design, using
qcomponent.add_qgeometry(...), adding in extra needed information, such
as layer, subtract, etc.
Use the qiskit_metal.draw module to create the geometry.
**Note:**
* This method should be overwritten by the children make function.
* This function only contains the logic, the actual call to make the element is in
rebuild()
Raises:
NotImplementedError: Overwrite this function by subclassing.
"""
raise NotImplementedError()
def to_script(self,
thin: bool = False,
is_part_of_chip: bool = False) -> Tuple:
"""
Args:
thin: If true then any key in the QComponent's options whose value
is the same value as the default will not be included in the body
is_part_of_chip: If true, body will not include header code
Returns: Code that if copy-pasted into a .py file would generate
an instance of this class with the same properties as the instance calling
this function
"""
def is_default_options(k):
""" Returns true if option's key value is the same as the default value """
temp_option = self.get_template_options(self.design)
def_options = self.default_options
if (k in def_options and def_options[k] == self.options[k]):
return True
if (k in temp_option and temp_option[k] == self.options[k]):
return True
return False
module = self._get_unique_class_name()
cls = '.'.join(module.split('.')[:-1])
obj_name = module.split('.')[-1]
### constructing imports ###
#header
if not is_part_of_chip:
header = """
from qiskit_metal import designs, MetalGUI
design = designs.DesignPlanar()
gui = MetalGUI(design)
"""
else:
header = ""
# component import
comp_import = f"""from {cls} import {obj_name}"""
full_import = header + comp_import
### constructing qcomponent instantiation ###
## setting up options
if thin:
body_options = {}
for k in self.options:
if not is_default_options(k):
body_options[k] = self.options[k]
else:
body_options = self.options
if len(body_options) < 1:
str_options = ""
else:
pp = pprint.PrettyPrinter(width=41, compact=False)
str_options = f"""options={pp.pformat(body_options)}"""
## setting up component-specific args
# get init from child?
to_ignore = {
'self', 'name', 'design', 'make', 'kwargs', 'options', 'args'
}
class_signature = signature(self.__class__.__init__)
failed = set()
params = dict()
str_params = ""
for _, param in class_signature.parameters.items():
if param.name not in to_ignore:
param_name = param.name
if param_name in self.__dict__:
param_name = param.name
param_val = self.__dict__[param.name]
if type(param_val) is str:
param_val = f"'{param_val}'"
params[param_name] = param_val
elif '_' + param_name in self.__dict__:
priv_param_name = '_' + param_name
param_val = self.__dict__[priv_param_name]
if type(param_val) is str:
param_val = f"'{param_val}'"
params[param_name] = param_val
else:
failed.add(param_name)
for k, v in params.items():
str_params += f"""
{k}={v},"""
str_failed = ""
if len(failed) > 0:
str_failed += """
# WARNING"""
for k in failed:
str_failed += f"""
#{k} failed to have a value"""
## setting up metadata
if len(self.metadata) > 1:
str_meta_d = f"""
{self.name}.meta = {self.metadata}
"""
else:
str_meta_d = ""
## cleaning up
strname = self.name
if not strname.isidentifier():
if "-" in strname:
strname = strname.replace("-", "")
if not strname.isidentifier():
strname = obj_name + str(random.randint(0, 1000))  # randint needs a range; use the class name so the result is an identifier
other_args = ""
if str_options != "":
other_args += """,
""" + str_options
if str_params != "":
other_args += """,
""" + str_params
## setting up instantiation
body = f"""
{str_failed}
{strname} = {obj_name}(
design,
name='{strname}'{other_args}
)
{str_meta_d}
"""
return full_import, body
def rebuild(self):
"""Builds the QComponent.
This is the main action function of a
QComponent, call it qc. It converts the qc.options into QGeometry with
all of the required options, such as the geometry points, layer number,
materials, etc. needed to render.
The build clears the existing QGeometry and QPins and then calls the qc.make function,
which is written by the component developer to implement the logic (using the metal.
draw module) to convert the qc.options into the QGeometry.
*Build status:*
The function also sets the build status of the component.
The status is set to `failed` at the start of the build, and then to `good` when the build
completes with no errors. The user can also set other statuses, which can appear if the code
fails to reach the final line of the build, where the build status is set to `good`.
Raises:
Exception: Component build failure
"""
self.status = 'failed'
try:
if self._made: # already made, just remaking
self.design.qgeometry.delete_component_id(self.id)
# pylint: disable=protected-access
self.design._delete_all_pins_for_component(self.id)
self.make()
self._made = True
self.status = 'good'
self.design.build_logs.add_success(
f"{str(datetime.now())} -- Component: {self.name} successfully built"
)
except Exception as error:
self.logger.error(
f'ERROR in building component name={self.name}, error={error}')
self.design.build_logs.add_error(
f"{str(datetime.now())} -- Component: {self.name} failed with error\n: {error}"
)
raise error
def delete(self):
"""Delete the QComponent.
Removes QGeometry, QPins, etc. from the design.
"""
self.design.delete_component(self.name)
# Maybe still should be fine as any values will be in component options still?
# Though the data table approach and rendering directly via shapely could lead to problem
# with variable use
def parse_value(
self, value: Union[Any, List, Dict, Iterable]
) -> Union[Any, List, Dict, Iterable]:
"""Parse a string, mappable (dict, Dict), iterable (list, tuple) to
account for units conversion, some basic arithmetic, and design
variables. This is the main parsing function of Qiskit Metal.
Args:
value (str): String to parse *or*
variable_dict (dict): dict pointer of variables
Return:
str, float, list, tuple, or ast eval: Parsed value
Handled Inputs:
Strings:
Strings of numbers, numbers with units; e.g., '1', '1nm', '1 um'
Converts to int or float.
Some basic arithmetic is possible, see below.
Strings of variables 'variable1'.
Variable interpretation will use the string method
`isidentifier`: `'variable1'.isidentifier()`
Dictionaries:
Returns ordered `Dict` with same key-value mappings, where the values have
been subjected to parse_value.
Iterables (list, tuple, ...):
Returns same kind and calls itself `parse_value` on each element.
Numbers:
Returns the number as is. Int to int, etc.
Arithmetic:
Some basic arithmetic can be handled as well, such as `'-2 * 1e5 nm'`
will yield float(-0.2) when the default units are set to `mm`.
Default units:
User units can be set in the design. The design will set config.DEFAULT.units
Examples:
See the docstring for this module.
>> ?qiskit_metal.toolbox_metal.parsing
"""
return self.design.parse_value(value)
def parse_options(self, options: Dict = None) -> Dict:
"""Parse the options, converting string into interpreted values. Parses
units, variables, strings, lists, and dictionaries. Explained by
example below.
Args:
options (dict) : If left None, then self.options is used. Defaults to None.
Returns:
dict: Parsed value
Calls `self.design.parse_options`.
See `self.parse_value` for more information.
"""
return self.design.parse_value(options if options else self.options)
def _is_name_used(self, check_name: str) -> int:
"""Used to check if name of component already exists.
Args:
check_name (str): Name which user requested to apply to current component.
Returns:
int: 0 if the name does not exist, otherwise
component-id of component which is already using the name.
Warning: If user has used this text version of the component name already,
warning will be given to user.
"""
if check_name in self._design.name_to_id:
component_id = self._design.name_to_id[check_name]
# if not self._overwrite_flag:
# logger.warning(f"Called _is_name_used, "
# f"component_id({check_name}, id={component_id})"
# " is already being used in design.")
return component_id
return 0
####################################################################################
# Functions for handling of pins
def add_pin(
self,
name: str, # Should be static based on component designer's choice
points: np.ndarray,
width: float,
input_as_norm: bool = False,
chip: str = None,
gap: float = None): # gap defaults to 0.6 * width
"""Adds a pin from two points which are normal/tangent to the intended
plane of the pin. The normal should 'point' in the direction of
intended connection. Adds the new pin as a subdictionary to parent
component's pins dictionary.
Args:
* name (str): name of the pin
* points (numpy.ndarray): [[x1,y1],[x2,y2]] for the normal/tangent line
* width (float): the width of the intended connection (e.g. qubit bus pad arm)
* input_as_norm (bool): Indicates if the points are tangent or normal to the pin plane.
Defaults to False. Make True for normal.
* parent (Union[int,]): The id of the parent component.
* chip (str): the name of the chip the pin is located on. Defaults to None, which is
converted to self.options.chip.
* gap (float): the dielectric gap of the pin for the purpose of representing as a port
for simulations. Defaults to None which is converted to 0.6 * width.
Dictionary containing pins information:
* points (numpy.ndarray) - two (x,y) points which represent the edge of the pin for
another component to attach to (eg. the edge of a CPW TL)
* middle (numpy.ndarray) - an (x,y) which represents the middle of the points above,
where the pin is represented.
* normal (numpy.ndarray) - the normal vector of the pin, pointing in direction of
intended connection
* tangent (numpy.ndarray) - 90 degree rotation of normal vector
* width (float) - the width of the pin
* chip (str) - the chip the pin is on
* parent_name - the id of the parent component
* net_id - net_id of the pin if connected to another pin. Defaults to 0, indicates
not connected))
::
* = pin
. = outline of component
---> = the list being passed in as 'points' [[x1,y1],[x2,y2]]
normal vector
::
..........
.
--------->*
.
..........
tangent vector
::
..........^
.|
.*
.|
..........|
"""
assert len(points) == 2
if gap is None:
gap = width * 0.6
if chip is None:
chip = self.options.chip
rounding_val = self.design.template_options['PRECISION']
points = np.around(
points, rounding_val) #Need points to remain as shapely geom?
if input_as_norm:
middle_point = points[1]
vec_normal = points[1] - points[0]
vec_normal /= np.linalg.norm(vec_normal)
s_point = np.round(Vector.rotate(
vec_normal, (np.pi / 2))) * width / 2 + points[1]
e_point = np.round(Vector.rotate(
vec_normal, -(np.pi / 2))) * width / 2 + points[1]
points = [s_point, e_point]
tangent_vector = Vector.rotate(vec_normal, np.pi / 2)
else:
vec_dist, tangent_vector, vec_normal = draw.Vector.two_points_described(
points)
middle_point = np.sum(points, axis=0) / 2.
width = np.linalg.norm(vec_dist)
pin_dict = Dict(
points=points,
middle=np.around(middle_point, rounding_val),
normal=np.around(vec_normal, rounding_val),
tangent=np.around(tangent_vector, rounding_val),
width=np.around(width, rounding_val),
gap=np.around(gap, rounding_val),
chip=chip,
parent_name=self.id,
net_id=0,
# Place holder value for potential future property (auto-routing cpw with
# length limit)
length=0)
self.pins[name] = pin_dict
def get_pin(self, name: str) -> Dict:
"""Interface for components to get pin data.
Args:
name (str): Name of the desired pin.
Returns:
dict: Returns the data of the pin, see make_pin() for what those values are.
"""
return self.pins[name]
def _check_pin_inputs(self):
"""Checks that the pin_inputs are valid, sets an error message
indicating what the error is if the inputs are not valid. Checks
regardless of user passing the component name or component id (probably
a smoother way to do this check).
3 Error cases:
- Component does not exist
- Pin does not exist
- Pin is already attached to something
Returns:
str: Status text, or None
"""
# Add check for if user inputs nonsense?
# pylint: disable=protected-access
false_component = False
false_pin = False
pin_in_use = False
for pin_check in self.options.pin_inputs.values():
component = pin_check['component']
pin = pin_check['pin']
if isinstance(component, str):
if component not in self.design.components:
false_component = True
elif pin not in self.design.components[component].pins:
false_pin = True
elif self.design.components[component].pins[pin].net_id:
pin_in_use = True
elif isinstance(component, int):
if component not in self.design._components:
false_component = True
elif pin not in self.design._components[component].pins:
false_pin = True
elif self.design._components[component].pins[pin].net_id:
pin_in_use = True
# Should modify to allow for multiple error messages to be returned.
if false_component:
self._error_message = (
f'Component {component} does not exist. {self.name} has not been built. '
'Please check your pin_input values.')
return 'Component Does Not Exist'
if false_pin:
self._error_message = (
f'Pin {pin} does not exist in component {component}. '
f'{self.name} has not been built. Please check your pin_input values.'
)
return 'Pin Does Not Exist'
if pin_in_use:
self._error_message = (
f'Pin {pin} of component {component} is already in use. '
f'{self.name} has not been built. Please check your pin_input values.'
)
return 'Pin In Use'
return None
# This method does not appear to be being used anywhere.
def connect_components_already_in_design(self, pin_name_self: str,
comp2_id: int,
pin2_name: str) -> int:
"""WARNING: Do NOT use this method during generation of component instance.
This method is expecting self to be added to design._components dict. More importantly,
the unique id of self component needs to be in design._components dict.
Args:
pin_name_self (str): Name of pin within the component.
comp2_id (int): Component within design, but not self.
pin2_name (str): The pin of comp2_id that pin_name_self will connect to.
Returns:
int: A unique net_id for the connection.
"""
# pylint: disable=protected-access
net_id_rtn = 0
if self.id not in self.design._components:
# Component not in design.
logger.warning(
f'No connection made. Component_id {self.id} not in design.')
return net_id_rtn
if comp2_id not in self.design._components:
# Component not in design.
logger.warning(
f'No connection made. Component_id {comp2_id} not in design.')
return net_id_rtn
if self.design._components[self._id].pins[pin_name_self].net_id:
# Pin already in use.
logger.warning(
f'Component_id {self._id} not connected. The pin '
f'{pin_name_self} is already connected to something else.')
return net_id_rtn
if self.design._components[comp2_id].pins[pin2_name].net_id:
# Pin already in use.
logger.warning(
f'Component_id {comp2_id} not connected. The pin '
f'{pin2_name} is already connected to something else.')
return net_id_rtn
net_id_rtn = self.design.connect_pins(self.id, pin_name_self, comp2_id,
pin2_name)
return net_id_rtn
########################################################################
def add_dependency(self, parent: str, child: str):
"""Add a dependency between one component and another. Calls parent
design.
Args:
parent (str): The component on which the child depends
child (str): The child cannot live without the parent.
"""
self.design.add_dependency(parent, child)
##########################################
# QGeometry
def add_qgeometry(
self,
kind: str,
geometry: dict,
subtract: bool = False,
helper: bool = False,
layer: Union[int, str] = None, # chip will be here
chip: str = None,
**kwargs):
r"""Add QGeometry.
Takes any additional options in options.
Args:
kind (str): The kind of QGeometry, such as 'path', 'poly', etc.
All geometry in the dictionary should have the same kind,
such as Polygon or LineString.
geometry (Dict[BaseGeometry]): Key-value pairs of name of the geometry
you want to add and the value should be a shapely geometry object, such
as a Polygon or a LineString.
subtract (bool): Subtract from the layer. Defaults to False.
helper (bool): Is this a helper object. If true, subtract must be false
Defaults to False.
layer (int, str): The layer to which the set of QGeometry will belong
Defaults to None, which is converted to self.options.layer.
chip (str): Chip name. Defaults to None, which is converted to
self.options.chip.
kwargs (dict): Parameters dictionary
Assumptions:
* Assumes all geometry in the `geometry` argument are homogeneous in kind;
i.e., all lines or polys etc.
"""
# assert (subtract and helper) == False, "The object can't be a subtracted helper. Please"\
# " choose it to either be a helper or a a subtracted layer, but not both. Thank you."
if layer is None:
layer = self.options.layer
if chip is None:
chip = self.options.chip
if kind in self.qgeometry_table_usage.keys():
self.qgeometry_table_usage[kind] = True
else:
self.logger.warning(
f'Component with classname={self.class_name} does not know about '
f'table name "{kind}".')
renderer_key_values = self._get_specific_table_values_from_renderers(
kind)
for key in renderer_key_values:
if key in self.options:
renderer_key_values[key] = deepcopy(self.options[key])
# # if not already in kwargs, add renderer information to it.
renderer_and_options = {**renderer_key_values, **kwargs}
# When self.options is instantiated, the template_options are populated.
# renderer_and_options = {**self.options, **kwargs}
self.design.qgeometry.add_qgeometry(kind,
self.id,
geometry,
subtract=subtract,
helper=helper,
layer=layer,
chip=chip,
**renderer_and_options)
def _get_specific_table_values_from_renderers(self, kind: str) -> Dict:
"""Populate a dict to combine with options for the qcomponent.
Based on kind, which is the table name the component-developer denotes in the metadata,
assume those qgeometry.tables are used for the component. The method
will search a dict populated by all the renderers during their init.
Args:
kind (str): Name of table, like junction, path, or poly.
Returns:
Dict: key is column names for tables, value is data for the column.
"""
all_renderers_key_value = dict()
# design.renderer_defaults_by_table[table_name][renderer_name][column_name]
if kind in self.design.renderer_defaults_by_table:
for name_renderer, renderer_data in self.design.renderer_defaults_by_table[
kind].items():
if len(renderer_data) > 0:
for col_name, col_value in renderer_data.items():
render_col_name = f'{name_renderer}_{col_name}'
all_renderers_key_value[render_col_name] = col_value
return all_renderers_key_value
@classmethod
def _get_table_values_from_renderers(cls, design: 'QDesign') -> Dict:
"""Populate a dict to combine with options for the qcomponent.
Based on tables the component-developer denotes in the metadata,
assume those qgeometry.tables are used for the component. The method
will search a dict populated by all the renderers during their init.
Returns:
Dict: key is column names for tables, value is data for the column.
"""
metadata_dict = cls._gather_all_children_metadata()
tables_list = design.get_list_of_tables_in_metadata(metadata_dict)
all_renderers_key_value = dict()
# design.renderer_defaults_by_table[table_name][renderer_name][column_name]
for table in tables_list:
if table in design.renderer_defaults_by_table:
for name_renderer, renderer_data in design.renderer_defaults_by_table[
table].items():
if len(renderer_data) > 0:
for col_name, col_value in renderer_data.items():
render_col_name = f'{name_renderer}_{col_name}'
all_renderers_key_value[render_col_name] = col_value
return all_renderers_key_value
######################################
def __repr__(self, *args):
# pylint: disable=invalid-name
b = '\033[95m\033[1m'
b1 = '\033[94m\033[1m'
e = '\033[0m'
# id = {hex(id(self))}
# options = pprint.pformat(self.options)
options = format_dict_ala_z(self.options)
text = f"{b}name: {b1}{self.name}{e}\n"\
f"{b}class: {b1}{self.__class__.__name__:<22s}{e}\n"\
f"{b}options: {e}\n{options}\n"\
f"{b}module: {b1}{self.__class__.__module__}{e}\n"\
f"{b}id: {b1}{self.id}{e}\n"
return text
############################################################################
# Geometry handling of created qgeometry
@property
def qgeometry_types(self) -> List[str]:
"""Get a list of the names of the element tables.
Returns:
List[str]: Name of element table or type; e.g., 'poly' and 'path'
"""
return self.design.qgeometry.get_element_types()
def qgeometry_dict( # pylint: disable=inconsistent-return-statements
self, element_type: str) -> Dict_[str, BaseGeometry]:
"""Returns a dict of element qgeometry (shapely geometry) of the
component as a python dict, where the dict keys are the names of the
qgeometry and the corresponding values are the shapely geometries.
Args:
element_type (str): Name of element table or type; e.g., 'poly' and 'path'
Returns:
Dict[str, BaseGeometry]: Geometry dict, or None if there is an error in the name of the element
type (i.e. table)
"""
if element_type == 'all' or self.design.qgeometry.check_element_type(
element_type):
return self.design.qgeometry.get_component_geometry_dict(
self.name, element_type)
def qgeometry_list( # pylint: disable=inconsistent-return-statements
self,
element_type: str = 'all') -> List[BaseGeometry]:
"""Returns a list of element qgeometry (shapely geometry) of the
component as a python list of shapely geometries.
Args:
element_type (str): Name of element table or type; e.g., 'poly' and 'path'.
Can also specify all
Returns:
List[BaseGeometry]: Geometry list, or None if there is an error in the name of the element type
(i.e. table)
"""
if element_type == 'all' or self.design.qgeometry.check_element_type(
element_type):
return self.design.qgeometry.get_component_geometry_list(
self.name, element_type)
def qgeometry_table( # pylint: disable=inconsistent-return-statements
self, element_type: str) -> pd.DataFrame:
"""Returns the entire element table for the component.
Args:
element_type (str): Name of element table or type; e.g., 'poly' and 'path'
Returns:
pd.DataFrame: Element table for the component, or None if there is an error in the name of
the element type (i.e. table)
"""
if element_type == 'all' or self.design.qgeometry.check_element_type(
element_type):
return self.design.qgeometry.get_component(self.name, element_type)
def qgeometry_bounds(self) -> Tuple:
"""Fetched the component bound dict_value.
Returns:
tuple: containing (minx, miny, maxx, maxy) bound values for the bounds of the
component as a whole.
Uses:
design.qgeometry.get_component_bounds
"""
bounds = self.design.qgeometry.get_component_bounds(self.name)
return bounds
def qgeometry_plot(self,
ax: 'matplotlib.axes.Axes' = None,
plot_kw: dict = None) -> List:
"""Draw all the qgeometry of the component (polys and path etc.)
Args:
ax (matplotlib.axes.Axes): Matplotlib axis to draw on. Defaults to None.
When None, it gets the current axis.
plot_kw (dict): Parameters dictionary.
Returns:
List: The list of qgeometry drawn
Example use:
Suppose you had a component called q1:
fig, ax = draw.mpl.figure_spawn()
q1.qgeometry_plot(ax)
"""
qgeometry = self.qgeometry_list()
plot_kw = plot_kw or {}  # keep a caller-supplied plot_kw instead of discarding it
draw.mpl.render(qgeometry, ax=ax, kw=plot_kw)
return qgeometry
def populate_to_track_table_usage(self) -> None:
"""Use the element_handler to get a list of all the table names used in
QGeometry.
The dict qgeometry_table_usage should get updated by
add_qgeometry(). This dict is used to get a summary tables used
for this component.
"""
for table_name in self.design.qgeometry.tables.keys():
self.qgeometry_table_usage[table_name] = False
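# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# A minimal creator-user subclass, assuming an existing `design = DesignPlanar()`
# instance and the `draw.rectangle`/`draw.translate` helpers; the names `MyBox`,
# `width` and `height` are hypothetical. The only required step is to override
# `make()` and hand shapely geometry to `add_qgeometry()`:
#
#     class MyBox(QComponent):
#         default_options = Dict(width='500um', height='300um')
#
#         def make(self):
#             p = self.p  # parsed options: strings like '500um' become floats in design units
#             box = draw.rectangle(p.width, p.height)
#             box = draw.translate(box, p.pos_x, p.pos_y)
#             self.add_qgeometry('poly', {'box': box}, layer=p.layer, chip=p.chip)
#
#     q1 = MyBox(design, 'q1', options=dict(width='1mm'))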
avg_line_length: 38.232506 | max_line_length: 102 | alphanum_fraction: 0.58629
hexsha: 5c4ae145189874e8eebd87a4887386e1429fa322 | size: 1,175 | ext: py | lang: Python
max_stars: path sympy/strategies/traverse.py, repo eriknw/sympy, head b7544e2bb74c011f6098a7e886fd77f41776c2c4, licenses ["BSD-3-Clause"], count 7, event dates 2015-01-14T06:55:33.000Z to 2018-08-11T14:43:52.000Z
max_issues: path sympy/strategies/traverse.py, repo pbeltran/sympy-1, head 94f92b36731c2bebe6de1037c063c2a258a8a399, licenses ["BSD-3-Clause"], count 1, event dates 2018-02-19T04:56:04.000Z to 2018-02-19T04:56:04.000Z
max_forks: path sympy/strategies/traverse.py, repo pbeltran/sympy-1, head 94f92b36731c2bebe6de1037c063c2a258a8a399, licenses ["BSD-3-Clause"], count 1, event dates 2016-04-24T14:39:22.000Z to 2016-04-24T14:39:22.000Z
content:
""" Strategies to Traverse a Tree """
from sympy.strategies.util import basic_fns, expr_fns
from sympy.strategies.core import chain, do_one
def top_down(rule, fns=basic_fns):
""" Apply a rule down a tree running it on the top nodes first """
return chain(rule, lambda expr: sall(top_down(rule, fns), fns)(expr))
def bottom_up(rule, fns=basic_fns):
""" Apply a rule down a tree running it on the bottom nodes first """
return chain(lambda expr: sall(bottom_up(rule, fns), fns)(expr), rule)
def top_down_once(rule, fns=basic_fns):
""" Apply a rule down a tree - stop on success """
return do_one(rule, lambda expr: sall(top_down(rule, fns), fns)(expr))
def bottom_up_once(rule, fns=basic_fns):
""" Apply a rule up a tree - stop on success """
return do_one(lambda expr: sall(bottom_up(rule, fns), fns)(expr), rule)
def sall(rule, fns=basic_fns):
""" Strategic all - apply rule to args """
op, new, children, leaf = map(fns.get, ('op', 'new', 'children', 'leaf'))
def all_rl(expr):
if leaf(expr):
return expr
else:
args = map(rule, children(expr))
return new(op(expr), *args)
return all_rl
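# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# Applying a rewrite rule over a whole tree, top-down; the rule and expression
# below are hypothetical, assuming a plain SymPy environment:
#
#     from sympy import Basic, Integer
#     rule = lambda x: Integer(2) if x == Integer(1) else x
#     top_down(rule)(Basic(Integer(1), Integer(1), Integer(3)))
#     # -> Basic(Integer(2), Integer(2), Integer(3)), i.e. every 1 rewritten to 2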
avg_line_length: 37.903226 | max_line_length: 77 | alphanum_fraction: 0.651064
hexsha: 6f02fcaa58dad0981b449eee505ecc45519262b0 | size: 1,004 | ext: py | lang: Python
max_stars: path ex105.py, repo arthurfas123/Curso-De-Python, head c4a15d92811bd101a8562d2c3a90fe2d5a3c360d, licenses ["MIT"], count null, event dates null
max_issues: path ex105.py, repo arthurfas123/Curso-De-Python, head c4a15d92811bd101a8562d2c3a90fe2d5a3c360d, licenses ["MIT"], count null, event dates null
max_forks: path ex105.py, repo arthurfas123/Curso-De-Python, head c4a15d92811bd101a8562d2c3a90fe2d5a3c360d, licenses ["MIT"], count null, event dates null
content:
def notas(*notas, sit=False):
"""
=> Function that reads an arbitrary number of grades and returns a dictionary
with the number of grades, the grades themselves, the highest grade, the lowest
grade, the average and, as an optional parameter, the student's situation.
:param notas: arbitrary number of grades for a given student.
:param sit: (optional) boolean parameter that defines whether the student's situation
('Ruim!', 'Razoavel', 'Boa!') is included in the result.
:return: a dictionary with all of the information.
"""
dados = dict()
dados['quantidade de notas'] = len(notas)
dados['notas'] = notas
dados['maior notas'] = max(notas)
dados['menor nota'] = min(notas)
dados['media'] = sum(notas) / len(notas)
if sit:
if dados['media'] < 6:
dados['situação'] = 'Ruim!'
elif dados['media'] >= 8:
dados['situação'] = 'Boa!'
else:
dados['situação'] = 'Razoavel'
return dados
dicionario = notas(3, 8, 9, sit=True)
print(dicionario)
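# Expected result of the call above (editor's note, assuming CPython 3.7+ dict ordering):
# media = (3 + 8 + 9) / 3 = 6.67, which is neither < 6 nor >= 8, so the situation is 'Razoavel':
# {'quantidade de notas': 3, 'notas': (3, 8, 9), 'maior notas': 9, 'menor nota': 3,
#  'media': 6.666666666666667, 'situação': 'Razoavel'}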
avg_line_length: 34.62069 | max_line_length: 104 | alphanum_fraction: 0.623506
hexsha: 0218f60649b8c3ca843b43b78ec4f11b8d537fd8 | size: 18,536 | ext: py | lang: Python
max_stars: path sktime/forecasting/trend.py, repo mikofski/sktime, head 87bdf36dbc0990f29942eb6f7fa56a8e6c5fa7b7, licenses ["BSD-3-Clause"], count 1, event dates 2019-12-23T16:57:14.000Z to 2019-12-23T16:57:14.000Z
max_issues: path sktime/forecasting/trend.py, repo mikofski/sktime, head 87bdf36dbc0990f29942eb6f7fa56a8e6c5fa7b7, licenses ["BSD-3-Clause"], count null, event dates null
max_forks: path sktime/forecasting/trend.py, repo mikofski/sktime, head 87bdf36dbc0990f29942eb6f7fa56a8e6c5fa7b7, licenses ["BSD-3-Clause"], count null, event dates null
content:
# -*- coding: utf-8 -*-
# !/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implements trend based forecasters."""
__author__ = ["Anthony Jancso", "mloning", "aiwalter"]
__all__ = ["TrendForecaster", "PolynomialTrendForecaster", "STLForecaster"]
import numpy as np
import pandas as pd
from sklearn.base import clone
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from statsmodels.tsa.seasonal import STL as _STL
from sktime.forecasting.base import BaseForecaster
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.naive import NaiveForecaster
from sktime.utils.datetime import _get_duration
class TrendForecaster(BaseForecaster):
"""Trend based forecasts of time series data.
Default settings train a linear regression model.
Parameters
----------
regressor : estimator object, default = None
Define the regression model type. If not set, will default to
sklearn.linear_model.LinearRegression
Examples
--------
>>> from sktime.datasets import load_airline
>>> from sktime.forecasting.trend import TrendForecaster
>>> y = load_airline()
>>> forecaster = TrendForecaster()
>>> forecaster.fit(y)
TrendForecaster(...)
>>> y_pred = forecaster.predict(fh=[1,2,3])
"""
_tags = {
"ignores-exogeneous-X": True,
"requires-fh-in-fit": False,
"handles-missing-data": False,
}
def __init__(self, regressor=None):
# for default regressor, set fit_intercept=True
self.regressor = regressor
super(TrendForecaster, self).__init__()
def _fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series with which to fit the forecaster.
X : pd.DataFrame, default=None
Exogenous variables are ignored
fh : int, list or np.array, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
Returns
-------
self : returns an instance of self.
"""
self.regressor_ = self.regressor or LinearRegression(fit_intercept=True)
# create a clone of self.regressor
self.regressor_ = clone(self.regressor_)
# transform data
X = y.index.astype("int").to_numpy().reshape(-1, 1)
# fit regressor
self.regressor_.fit(X, y)
return self
def _predict(self, fh=None, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""Make forecasts for the given forecast horizon.
Parameters
----------
fh : int, list or np.array
The forecast horizon with the steps ahead to predict
X : pd.DataFrame, default=None
Exogenous variables (ignored)
return_pred_int : bool, default=False
Return the prediction intervals for the forecast.
alpha : float or list, default=0.95
If alpha is iterable, multiple intervals will be calculated.
Returns
-------
y_pred : pd.Series
Point predictions for the forecast
y_pred_int : pd.DataFrame
Prediction intervals for the forecast
"""
# use relative fh as time index to predict
fh = self.fh.to_absolute_int(self._y.index[0], self.cutoff)
X_pred = fh.to_numpy().reshape(-1, 1)
y_pred = self.regressor_.predict(X_pred)
return pd.Series(y_pred, index=self.fh.to_absolute(self.cutoff))
class PolynomialTrendForecaster(BaseForecaster):
"""Forecast time series data with a polynomial trend.
Default settings train a linear regression model with a 1st degree
polynomial transformation of the feature.
Parameters
----------
regressor : estimator object, default = None
Define the regression model type. If not set, will default to
sklearn.linear_model.LinearRegression
degree : int, default = 1
Degree of polynomial function
with_intercept : bool, default=True
If true, then include a feature in which all polynomial powers are
zero. (i.e. a column of ones, acts as an intercept term in a linear
model)
Examples
--------
>>> from sktime.datasets import load_airline
>>> from sktime.forecasting.trend import PolynomialTrendForecaster
>>> y = load_airline()
>>> forecaster = PolynomialTrendForecaster(degree=1)
>>> forecaster.fit(y)
PolynomialTrendForecaster(...)
>>> y_pred = forecaster.predict(fh=[1,2,3])
"""
_tags = {
"ignores-exogeneous-X": True,
"requires-fh-in-fit": False,
"handles-missing-data": False,
}
def __init__(self, regressor=None, degree=1, with_intercept=True):
self.regressor = regressor
self.degree = degree
self.with_intercept = with_intercept
self.regressor_ = self.regressor
super(PolynomialTrendForecaster, self).__init__()
def _fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series with which to fit the forecaster.
X : pd.DataFrame, default=None
Exogenous variables are ignored
fh : int, list or np.array, default=None
The forecaster's horizon with the steps ahead to predict.
Returns
-------
self : returns an instance of self.
"""
# for default regressor, set fit_intercept=False as we generate a
# dummy variable in polynomial features
if self.regressor is None:
regressor = LinearRegression(fit_intercept=False)
else:
regressor = self.regressor
# make pipeline with polynomial features
self.regressor_ = make_pipeline(
PolynomialFeatures(degree=self.degree, include_bias=self.with_intercept),
regressor,
)
# transform data
n_timepoints = _get_duration(self._y.index, coerce_to_int=True) + 1
X = np.arange(n_timepoints).reshape(-1, 1)
# fit regressor
self.regressor_.fit(X, y)
return self
def _predict(self, fh=None, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""Make forecasts for the given forecast horizon.
Parameters
----------
fh : int, list or np.array
The forecast horizon with the steps ahead to predict
X : pd.DataFrame, default=None
Exogenous variables (ignored)
return_pred_int : bool, default=False
Return the prediction intervals for the forecast.
alpha : float or list, default=0.95
If alpha is iterable, multiple intervals will be calculated.
Returns
-------
y_pred : pd.Series
Point predictions for the forecast
y_pred_int : pd.DataFrame
Prediction intervals for the forecast
"""
# use relative fh as time index to predict
fh = self.fh.to_absolute_int(self._y.index[0], self.cutoff)
X_pred = fh.to_numpy().reshape(-1, 1)
y_pred = self.regressor_.predict(X_pred)
return pd.Series(y_pred, index=self.fh.to_absolute(self.cutoff))
class STLForecaster(BaseForecaster):
"""Implements STLForecaster based on statsmodels.tsa.seasonal.STL implementation.
The STLForecaster is using an STL to decompose the given
series y into the three components trend, season and residuals [1]_. Then,
the forecaster_trend, forecaster_seasonal and forecaster_resid are fitted
on the components individually to forecast them also individually. The
final forecast is then the sum of the three component forecasts. The STL
decomposition is done by means of using the package statsmodels [2]_.
Parameters
----------
sp : int, optional
Length of the seasonal period for STL, by default 2.
It's also the default sp for the forecasters
(forecaster_seasonal, forecaster_resid) that are None. The
default forecaster_trend does not get sp as trend is independent
of seasonality.
seasonal : int, optional
Length of the seasonal smoother. Must be an odd integer, and should
normally be >= 7 (default).
trend : {int, None}, optional
Length of the trend smoother. Must be an odd integer. If not provided
uses the smallest odd integer greater than
1.5 * period / (1 - 1.5 / seasonal), following the suggestion in
the original implementation.
low_pass : {int, None}, optional
Length of the low-pass filter. Must be an odd integer >=3. If not
provided, uses the smallest odd integer > period.
seasonal_deg : int, optional
Degree of seasonal LOESS. 0 (constant) or 1 (constant and trend).
trend_deg : int, optional
Degree of trend LOESS. 0 (constant) or 1 (constant and trend).
low_pass_deg : int, optional
Degree of low pass LOESS. 0 (constant) or 1 (constant and trend).
robust : bool, optional
Flag indicating whether to use a weighted version that is robust to
some forms of outliers.
seasonal_jump : int, optional
Positive integer determining the linear interpolation step. If larger
than 1, the LOESS is used every seasonal_jump points and linear
interpolation is between fitted points. Higher values reduce
estimation time.
trend_jump : int, optional
Positive integer determining the linear interpolation step. If larger
than 1, the LOESS is used every trend_jump points and values between
the two are linearly interpolated. Higher values reduce estimation
time.
low_pass_jump : int, optional
Positive integer determining the linear interpolation step. If larger
than 1, the LOESS is used every low_pass_jump points and values between
the two are linearly interpolated. Higher values reduce estimation
time.
inner_iter: int, optional
Number of iterations to perform in the inner loop. If not provided uses 2 if
robust is True, or 5 if not. This param goes into STL.fit() from statsmodels.
outer_iter: int, optional
Number of iterations to perform in the outer loop. If not provided uses 15 if
robust is True, or 0 if not. This param goes into STL.fit() from statsmodels.
forecaster_trend : sktime forecaster, optional
Forecaster to be fitted on trend_ component of the
STL, by default None. If None, then
a NaiveForecaster(strategy="drift") is used.
forecaster_seasonal : sktime forecaster, optional
Forecaster to be fitted on seasonal_ component of the
STL, by default None. If None, then
a NaiveForecaster(strategy="last") is used.
forecaster_resid : sktime forecaster, optional
Forecaster to be fitted on resid_ component of the
STL, by default None. If None, then
a NaiveForecaster(strategy="mean") is used.
Attributes
----------
trend_ : pd.Series
Trend component.
seasonal_ : pd.Series
Seasonal component.
resid_ : pd.Series
Residuals component.
forecaster_trend_ : sktime forecaster
Fitted trend forecaster.
forecaster_seasonal_ : sktime forecaster
Fitted seasonal forecaster.
forecaster_resid_ : sktime forecaster
Fitted residual forecaster.
Examples
--------
>>> from sktime.datasets import load_airline
>>> from sktime.forecasting.trend import STLForecaster
>>> y = load_airline()
>>> forecaster = STLForecaster(sp=12)
>>> forecaster.fit(y)
STLForecaster(...)
>>> y_pred = forecaster.predict(fh=[1,2,3])
See Also
--------
Deseasonalizer
Detrender
References
----------
.. [1] R. B. Cleveland, W. S. Cleveland, J.E. McRae, and I. Terpenning (1990)
STL: A Seasonal-Trend Decomposition Procedure Based on LOESS.
Journal of Official Statistics, 6, 3-73.
.. [2] https://www.statsmodels.org/dev/generated/statsmodels.tsa.seasonal.STL.html
"""
_tags = {
"scitype:y": "univariate", # which y are fine? univariate/multivariate/both
"ignores-exogeneous-X": False, # does estimator ignore the exogeneous X?
"handles-missing-data": False, # can estimator handle missing data?
"y_inner_mtype": "pd.Series", # which types do _fit, _predict, assume for y?
"X_inner_mtype": "pd.DataFrame", # which types do _fit, _predict, assume for X?
"requires-fh-in-fit": False, # is forecasting horizon already required in fit?
}
def __init__(
self,
sp=2,
seasonal=7,
trend=None,
low_pass=None,
seasonal_deg=1,
trend_deg=1,
low_pass_deg=1,
robust=False,
seasonal_jump=1,
trend_jump=1,
low_pass_jump=1,
inner_iter=None,
outer_iter=None,
forecaster_trend=None,
forecaster_seasonal=None,
forecaster_resid=None,
):
self.sp = sp
self.seasonal = seasonal
self.trend = trend
self.low_pass = low_pass
self.seasonal_deg = seasonal_deg
self.trend_deg = trend_deg
self.low_pass_deg = low_pass_deg
self.robust = robust
self.seasonal_jump = seasonal_jump
self.trend_jump = trend_jump
self.low_pass_jump = low_pass_jump
self.inner_iter = inner_iter
self.outer_iter = outer_iter
self.forecaster_trend = forecaster_trend
self.forecaster_seasonal = forecaster_seasonal
self.forecaster_resid = forecaster_resid
super(STLForecaster, self).__init__()
def _fit(self, y, X=None, fh=None):
"""Fit forecaster to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list, np.array or ForecastingHorizon, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Returns
-------
self : returns an instance of self.
"""
self._stl = _STL(
y.values,
period=self.sp,
seasonal=self.seasonal,
trend=self.trend,
low_pass=self.low_pass,
seasonal_deg=self.seasonal_deg,
trend_deg=self.trend_deg,
low_pass_deg=self.low_pass_deg,
robust=self.robust,
seasonal_jump=self.seasonal_jump,
trend_jump=self.trend_jump,
low_pass_jump=self.low_pass_jump,
).fit(inner_iter=self.inner_iter, outer_iter=self.outer_iter)
self.seasonal_ = pd.Series(self._stl.seasonal, index=y.index)
self.resid_ = pd.Series(self._stl.resid, index=y.index)
self.trend_ = pd.Series(self._stl.trend, index=y.index)
self.forecaster_seasonal_ = (
NaiveForecaster(sp=self.sp, strategy="last")
if self.forecaster_seasonal is None
else clone(self.forecaster_seasonal)
)
# trend forecaster does not need sp
self.forecaster_trend_ = (
NaiveForecaster(strategy="drift")
if self.forecaster_trend is None
else clone(self.forecaster_trend)
)
self.forecaster_resid_ = (
NaiveForecaster(sp=self.sp, strategy="mean")
if self.forecaster_resid is None
else clone(self.forecaster_resid)
)
# fitting forecasters to different components
self.forecaster_seasonal_.fit(y=self.seasonal_, X=X, fh=fh)
self.forecaster_trend_.fit(y=self.trend_, X=X, fh=fh)
self.forecaster_resid_.fit(y=self.resid_, X=X, fh=fh)
def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""Forecast time series at future horizon.
Parameters
----------
fh : int, list, np.array or ForecastingHorizon
Forecasting horizon
X : pd.DataFrame, optional (default=None)
Exogenous time series
return_pred_int : bool, optional (default=False)
If True, returns prediction intervals for given alpha values.
alpha : float or list, optional (default=0.95)
Returns
-------
y_pred : pd.Series
Point predictions
"""
y_pred_seasonal = self.forecaster_seasonal_.predict(fh=fh, X=X)
y_pred_trend = self.forecaster_trend_.predict(fh=fh, X=X)
y_pred_resid = self.forecaster_resid_.predict(fh=fh, X=X)
y_pred = y_pred_seasonal + y_pred_trend + y_pred_resid
return y_pred
def _update(self, y, X=None, update_params=True):
"""Update cutoff value and, optionally, fitted parameters.
Parameters
----------
y : pd.Series, pd.DataFrame, or np.array
Target time series to which to fit the forecaster.
X : pd.DataFrame, optional (default=None)
Exogeneous data
update_params : bool, optional (default=True)
whether model parameters should be updated
Returns
-------
self : reference to self
"""
self._stl = _STL(
y.values,
period=self.sp,
seasonal=self.seasonal,
trend=self.trend,
low_pass=self.low_pass,
seasonal_deg=self.seasonal_deg,
trend_deg=self.trend_deg,
low_pass_deg=self.low_pass_deg,
robust=self.robust,
seasonal_jump=self.seasonal_jump,
trend_jump=self.trend_jump,
low_pass_jump=self.low_pass_jump,
).fit(inner_iter=self.inner_iter, outer_iter=self.outer_iter)
self.seasonal_ = pd.Series(self._stl.seasonal, index=y.index)
self.resid_ = pd.Series(self._stl.resid, index=y.index)
self.trend_ = pd.Series(self._stl.trend, index=y.index)
self.forecaster_seasonal_.update(
y=self.seasonal_, X=X, update_params=update_params
)
self.forecaster_trend_.update(y=self.trend_, X=X, update_params=update_params)
self.forecaster_resid_.update(y=self.resid_, X=X, update_params=update_params)
return self
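# Usage sketch: a minimal example of swapping in custom component forecasters,
# assuming sktime is installed; PolynomialTrendForecaster and the airline data
# are only illustrative choices, not requirements of the class.
def _example_stl_with_custom_components():  # pragma: no cover
    from sktime.datasets import load_airline
    from sktime.forecasting.trend import PolynomialTrendForecaster

    y = load_airline()
    forecaster = STLForecaster(
        sp=12,
        forecaster_trend=PolynomialTrendForecaster(degree=1),
        forecaster_seasonal=NaiveForecaster(sp=12, strategy="last"),
        forecaster_resid=NaiveForecaster(strategy="mean"),
    )
    forecaster.fit(y)
    # the prediction is the sum of the three component forecasts
    return forecaster.predict(fh=[1, 2, 3])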
| 37.370968 | 88 | 0.641616 |
ea43000fa67e9c436fec501f01895b9f48511a0e | 1,459 | py | Python | haemapod/main.py | globalspin/haemapod | 990440d7c8f369bc5082f0aae5867a0a150613f1 | ["MIT"] | null | null | null | haemapod/main.py | globalspin/haemapod | 990440d7c8f369bc5082f0aae5867a0a150613f1 | ["MIT"] | 1 | 2015-03-24T21:25:45.000Z | 2015-03-24T21:25:45.000Z | haemapod/main.py | globalspin/haemapod | 990440d7c8f369bc5082f0aae5867a0a150613f1 | ["MIT"] | null | null | null |
from google.appengine.ext.webapp import WSGIApplication
from google.appengine.ext.webapp.util import run_wsgi_app
from request_handler import RequestHandler
def application():
return WSGIApplication([
('/', RequestHandler.with_page('handlers.default')),
('/people/add', RequestHandler.with_page('handlers.people.add')),
('/people', RequestHandler.with_page('handlers.people.search')),
('/people/proximity', RequestHandler.with_page('handlers.people.proximity')),
('/people/bounding_box', RequestHandler.with_page('handlers.people.bounding_box')),
('/people/upload', RequestHandler.with_page('handlers.people.upload')),
('/people/([^/]*)$', RequestHandler.with_page('handlers.people.detail')),
('/events/add', RequestHandler.with_page('handlers.events.add')),
('/events/proximity', RequestHandler.with_page('handlers.events.proximity')),
('/events/bounding_box', RequestHandler.with_page('handlers.events.bounding_box')),
('/events', RequestHandler.with_page('handlers.events.default')),
('/events/users', RequestHandler.with_page('handlers.events.users')),
('/events/upload', RequestHandler.with_page('handlers.events.upload')),
('/events/([^/]*)$', RequestHandler.with_page('handlers.events.detail')),
('/events/([^/]*)/attending/add$', RequestHandler.with_page('handlers.events.attending.add')),
], debug=True)
def main():
run_wsgi_app(application())
if __name__ == "__main__":
main()
| 48.633333 | 98 | 0.721727 |
1a91572afce184707c214322b4415929e3e63a21 | 3,632 | py | Python | lib/world/render/ChunkRenderer.py | defgsus/thegame | 38a627d9108f1418b94b08831fd640dd87fbba83 | ["MIT"] | 1 | 2021-11-05T11:49:26.000Z | 2021-11-05T11:49:26.000Z | lib/world/render/ChunkRenderer.py | defgsus/thegame | 38a627d9108f1418b94b08831fd640dd87fbba83 | ["MIT"] | null | null | null | lib/world/render/ChunkRenderer.py | defgsus/thegame | 38a627d9108f1418b94b08831fd640dd87fbba83 | ["MIT"] | null | null | null |
from ...opengl import *
from ...opengl import postproc
from .deferred.ChunkMeshRenderNode import ChunkMeshRenderNode
from .deferred.ChunkMeshLightingNode import ChunkMeshLightingNode
from .full.ChunkMeshAllNode import ChunkMeshAllNode
from .split.ChunkMeshWithoutLight import ChunkMeshWithoutLight
from .split.ChunkMeshOnlyLight import ChunkMeshOnlyLight
from .split.CombineNode import CombineNode
"""
some benchmarks:
480 x 320 on GTX 1050
Mesh with Lighting
smooth shadow
no MSAA 444 fps
9xMSAA 196 fps (212fps backface culling)
16xMSAA 196 fps (213fps backface culling)
voxel shadow
no MSAA 580 fps
16xMSAA 300 fps (320fps backface culling)
no shadow
no MSAA 725 fps
16xMSAA 350 fps
deferred lighting
smooth shadow
16xMSAA 148 fps
    (below always backface culling)
ChunkMeshWithoutLight
16xMSAA 405 fps
ChunkMeshOnlyLight
no MSAA 580 fps
4xMSAA 560 fps
9xMSAA 236 fps
16xMSAA 237 fps
    split lighting
16/0xMSAA 357 fps
16/4xMSAA 376 fps
16/9xMSAA 200 fps (first aesthetically acceptable value)
"""
class ChunkRenderer:
def __init__(self, world):
self.world = world
self.render_settings = world.render_settings
self.asset_id = "level01"
self.render_graph = RenderGraph()
if 0: # deferred
self.render_graph.add_node(ChunkMeshRenderNode(self.world, self, "mesh"))
self.render_graph.add_node(ChunkMeshLightingNode(self.world, self, "light"))
self.render_graph.connect("mesh", 0, "light", 0)
self.render_graph.connect("mesh", 1, "light", 1)
self.render_graph.connect("mesh", 2, "light", 2)
self.pp_depth_blur = self.render_graph.add_node(postproc.Blur("depth-blur", use_mask=True))
self.render_graph.connect("light", 0, "depth-blur", 0)
self.render_graph.connect("mesh", "depth", "depth-blur", 1)
elif 0: # split
self.render_graph.add_node(ChunkMeshWithoutLight(self.world, self, "mesh"))
self.render_graph.add_node(ChunkMeshOnlyLight(self.world, self, "mesh-light"))
self.render_graph.add_node(CombineNode("add"))
self.pp_depth_blur = self.render_graph.add_node(postproc.Blur("depth-blur", use_mask=True))
self.render_graph.connect("mesh", 0, "add", 0)
self.render_graph.connect("mesh-light", 0, "add", 1)
self.render_graph.connect("add", 0, "depth-blur", 0)
self.render_graph.connect("mesh", "depth", "depth-blur", 1)
else: # all in one
self.render_graph.add_node(ChunkMeshAllNode(self.world, self, "mesh"))
self.pp_depth_blur = self.render_graph.add_node(postproc.Blur("depth-blur", use_mask=True))
self.render_graph.connect("mesh", 0, "depth-blur", 0)
self.render_graph.connect("mesh", "depth", "depth-blur", 1)
self.pipeline = self.render_graph.create_pipeline()
#self.pipeline.dump()
#self.pipeline.verbose = 5
def render(self):
if hasattr(self, "pp_depth_blur"):
(self.pp_depth_blur.mask_center,
self.pp_depth_blur.mask_spread) = self.render_settings.projection.get_depth_mask_values()
self.pipeline.render(self.render_settings)
self.pipeline.render_to_screen(self.render_settings)
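# Usage sketch (assumption: `world` carries the `render_settings` used by the
# constructor above): the renderer is created once and render() is called each
# frame; it runs the node pipeline and blits the result to the screen.
#
#     renderer = ChunkRenderer(world)
#     renderer.render()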
| 40.355556 | 103 | 0.62913 |
eaf2636404414336a028aa6b88599d50fecf9f29 | 4,388 | py | Python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/v2016_09_01/_management_lock_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/v2016_09_01/_management_lock_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/locks/v2016_09_01/_management_lock_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from msrest import Deserializer, Serializer
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models
from ._configuration import ManagementLockClientConfiguration
from .operations import AuthorizationOperationsOperations, ManagementLocksOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class ManagementLockClient:
"""Azure resources can be locked to prevent other users in your organization from deleting or
modifying resources.
:ivar authorization_operations: AuthorizationOperationsOperations operations
:vartype authorization_operations:
azure.mgmt.resource.locks.v2016_09_01.operations.AuthorizationOperationsOperations
:ivar management_locks: ManagementLocksOperations operations
:vartype management_locks:
azure.mgmt.resource.locks.v2016_09_01.operations.ManagementLocksOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2016-09-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ManagementLockClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.authorization_operations = AuthorizationOperationsOperations(self._client, self._config, self._serialize, self._deserialize)
self.management_locks = ManagementLocksOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> ManagementLockClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
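# Usage sketch: DefaultAzureCredential and the subscription id are placeholders,
# and list_at_subscription_level() is assumed from the generated
# ManagementLocksOperations group for api-version 2016-09-01.
def _example_list_locks():  # pragma: no cover
    from azure.identity import DefaultAzureCredential

    with ManagementLockClient(
        credential=DefaultAzureCredential(),
        subscription_id="<subscription-id>",
    ) as client:
        for lock in client.management_locks.list_at_subscription_level():
            print(lock.name, lock.level)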
| 43.019608 | 137 | 0.698952 |
14c9d5d6681482e973d6157c46da1013acc5a912 | 6,283 | py | Python | zabbix/scripts/ali-ecs-monitor/ecs_bandwidth_monitor.py | harvey-zang/ali-tools | 3d34dcbd841f101de3e014adcf7117ec1221b8d7 | ["Apache-2.0"] | null | null | null | zabbix/scripts/ali-ecs-monitor/ecs_bandwidth_monitor.py | harvey-zang/ali-tools | 3d34dcbd841f101de3e014adcf7117ec1221b8d7 | ["Apache-2.0"] | null | null | null | zabbix/scripts/ali-ecs-monitor/ecs_bandwidth_monitor.py | harvey-zang/ali-tools | 3d34dcbd841f101de3e014adcf7117ec1221b8d7 | ["Apache-2.0"] | 1 | 2021-05-31T14:08:43.000Z | 2021-05-31T14:08:43.000Z |
#! /usr/bin/env /usr/bin/python3.7
from aliyunsdkcore.client import AcsClient
from aliyunsdkecs.request.v20140526 import DescribeInstancesRequest
from aliyunsdkcms.request.v20170301 import QueryMetricLastRequest
import json
import sys
class ecsBandMonitor(object):
def __init__(self, ali_key, ali_secret, region_id):
self.ali_key = ali_key
self.ali_secret = ali_secret
self.client = AcsClient(
self.ali_key,
self.ali_secret,
)
        # Create the AcsClient instance
self.client.set_region_id(region_id)
def getAliyunEcsInfo(self):
        # Get ECS instance info; implemented as a generator, so iterating over the function yields ECS instance records
# for a in get_aliyun_ecs(ali_key, ali_secret, region_ids):
# for ecs in a:
# print(ecs['InstanceId'], ecs['HostName'], ecs['ZoneId'], ecs['PublicIpAddress']['IpAddress'], ecs['InnerIpAddress']['IpAddress'], ecs['StartTime'])
        # Field descriptions for an ECS instance: https://help.aliyun.com/document_detail/25656.html?spm=a2c4g.11186623.2.2.a0TLxW#InstanceAttributesType
        # Set the initial page number
pageNumber = 1
        # Create the request and set its parameters
request = DescribeInstancesRequest.DescribeInstancesRequest()
request.set_accept_format('json')
request.set_PageSize(10)
request.set_PageNumber(pageNumber)
# request.set_
response = self.client.do_action_with_exception(request)
        # Send the API request and parse the returned value
response_dict = json.loads(response)
        # Yield results page by page (generator)
while response_dict['Instances']['Instance']:
yield response_dict['Instances']['Instance']
pageNumber += 1
request.set_PageNumber(pageNumber)
response = self.client.do_action_with_exception(request)
response_dict = json.loads(response)
def get_ecs_monitor(self, instances_ids, period):
        # Instantiate the request
request = QueryMetricLastRequest.QueryMetricLastRequest()
        # Assemble the request: accept JSON, monitor outbound traffic (InternetOutRate), with the given period (e.g. 300 s)
request.set_accept_format('json')
request.set_Project('acs_ecs_dashboard')
request.set_Metric('InternetOutRate')
request.set_Period(period)
request.set_Dimensions(instances_ids)
response = self.client.do_action_with_exception(request)
response_dict = json.loads(response)
infos = response_dict.get('Datapoints')
return infos
def get_json(str):
str_lst = str.split(',')
lst = []
for kv_str in str_lst:
kv_lst = kv_str.split(':')
lst.append('"{k}":"{v}"'.format(k=kv_lst[0], v=kv_lst[1]))
res = '{' + ','.join(lst) + '}'
return res
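# Worked example for get_json (illustrative values): special per-instance
# thresholds arrive on the command line as "id:threshold" pairs and are turned
# into a JSON object string before json.loads() parses them into a dict.
#
#     >>> get_json('i-abc123:0.9,i-def456:0.7')
#     '{"i-abc123":"0.9","i-def456":"0.7"}'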
def main():
special_threshold = None
if len(sys.argv) < 3:
default_threshold = None
period = None
print('need threshold and period args')
exit(1)
else:
default_threshold = sys.argv[1]
period = sys.argv[2]
if len(sys.argv) == 4:
special_threshold = sys.argv[3]
try:
default_threshold = float(default_threshold)
period = int(period)
except Exception as err:
print(err)
print('wrong args type, threshold float, period int')
exit(2)
if special_threshold:
try:
special_threshold = get_json(special_threshold)
special_threshold = json.loads(special_threshold)
except Exception as err:
print(err)
print('special_threshold needs json type')
exit(2)
if not 0 < default_threshold < 1:
        print('wrong first arg, needs float, 0 < threshold < 1')
exit(3)
elif period % 60 != 0:
print('wrong second arg, needs int, Multiple of 60')
exit(4)
ecs = ecsBandMonitor('ali-key', 'ali-secret', 'region')
    # Fetch ECS instance information
ecs_bandwidth_info = {}
    # Assemble instance_ids
instance_ids = "["
for ecs_instances_info in ecs.getAliyunEcsInfo():
for ecs_info in ecs_instances_info:
if ecs_info.get('InternetMaxBandwidthOut') > 0:
instance_id = ecs_info.get('InstanceId')
if special_threshold and instance_id in special_threshold.keys():
real_threshold = special_threshold.get(instance_id)
else:
real_threshold = default_threshold
ecs_bandwidth_info[ecs_info.get('InstanceId')] = {'instance_name': ecs_info.get('InstanceName'),
'public_ipaddress': ecs_info.get('PublicIpAddress'),
'max_bandwidth_in': ecs_info.get(
'InternetMaxBandwidthIn'),
'max_bandwidth_out': ecs_info.get(
'InternetMaxBandwidthOut'),
'threshold': real_threshold
}
instance_ids += "{'instanceId':'%s'}," % instance_id
instance_ids += "]"
bandwidth_infos = ecs.get_ecs_monitor(instance_ids, period)
    # Iterate over the datapoints, extracting instanceId and the current traffic value
result = []
for bandwidth_info in bandwidth_infos:
instance_id = bandwidth_info.get('instanceId')
average = bandwidth_info.get('Average')
instance_name = ecs_bandwidth_info.get(instance_id).get('instance_name')
max_bandwidth = ecs_bandwidth_info.get(instance_id).get('max_bandwidth_out')
threshold = float(ecs_bandwidth_info.get(instance_id).get('threshold'))
address = ecs_bandwidth_info.get(instance_id).get('public_ipaddress')
if average / 1024 / 1024 > max_bandwidth * threshold:
result.append(
'{instance_name}: {average:.2f} > {threshold_bandwidth:.2f} (max: {max_bandwidth}, units: Mbps)'.format(
instance_name=instance_name, average=average / 1024 / 1024,
threshold_bandwidth=max_bandwidth * threshold, max_bandwidth=max_bandwidth))
print('\n'.join(result))
if __name__ == '__main__':
try:
main()
except Exception as err:
        print('Failed to call the Aliyun API; non-ops staff can ignore this message.')
print(err)
exit(1)
| 39.515723 | 163 | 0.594143 |
03f7ab3d9eb2cf738646e12e1d94c1cb186974ac | 4,558 | py | Python | scripts/get_uwsgi_args.py | emily101-gif/immport-galaxy | 8f353d1f9b4e0d044e1a9d0b1f928b440df78b8c | ["CC-BY-3.0"] | 1 | 2020-01-06T21:04:22.000Z | 2020-01-06T21:04:22.000Z | scripts/get_uwsgi_args.py | emily101-gif/immport-galaxy | 8f353d1f9b4e0d044e1a9d0b1f928b440df78b8c | ["CC-BY-3.0"] | 7 | 2019-04-26T12:29:58.000Z | 2022-03-02T04:33:12.000Z | scripts/get_uwsgi_args.py | emily101-gif/immport-galaxy | 8f353d1f9b4e0d044e1a9d0b1f928b440df78b8c | ["CC-BY-3.0"] | 7 | 2016-11-03T19:11:01.000Z | 2020-05-11T14:23:52.000Z |
from __future__ import print_function
import os
import sys
from six import string_types
from six.moves import shlex_quote
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'lib')))
from galaxy.util.path import get_ext
from galaxy.util.properties import load_app_properties, nice_config_parser
from galaxy.util.script import main_factory
DESCRIPTION = "Script to determine uWSGI command line arguments"
# socket is not an alias for http, but it is assumed that if you configure a socket in your uwsgi config you do not
# want to run the default http server (or you can configure it yourself)
ALIASES = {
'virtualenv': ('home', 'venv', 'pyhome'),
'pythonpath': ('python-path', 'pp'),
'http': ('httprouter', 'socket', 'uwsgi-socket', 'suwsgi-socket', 'ssl-socket'),
'module': ('mount',), # mount is not actually an alias for module, but we don't want to set module if mount is set
}
DEFAULT_ARGS = {
'_all_': ('pythonpath', 'threads', 'buffer-size', 'http', 'static-map', 'die-on-term', 'hook-master-start', 'enable-threads'),
'galaxy': ('py-call-osafterfork',),
'reports': (),
'tool_shed': (),
}
DEFAULT_PORTS = {
'galaxy': 8080,
'reports': 9001,
'tool_shed': 9009,
}
def __arg_set(arg, kwargs):
if arg in kwargs:
return True
for alias in ALIASES.get(arg, ()):
if alias in kwargs:
return True
return False
def __add_arg(args, arg, value):
optarg = '--%s' % arg
if isinstance(value, bool):
if value is True:
args.append(optarg)
elif isinstance(value, string_types):
# the = in --optarg=value is usually, but not always, optional
if value.startswith('='):
args.append(shlex_quote(optarg + value))
else:
args.append(optarg)
args.append(shlex_quote(value))
else:
[__add_arg(args, arg, v) for v in value]
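# Worked example for __add_arg (hypothetical values): booleans emit a bare flag,
# strings emit the flag plus a shell-quoted value, and tuples/lists recurse once
# per element.
#
#     args = []
#     __add_arg(args, 'die-on-term', True)               # ['--die-on-term']
#     __add_arg(args, 'http', 'localhost:8080')          # + ['--http', 'localhost:8080']
#     __add_arg(args, 'static-map', ('/a=/x', '/b=/y'))  # + two '--static-map' pairs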
def __add_config_file_arg(args, config_file, app):
ext = None
if config_file:
ext = get_ext(config_file)
if ext in ('yaml', 'json'):
__add_arg(args, ext, config_file)
elif ext == 'ini':
config = nice_config_parser(config_file)
has_logging = config.has_section('loggers')
if config.has_section('app:main'):
# uWSGI does not have any way to set the app name when loading with paste.deploy:loadapp(), so hardcoding
# the name to `main` is fine
__add_arg(args, 'ini-paste' if not has_logging else 'ini-paste-logged', config_file)
return # do not add --module
else:
__add_arg(args, ext, config_file)
if has_logging:
__add_arg(args, 'paste-logger', True)
def _get_uwsgi_args(cliargs, kwargs):
# it'd be nice if we didn't have to reparse here but we need things out of more than one section
config_file = cliargs.config_file or kwargs.get('__file__')
uwsgi_kwargs = load_app_properties(config_file=config_file, config_section='uwsgi')
args = []
defaults = {
'pythonpath': 'lib',
'threads': '4',
'buffer-size': '16384', # https://github.com/galaxyproject/galaxy/issues/1530
'http': 'localhost:{port}'.format(port=DEFAULT_PORTS[cliargs.app]),
'static-map': ('/static/style={here}/static/style/blue'.format(here=os.getcwd()),
'/static={here}/static'.format(here=os.getcwd())),
'die-on-term': True,
'enable-threads': True,
'hook-master-start': ('unix_signal:2 gracefully_kill_them_all',
'unix_signal:15 gracefully_kill_them_all'),
'py-call-osafterfork': True,
}
__add_config_file_arg(args, config_file, cliargs.app)
if not __arg_set('module', uwsgi_kwargs):
__add_arg(args, 'module', 'galaxy.webapps.{app}.buildapp:uwsgi_app()'.format(app=cliargs.app))
# only include virtualenv if it's set/exists, otherwise this breaks conda-env'd Galaxy
if not __arg_set('virtualenv', uwsgi_kwargs) and ('VIRTUAL_ENV' in os.environ or os.path.exists('.venv')):
__add_arg(args, 'virtualenv', os.environ.get('VIRTUAL_ENV', '.venv'))
for arg in DEFAULT_ARGS['_all_'] + DEFAULT_ARGS[cliargs.app]:
if not __arg_set(arg, uwsgi_kwargs):
__add_arg(args, arg, defaults[arg])
print(' '.join(args))
ACTIONS = {
"get_uwsgi_args": _get_uwsgi_args,
}
if __name__ == '__main__':
main = main_factory(description=DESCRIPTION, actions=ACTIONS, default_action="get_uwsgi_args")
main()
| 37.669421 | 130 | 0.647214 |
29cf0c762c31fc56cd53a64c787a1ae3e1c1173c | 412 | py | Python | packages/python/plotly/plotly/validators/isosurface/_value.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | ["MIT"] | null | null | null | packages/python/plotly/plotly/validators/isosurface/_value.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | ["MIT"] | null | null | null | packages/python/plotly/plotly/validators/isosurface/_value.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | ["MIT"] | null | null | null |
import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="value", parent_name="isosurface", **kwargs):
super(ValueValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
**kwargs,
)
| 34.333333 | 80 | 0.682039 |
b5abb6f4fa4464a18fb47e2c55fddb3b7afe4ba3 | 2,556 | py | Python | neural_network/constant.py | PurdueMINDS/SAGA | 3c64ad6f84d8d998c88341f62ac39aaa25a6727e | ["Apache-2.0"] | 1 | 2018-10-15T12:53:01.000Z | 2018-10-15T12:53:01.000Z | neural_network/constant.py | PurdueMINDS/SAGA | 3c64ad6f84d8d998c88341f62ac39aaa25a6727e | ["Apache-2.0"] | null | null | null | neural_network/constant.py | PurdueMINDS/SAGA | 3c64ad6f84d8d998c88341f62ac39aaa25a6727e | ["Apache-2.0"] | 1 | 2018-11-05T23:14:27.000Z | 2018-11-05T23:14:27.000Z |
# Copyright 2018 Jianfei Gao, Leonardo Teixeira, Bruno Ribeiro.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Summer Time Period
Set the summer (daylight saving) time periods used to shift the time zone of stratux data.
"""
SUMMER = {
2017: ((3, 12), (11, 5)),
2018: ((3, 11), (11, 4)),
2019: ((3, 10), (11, 3)),
}
"""Minimum Ground Speed
Set the minimum ground speed above which motion is regarded as flight.
The default value embedded in the class is 0.0005 and was chosen empirically.
"""
GROUND_SPEED_THRESHOLD = 0.0004
"""Problematic Flights
Set problematic flights which are commonly provided but known to cause trouble. For instance,
1. Some flights provided in the G1000 records are not real flight data.
2. Sometimes stratux runs out of battery and yields partial flight data which will
crash the alignment process.
"""
HIZARD_FLIGHTS = ('112017_02', '112817_01', '022718_01', '030418_03')
"""Input Keywords
Necessary input keywords, in short form.
Currently, five types are defined:
1. GPS
2. Speed of GPS (Not divided by time step)
3. Acceleration of GPS (Not divided by time step)
4. Accelerometer
5. Gyroscope
"""
INPUT_KEYWORDS = (
'alt', 'lat', 'long',
'spd_alt', 'spd_lat', 'spd_long',
'acc_alt', 'acc_lat', 'acc_long',
'accmx', 'accmy', 'accmz',
'gyrox', 'gyroy', 'gyroz',
)
"""Window Configuration
Window configuration for dividing sequences into frames.
It should contain enough information for generating frames. For instance,
1. It should have window length for input and target;
2. It should have information to compute window offset length for input and target;
3. It should have padding method for input and target.
"""
WINDOW_CONFIG = {
'input': {
'length': 32,
'offset_length': None, 'offset_rate': 0.4,
'padding': 'repeat_base',
},
'target': {
'length': 32,
'offset_length': None, 'offset_rate': 0.4,
'padding': 'repeat_base',
},
}
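# Illustrative note (an assumption, not taken from this repository): a consumer
# of WINDOW_CONFIG would typically derive the hop size from the rate when
# 'offset_length' is None, e.g. offset = round(32 * 0.4) = 13 samples, so
# consecutive 32-sample frames overlap by roughly 60%.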
| 28.719101 | 86 | 0.669797 |
c776c9fdf3c612eb3132613caed18b3ae23c8e88 | 4,670 | py | Python | optimization/rgbd/step0_prepare_frontend_data.py | liubarnabas/hifi3dface | 33886a82aadd2da7ff7c2d5a91303413d096b778 | ["Apache-2.0"] | 442 | 2020-10-13T03:40:48.000Z | 2022-03-31T05:00:28.000Z | optimization/rgbd/step0_prepare_frontend_data.py | liubarnabas/hifi3dface | 33886a82aadd2da7ff7c2d5a91303413d096b778 | ["Apache-2.0"] | 33 | 2020-10-15T14:50:27.000Z | 2022-03-18T11:27:50.000Z | optimization/rgbd/step0_prepare_frontend_data.py | liubarnabas/hifi3dface | 33886a82aadd2da7ff7c2d5a91303413d096b778 | ["Apache-2.0"] | 99 | 2020-10-14T09:40:53.000Z | 2022-03-21T06:51:39.000Z |
# -*- coding:utf8 -*-
"""
This file is part of the repo: https://github.com/tencent-ailab/hifi3dface
If you find the code useful, please cite our paper:
"High-Fidelity 3D Digital Human Head Creation from RGB-D Selfies."
ACM Transactions on Graphics 2021
Code: https://github.com/tencent-ailab/hifi3dface
Copyright (c) [2020-2021] [Tencent AI Lab]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
from absl import app, flags
import cv2
import numpy as np
import sys
sys.path.append("../..")
from RGBD_utils.preProcess import preProcess
def load_depth_png_data(capture_dir, prepare_dir):
rgb_name_all = []
depth_name_all = []
name_all = open(os.path.join(prepare_dir, "img_names.txt"))
line = name_all.readline()
while 1:
if line:
one = line.split(",")
rgb_name_all.append(one[0])
dd = one[1][0 : one[1].find(".") + 4]
depth_name_all.append(dd)
else:
break
line = name_all.readline()
name_all.close()
# load img and depth , change to 640 * 480
imgs_all = []
depths_all = []
for i in range(len(rgb_name_all)):
img = cv2.imread(os.path.join(capture_dir, rgb_name_all[i]))
imgs_all.append(img)
depth = preProcess.load_one_PNG_depth(
os.path.join(capture_dir, depth_name_all[i])
)
depths_all.append(depth)
depths_all = preProcess.depth_bilateral_filter(depths_all)
# load landmarks , change to 640 * 480
lmk3d_all = preProcess.load_landmark_rgbd(prepare_dir, 86)
lmk3d_all = [np.transpose(element) for element in lmk3d_all]
return imgs_all, depths_all, lmk3d_all, rgb_name_all
def run(capture_dir, prepare_dir, output_dir):
print("---- step0 start -----")
if os.path.exists(output_dir) is False:
os.makedirs(output_dir)
print("capture_dir:", capture_dir)
print("prepare_dir:", prepare_dir)
######################### camera parameters #############################
K = {"fx": 592.7247, "fy": 593.7484, "cx": 239.7348, "cy": 319.4659}
K_depth = np.array([[K["fx"], 0, K["cx"]], [0, K["fy"], K["cy"]], [0, 0, 1]])
np.savez(os.path.join(output_dir, "camera_matrix.npz"), K_depth=K_depth)
####################### get image, depth, lanmdarks ######################
# depth->png
img_all, depth_all, lmk3d_all, rgb_name_all = load_depth_png_data(
capture_dir, prepare_dir
)
np.savez(
os.path.join(output_dir, "formatted_data.npz"),
lmk3d_all=lmk3d_all,
img_all=img_all,
depth_all=depth_all,
rgb_name_all=rgb_name_all,
)
########################### get 3d keypoints #############################
pt3d_all = []
for i in range(len(lmk3d_all)):
pt2d = lmk3d_all[i] # 2 * 86
depth = depth_all[i]
one_3d = np.zeros((3, len(pt2d[0])))
for k in range(pt2d.shape[1]):
dd = depth[int(round(pt2d[1][k])), int(round(pt2d[0][k]))]
one_3d[0][k] = (pt2d[0][k] - K["cx"]) * dd / K["fx"]
one_3d[1][k] = (pt2d[1][k] - K["cy"]) * dd / K["fy"]
one_3d[2][k] = dd
pt3d_all.append(one_3d)
np.savez(os.path.join(output_dir, "pt3ds.npz"), pt3d_all=pt3d_all)
print("---- step0 succeed -----")
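# Back-projection refresher (illustrative numbers only): a pixel (u, v) with
# depth d maps to camera coordinates via the pinhole model
#     X = (u - cx) * d / fx,  Y = (v - cy) * d / fy,  Z = d.
# With the K above (fx=592.7247, cx=239.7348), a pixel at u = 339.7348 with
# d = 500 gives X = (339.7348 - 239.7348) * 500 / 592.7247, roughly 84.4, in the
# same units as the depth map.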
def main(_):
capture_dir = FLAGS.capture_dir
prepare_dir = FLAGS.prepare_dir
output_dir = FLAGS.prepare_dir
run(capture_dir, prepare_dir, output_dir)
if __name__ == "__main__":
FLAGS = flags.FLAGS
flags.DEFINE_string("capture_dir", "ori/", "input data directory")
flags.DEFINE_string("prepare_dir", "prepare/", "output data directory")
app.run(main)
| 33.84058 | 81 | 0.643469 |
5ebeeaa7c986c270ae5afdbb2fbddccc9f640c0e | 28,432 | py | Python | mujoco-py-1.50.1.1/.eggs/imageio-2.13.5-py3.5.egg/imageio/plugins/ffmpeg.py | guen-a-park/stablebl_safedagger | a5b1b9357c2f50f32dbfdba1444499aae52f3de5 | ["MIT"] | 5 | 2022-01-05T00:41:46.000Z | 2022-03-21T07:22:58.000Z | mujoco-py-1.50.1.1/.eggs/imageio-2.13.5-py3.5.egg/imageio/plugins/ffmpeg.py | guen-a-park/stablebl_safedagger | a5b1b9357c2f50f32dbfdba1444499aae52f3de5 | ["MIT"] | null | null | null | mujoco-py-1.50.1.1/.eggs/imageio-2.13.5-py3.5.egg/imageio/plugins/ffmpeg.py | guen-a-park/stablebl_safedagger | a5b1b9357c2f50f32dbfdba1444499aae52f3de5 | ["MIT"] | 2 | 2022-03-20T17:35:44.000Z | 2022-03-21T18:30:31.000Z |
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.
"""Read/Write video using FFMPEG
Backend Library: https://github.com/imageio/imageio-ffmpeg
.. note::
To use this plugin you have to install its backend::
pip install imageio[ffmpeg]
The ffmpeg format provides reading and writing for a wide range of movie formats
such as .avi, .mpeg, .mp4, etc. as well as the ability to read streams from
webcams and USB cameras. It is based on ffmpeg and is inspired by/based on `moviepy
<https://github.com/Zulko/moviepy/>`_ by Zulko.
Parameters for reading
----------------------
fps : scalar
The number of frames per second to read the data at. Default None (i.e.
read at the file's own fps). One can use this for files with a
variable fps, or in cases where imageio is unable to correctly detect
the fps. In case of trouble opening camera streams, it may help to set an
explicit fps value matching a framerate supported by the camera.
loop : bool
If True, the video will rewind as soon as a frame is requested
beyond the last frame. Otherwise, IndexError is raised. Default False.
Setting this to True will internally call ``count_frames()``,
and set the reader's length to that value instead of inf.
size : str | tuple
The frame size (i.e. resolution) to read the images, e.g.
(100, 100) or "640x480". For camera streams, this allows setting
the capture resolution. For normal video data, ffmpeg will
rescale the data.
dtype : str | type
The dtype for the output arrays. Determines the bit-depth that
is requested from ffmpeg. Supported dtypes: uint8, uint16.
Default: uint8.
pixelformat : str
The pixel format for the camera to use (e.g. "yuyv422" or
"gray"). The camera needs to support the format in order for
this to take effect. Note that the images produced by this
reader are always RGB.
input_params : list
List additional arguments to ffmpeg for input file options.
(Can also be provided as ``ffmpeg_params`` for backwards compatibility)
Example ffmpeg arguments to use aggressive error handling:
['-err_detect', 'aggressive']
output_params : list
List additional arguments to ffmpeg for output file options (i.e. the
stream being read by imageio).
print_info : bool
Print information about the video file as reported by ffmpeg.
Parameters for writing
----------------------
fps : scalar
The number of frames per second. Default 10.
codec : str
the video codec to use. Default 'libx264', which represents the
widely available mpeg4. Except when saving .wmv files, then the
defaults is 'msmpeg4' which is more commonly supported for windows
quality : float | None
Video output quality. Default is 5. Uses variable bit rate. Highest
quality is 10, lowest is 0. Set to None to prevent variable bitrate
flags to FFMPEG so you can manually specify them using output_params
instead. Specifying a fixed bitrate using 'bitrate' disables this
parameter.
bitrate : int | None
Set a constant bitrate for the video encoding. Default is None causing
'quality' parameter to be used instead. Better quality videos with
smaller file sizes will result from using the 'quality' variable
    bitrate parameter rather than specifying a fixed bitrate with this
parameter.
pixelformat: str
The output video pixel format. Default is 'yuv420p' which most widely
supported by video players.
input_params : list
List additional arguments to ffmpeg for input file options (i.e. the
stream that imageio provides).
output_params : list
List additional arguments to ffmpeg for output file options.
(Can also be provided as ``ffmpeg_params`` for backwards compatibility)
Example ffmpeg arguments to use only intra frames and set aspect ratio:
['-intra', '-aspect', '16:9']
ffmpeg_log_level: str
Sets ffmpeg output log level. Default is "warning".
Values can be "quiet", "panic", "fatal", "error", "warning", "info"
"verbose", or "debug". Also prints the FFMPEG command being used by
imageio if "info", "verbose", or "debug".
macro_block_size: int
Size constraint for video. Width and height, must be divisible by this
number. If not divisible by this number imageio will tell ffmpeg to
scale the image up to the next closest size
divisible by this number. Most codecs are compatible with a macroblock
size of 16 (default), some can go smaller (4, 8). To disable this
automatic feature set it to None or 1, however be warned many players
can't decode videos that are odd in size and some codecs will produce
poor results or fail. See https://en.wikipedia.org/wiki/Macroblock.
Notes
-----
If you are using anaconda and ``anaconda/ffmpeg`` you will not be able to
encode/decode H.264 (likely due to licensing concerns). If you need this
format on anaconda install ``conda-forge/ffmpeg`` instead.
You can use the ``IMAGEIO_FFMPEG_EXE`` environment variable to force using a
specific ffmpeg executable.
To get the number of frames before having read them all, you can use the
``reader.count_frames()`` method (the reader will then use
``imageio_ffmpeg.count_frames_and_secs()`` to get the exact number of frames,
note that this operation can take a few seconds on large files). Alternatively,
the number of frames can be estimated from the fps and duration in the meta data
(though these values themselves are not always present/reliable).
"""
import re
import sys
import time
import logging
import platform
import threading
import subprocess as sp
import numpy as np
from ..core import Format, image_as_uint
logger = logging.getLogger(__name__)
# Get camera format
if sys.platform.startswith("win"):
CAM_FORMAT = "dshow" # dshow or vfwcap
elif sys.platform.startswith("linux"):
CAM_FORMAT = "video4linux2"
elif sys.platform.startswith("darwin"):
CAM_FORMAT = "avfoundation"
else: # pragma: no cover
CAM_FORMAT = "unknown-cam-format"
def download(directory=None, force_download=False): # pragma: no cover
raise RuntimeError(
"imageio.ffmpeg.download() has been deprecated. "
"Use 'pip install imageio-ffmpeg' instead.'"
)
# For backwards compatibility - we dont use this ourselves
def get_exe(): # pragma: no cover
"""Wrapper for imageio_ffmpeg.get_ffmpeg_exe()"""
import imageio_ffmpeg
return imageio_ffmpeg.get_ffmpeg_exe()
_ffmpeg_api = None
def _get_ffmpeg_api():
global _ffmpeg_api
if _ffmpeg_api is None:
try:
import imageio_ffmpeg
except ImportError:
raise ImportError(
"To use the imageio ffmpeg plugin you need to "
"'pip install imageio-ffmpeg'"
)
_ffmpeg_api = imageio_ffmpeg
return _ffmpeg_api
class FfmpegFormat(Format):
"""Read/Write ImageResources using FFMPEG.
See :mod:`imageio.plugins.ffmpeg`
"""
def _can_read(self, request):
if request.mode[1] not in "I?":
return False
# Read from video stream?
# Note that we could write the _video flag here, but a user might
# select this format explicitly (and this code is not run)
if re.match(r"<video(\d+)>", request.filename):
return True
# Read from file that we know?
if request.extension in self.extensions:
return True
def _can_write(self, request):
if request.mode[1] in (self.modes + "?"):
if request.extension in self.extensions:
return True
# --
class Reader(Format.Reader):
_frame_catcher = None
_read_gen = None
def _get_cam_inputname(self, index):
if sys.platform.startswith("linux"):
return "/dev/" + self.request._video[1:-1]
elif sys.platform.startswith("win"):
# Ask ffmpeg for list of dshow device names
ffmpeg_api = _get_ffmpeg_api()
cmd = [
ffmpeg_api.get_ffmpeg_exe(),
"-list_devices",
"true",
"-f",
CAM_FORMAT,
"-i",
"dummy",
]
# Set `shell=True` in sp.run to prevent popup of a command
# line window in frozen applications. Note: this would be a
# security vulnerability if user-input goes into the cmd.
# Note that the ffmpeg process returns with exit code 1 when
# using `-list_devices` (or `-list_options`), even if the
# command is successful, so we set `check=False` explicitly.
completed_process = sp.run(
cmd,
stdout=sp.PIPE,
stderr=sp.PIPE,
encoding="utf-8",
shell=True,
check=False,
)
# Return device name at index
try:
name = parse_device_names(completed_process.stderr)[index]
except IndexError:
raise IndexError("No ffdshow camera at index %i." % index)
return "video=%s" % name
elif sys.platform.startswith("darwin"):
# Appears that newer ffmpeg builds don't support -list-devices
# on OS X. But you can directly open the camera by index.
name = str(index)
return name
else: # pragma: no cover
return "??"
def _open(
self,
loop=False,
size=None,
dtype=None,
pixelformat=None,
print_info=False,
ffmpeg_params=None,
input_params=None,
output_params=None,
fps=None,
):
# Get generator functions
self._ffmpeg_api = _get_ffmpeg_api()
# Process input args
self._arg_loop = bool(loop)
if size is None:
self._arg_size = None
elif isinstance(size, tuple):
self._arg_size = "%ix%i" % size
elif isinstance(size, str) and "x" in size:
self._arg_size = size
else:
raise ValueError('FFMPEG size must be tuple of "NxM"')
if pixelformat is None:
pass
elif not isinstance(pixelformat, str):
raise ValueError("FFMPEG pixelformat must be str")
if dtype is None:
self._dtype = np.dtype("uint8")
else:
self._dtype = np.dtype(dtype)
allowed_dtypes = ["uint8", "uint16"]
if self._dtype.name not in allowed_dtypes:
raise ValueError(
"dtype must be one of: {}".format(", ".join(allowed_dtypes))
)
self._arg_pixelformat = pixelformat
self._arg_input_params = input_params or []
self._arg_output_params = output_params or []
self._arg_input_params += ffmpeg_params or [] # backward compat
# Write "_video"_arg - indicating webcam support
self.request._video = None
regex_match = re.match(r"<video(\d+)>", self.request.filename)
if regex_match:
self.request._video = self.request.filename
# Specify input framerate? (only on macOS)
if self.request._video and platform.system().lower() == "darwin":
if "-framerate" not in str(self._arg_input_params):
self._arg_input_params.extend(["-framerate", str(float(fps or 15))])
# Get local filename
if self.request._video:
index = int(regex_match.group(1))
self._filename = self._get_cam_inputname(index)
else:
self._filename = self.request.get_local_filename()
# When passed to ffmpeg on command line, carets need to be escaped.
self._filename = self._filename.replace("^", "^^")
# Determine pixel format and depth
self._depth = 3
if self._dtype.name == "uint8":
self._pix_fmt = "rgb24"
self._bytes_per_channel = 1
else:
self._pix_fmt = "rgb48le"
self._bytes_per_channel = 2
# Initialize parameters
self._pos = -1
self._meta = {"plugin": "ffmpeg"}
self._lastread = None
# Calculating this from fps and duration is not accurate,
# and calculating it exactly with ffmpeg_api.count_frames_and_secs
# takes too long to do for each video. But we need it for looping.
self._nframes = float("inf")
if self._arg_loop and not self.request._video:
self._nframes = self.count_frames()
self._meta["nframes"] = self._nframes
# Start ffmpeg subprocess and get meta information
self._initialize()
# For cameras, create thread that keeps reading the images
if self.request._video:
self._frame_catcher = FrameCatcher(self._read_gen)
# For reference - but disabled, because it is inaccurate
# if self._meta["nframes"] == float("inf"):
# if self._meta.get("fps", 0) > 0:
# if self._meta.get("duration", 0) > 0:
# n = round(self._meta["duration"] * self._meta["fps"])
# self._meta["nframes"] = int(n)
def _close(self):
# First close the frame catcher, because we cannot close the gen
# if the frame catcher thread is using it
if self._frame_catcher is not None:
self._frame_catcher.stop_me()
self._frame_catcher = None
if self._read_gen is not None:
self._read_gen.close()
self._read_gen = None
def count_frames(self):
"""Count the number of frames. Note that this can take a few
seconds for large files. Also note that it counts the number
of frames in the original video and does not take a given fps
into account.
"""
# This would have been nice, but this does not work :(
# oargs = []
# if self.request.kwargs.get("fps", None):
# fps = float(self.request.kwargs["fps"])
# oargs += ["-r", "%.02f" % fps]
cf = self._ffmpeg_api.count_frames_and_secs
return cf(self._filename)[0]
def _get_length(self):
return self._nframes # only not inf if loop is True
def _get_data(self, index):
"""Reads a frame at index. Note for coders: getting an
arbitrary frame in the video with ffmpeg can be painfully
slow if some decoding has to be done. This function tries
            to avoid fetching arbitrary frames whenever possible, by
moving between adjacent frames."""
# Modulo index (for looping)
if self._arg_loop and self._nframes < float("inf"):
index %= self._nframes
if index == self._pos:
return self._lastread, dict(new=False)
elif index < 0:
raise IndexError("Frame index must be >= 0")
elif index >= self._nframes:
raise IndexError("Reached end of video")
else:
if (index < self._pos) or (index > self._pos + 100):
self._initialize(index)
else:
self._skip_frames(index - self._pos - 1)
result, is_new = self._read_frame()
self._pos = index
return result, dict(new=is_new)
def _get_meta_data(self, index):
return self._meta
def _initialize(self, index=0):
# Close the current generator, and thereby terminate its subprocess
if self._read_gen is not None:
self._read_gen.close()
iargs = []
oargs = []
# Create input args
iargs += self._arg_input_params
if self.request._video:
iargs += ["-f", CAM_FORMAT]
if self._arg_pixelformat:
iargs += ["-pix_fmt", self._arg_pixelformat]
if self._arg_size:
iargs += ["-s", self._arg_size]
elif index > 0: # re-initialize / seek
# Note: only works if we initialized earlier, and now have meta
# Some info here: https://trac.ffmpeg.org/wiki/Seeking
# There are two ways to seek, one before -i (input_params) and
# after (output_params). The former is fast, because it uses
# keyframes, the latter is slow but accurate. According to
# the article above, the fast method should also be accurate
# from ffmpeg version 2.1, however in version 4.1 our tests
# start failing again. Not sure why, but we can solve this
# by combining slow and fast. Seek the long stretch using
# the fast method, and seek the last 10s the slow way.
starttime = index / self._meta["fps"]
seek_slow = min(10, starttime)
seek_fast = starttime - seek_slow
# We used to have this epsilon earlier, when we did not use
# the slow seek. I don't think we need it anymore.
# epsilon = -1 / self._meta["fps"] * 0.1
iargs += ["-ss", "%.06f" % (seek_fast)]
oargs += ["-ss", "%.06f" % (seek_slow)]
# Output args, for writing to pipe
if self._arg_size:
oargs += ["-s", self._arg_size]
if self.request.kwargs.get("fps", None):
fps = float(self.request.kwargs["fps"])
oargs += ["-r", "%.02f" % fps]
oargs += self._arg_output_params
# Get pixelformat and bytes per pixel
pix_fmt = self._pix_fmt
bpp = self._depth * self._bytes_per_channel
# Create generator
rf = self._ffmpeg_api.read_frames
self._read_gen = rf(
self._filename, pix_fmt, bpp, input_params=iargs, output_params=oargs
)
# Read meta data. This start the generator (and ffmpeg subprocess)
if self.request._video:
# With cameras, catch error and turn into IndexError
try:
meta = self._read_gen.__next__()
except IOError as err:
err_text = str(err)
if "darwin" in sys.platform:
if "Unknown input format: 'avfoundation'" in err_text:
err_text += (
"Try installing FFMPEG using "
"home brew to get a version with "
"support for cameras."
)
raise IndexError(
"No (working) camera at {}.\n\n{}".format(
self.request._video, err_text
)
)
else:
self._meta.update(meta)
elif index == 0:
self._meta.update(self._read_gen.__next__())
else:
self._read_gen.__next__() # we already have meta data
def _skip_frames(self, n=1):
"""Reads and throws away n frames"""
for i in range(n):
self._read_gen.__next__()
self._pos += n
def _read_frame(self):
# Read and convert to numpy array
w, h = self._meta["size"]
framesize = w * h * self._depth * self._bytes_per_channel
# t0 = time.time()
# Read frame
if self._frame_catcher: # pragma: no cover - camera thing
s, is_new = self._frame_catcher.get_frame()
else:
s = self._read_gen.__next__()
is_new = True
# Check
if len(s) != framesize:
raise RuntimeError(
"Frame is %i bytes, but expected %i." % (len(s), framesize)
)
result = np.frombuffer(s, dtype=self._dtype).copy()
result = result.reshape((h, w, self._depth))
# t1 = time.time()
# print('etime', t1-t0)
# Store and return
self._lastread = result
return result, is_new
# --
class Writer(Format.Writer):
_write_gen = None
def _open(
self,
fps=10,
codec="libx264",
bitrate=None,
pixelformat="yuv420p",
ffmpeg_params=None,
input_params=None,
output_params=None,
ffmpeg_log_level="quiet",
quality=5,
macro_block_size=16,
):
self._ffmpeg_api = _get_ffmpeg_api()
self._filename = self.request.get_local_filename()
self._pix_fmt = None
self._depth = None
self._size = None
def _close(self):
if self._write_gen is not None:
self._write_gen.close()
self._write_gen = None
def _append_data(self, im, meta):
# Get props of image
h, w = im.shape[:2]
size = w, h
depth = 1 if im.ndim == 2 else im.shape[2]
# Ensure that image is in uint8
im = image_as_uint(im, bitdepth=8)
# To be written efficiently, ie. without creating an immutable
# buffer, by calling im.tobytes() the array must be contiguous.
if not im.flags.c_contiguous:
                # checking the flag is a micro optimization.
# the image will be a numpy subclass. See discussion
# https://github.com/numpy/numpy/issues/11804
im = np.ascontiguousarray(im)
# Set size and initialize if not initialized yet
if self._size is None:
map = {1: "gray", 2: "gray8a", 3: "rgb24", 4: "rgba"}
self._pix_fmt = map.get(depth, None)
if self._pix_fmt is None:
raise ValueError("Image must have 1, 2, 3 or 4 channels")
self._size = size
self._depth = depth
self._initialize()
# Check size of image
if size != self._size:
raise ValueError("All images in a movie should have same size")
if depth != self._depth:
raise ValueError(
"All images in a movie should have same " "number of channels"
)
assert self._write_gen is not None # Check status
# Write. Yes, we can send the data in as a numpy array
self._write_gen.send(im)
def set_meta_data(self, meta):
raise RuntimeError(
"The ffmpeg format does not support setting " "meta data."
)
def _initialize(self):
# Close existing generator
if self._write_gen is not None:
self._write_gen.close()
# Get parameters
# Use None to let imageio-ffmpeg (or ffmpeg) select good results
fps = self.request.kwargs.get("fps", 10)
codec = self.request.kwargs.get("codec", None)
bitrate = self.request.kwargs.get("bitrate", None)
quality = self.request.kwargs.get("quality", None)
input_params = self.request.kwargs.get("input_params") or []
output_params = self.request.kwargs.get("output_params") or []
output_params += self.request.kwargs.get("ffmpeg_params") or []
pixelformat = self.request.kwargs.get("pixelformat", None)
macro_block_size = self.request.kwargs.get("macro_block_size", 16)
ffmpeg_log_level = self.request.kwargs.get("ffmpeg_log_level", None)
macro_block_size = macro_block_size or 1 # None -> 1
# Create generator
self._write_gen = self._ffmpeg_api.write_frames(
self._filename,
self._size,
pix_fmt_in=self._pix_fmt,
pix_fmt_out=pixelformat,
fps=fps,
quality=quality,
bitrate=bitrate,
codec=codec,
macro_block_size=macro_block_size,
ffmpeg_log_level=ffmpeg_log_level,
input_params=input_params,
output_params=output_params,
)
# Seed the generator (this is where the ffmpeg subprocess starts)
self._write_gen.send(None)
class FrameCatcher(threading.Thread):
"""Thread to keep reading the frame data from stdout. This is
useful when streaming from a webcam. Otherwise, if the user code
does not grab frames fast enough, the buffer will fill up, leading
to lag, and ffmpeg can also stall (experienced on Linux). The
get_frame() method always returns the last available image.
"""
def __init__(self, gen):
self._gen = gen
self._frame = None
self._frame_is_new = False
self._lock = threading.RLock()
threading.Thread.__init__(self)
self.setDaemon(True) # do not let this thread hold up Python shutdown
self._should_stop = False
self.start()
def stop_me(self):
self._should_stop = True
while self.is_alive():
time.sleep(0.001)
def get_frame(self):
while self._frame is None: # pragma: no cover - an init thing
time.sleep(0.001)
with self._lock:
is_new = self._frame_is_new
self._frame_is_new = False # reset
return self._frame, is_new
def run(self):
# This runs in the worker thread
try:
while not self._should_stop:
time.sleep(0) # give control to other threads
frame = self._gen.__next__()
with self._lock:
self._frame = frame
self._frame_is_new = True
except (StopIteration, EOFError):
pass
def parse_device_names(ffmpeg_output):
"""Parse the output of the ffmpeg -list-devices command"""
# Collect device names - get [friendly_name, alt_name] of each
device_names = []
in_video_devices = False
for line in ffmpeg_output.splitlines():
if line.startswith("[dshow"):
logger.debug(line)
line = line.split("]", 1)[1].strip()
if in_video_devices and line.startswith('"'):
friendly_name = line[1:-1]
device_names.append([friendly_name, ""])
elif in_video_devices and line.lower().startswith("alternative name"):
alt_name = line.split(" name ", 1)[1].strip()[1:-1]
if sys.platform.startswith("win"):
alt_name = alt_name.replace("&", "^&") # Tested to work
else:
alt_name = alt_name.replace("&", "\\&") # Does this work?
device_names[-1][-1] = alt_name
elif "video devices" in line:
in_video_devices = True
elif "devices" in line:
# set False for subsequent "devices" sections
in_video_devices = False
# Post-process, see #441
# prefer friendly names, use alt name if two cams have same friendly name
device_names2 = []
for friendly_name, alt_name in device_names:
if friendly_name not in device_names2:
device_names2.append(friendly_name)
elif alt_name:
device_names2.append(alt_name)
else:
device_names2.append(friendly_name) # duplicate, but not much we can do
return device_names2
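# Usage sketch: a minimal transcode loop using the reader/writer parameters
# documented in the module docstring; "movie.mp4"/"out.mp4" are placeholder
# file names, and imageio plus imageio-ffmpeg are assumed to be installed.
def _example_transcode():  # pragma: no cover
    import imageio

    reader = imageio.get_reader("movie.mp4", format="ffmpeg")
    meta = reader.get_meta_data()  # fps, size, duration, ...
    with imageio.get_writer(
        "out.mp4", format="ffmpeg", fps=meta.get("fps", 24), quality=8
    ) as writer:
        for frame in reader:  # frames arrive as HxWx3 uint8 arrays
            writer.append_data(frame)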
| 39.709497 | 88 | 0.575725 |
2aa79019004f02d265c59f93c17b35e758d1dfd2 | 1,838 | py | Python | src/bxcommon/messages/bloxroute/keep_alive_message.py | thabaptiser/bxcommon | ee8547c9fc68c71b8acf4ce0989a344681ea273c | ["MIT"] | null | null | null | src/bxcommon/messages/bloxroute/keep_alive_message.py | thabaptiser/bxcommon | ee8547c9fc68c71b8acf4ce0989a344681ea273c | ["MIT"] | null | null | null | src/bxcommon/messages/bloxroute/keep_alive_message.py | thabaptiser/bxcommon | ee8547c9fc68c71b8acf4ce0989a344681ea273c | ["MIT"] | null | null | null |
from typing import Optional
from bxcommon import constants
from bxcommon.messages.bloxroute.abstract_bloxroute_message import AbstractBloxrouteMessage
from bxcommon.messages.bloxroute.protocol_version import PROTOCOL_VERSION
from bxcommon.utils.message_buffer_builder import PayloadElement, PayloadBlock
class KeepAliveMessage(AbstractBloxrouteMessage):
"""
    BloXroute keep-alive message that contains a message nonce to be used in the response.
nonce: long, to be provided and managed by the connection
"""
KEEP_ALIVE_MESSAGE_BLOCK = PayloadBlock(
AbstractBloxrouteMessage.HEADER_LENGTH, "ResponseMessage", PROTOCOL_VERSION,
PayloadElement(name="nonce", structure="<Q", decode=lambda x: x or None),
)
KEEP_ALIVE_MESSAGE_LENGTH = KEEP_ALIVE_MESSAGE_BLOCK.size + constants.CONTROL_FLAGS_LEN
def __init__(
self,
msg_type: bytes,
nonce: Optional[int] = None,
buf: Optional[bytearray] = None,
payload_length: Optional[int] = None
) -> None:
if payload_length is None:
payload_length = self.KEEP_ALIVE_MESSAGE_LENGTH
if buf is None:
buf = bytearray(self.HEADER_LENGTH + payload_length)
buf = self.KEEP_ALIVE_MESSAGE_BLOCK.build(buf, nonce=nonce)
self.buf = buf
self._nonce: Optional[int] = None
self._network_num: Optional[int] = None
self._memoryview = memoryview(buf)
super(KeepAliveMessage, self).__init__(msg_type, payload_length, buf)
def __unpack(self) -> None:
contents = self.KEEP_ALIVE_MESSAGE_BLOCK.read(self._memoryview)
self._nonce = contents.get("nonce")
def nonce(self) -> int:
if self._nonce is None:
self.__unpack()
nonce = self._nonce
assert nonce is not None
return nonce
| 36.039216 | 91 | 0.699674 |
0ef3435628ba579027ca9440403a2de775298d8b | 1,904 | py | Python | tests/services/requests/test_requests_service_config.py | Pineirin/invenio-requests | 7eb85365128e4189e4c81d154e8918b09aae033d | ["MIT"] | null | null | null | tests/services/requests/test_requests_service_config.py | Pineirin/invenio-requests | 7eb85365128e4189e4c81d154e8918b09aae033d | ["MIT"] | null | null | null | tests/services/requests/test_requests_service_config.py | Pineirin/invenio-requests | 7eb85365128e4189e4c81d154e8918b09aae033d | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 TU Wien.
#
# Invenio-Requests is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Test the service configuration."""
import pytest
from invenio_records_permissions.generators import AnyUser, SystemProcess
from invenio_requests.proxies import current_requests
from invenio_requests.services import RequestsServiceConfig
from invenio_requests.services.permissions import PermissionPolicy
class CustomPermissionPolicy(PermissionPolicy):
"""Custom permission policy."""
can_search = [SystemProcess()]
can_read = [SystemProcess()]
can_create = [SystemProcess()]
can_update = [SystemProcess()]
can_delete = [SystemProcess()]
can_test = [AnyUser()]
@pytest.fixture(scope="module")
def app_config(app_config):
"""Fixture for customizing the service config via app config."""
app_config["REQUESTS_PERMISSION_POLICY"] = CustomPermissionPolicy
return app_config
def test_customizations_via_app_config(app):
"""Test if the customization mechanism works correctly."""
current_permission_policy_cls = (
current_requests.requests_service.config.permission_policy_cls
)
assert current_permission_policy_cls is CustomPermissionPolicy
assert hasattr(current_permission_policy_cls, "can_test")
def test_customization_mixin():
"""Test if the customize mixin method does what it is supposed to do."""
custom_config_cls = RequestsServiceConfig.customize(
permission_policy=CustomPermissionPolicy
)
# check if it created a new class
assert custom_config_cls is not RequestsServiceConfig
    # check if both classes have the correct permission policy
assert RequestsServiceConfig.permission_policy_cls is PermissionPolicy
assert custom_config_cls.permission_policy_cls is CustomPermissionPolicy
| 32.271186
| 76
| 0.772059
|
dfbafb3d7759dea7fa61390cb2cd233c4ff56599
| 1,039
|
py
|
Python
|
examples/basic-ipc/bot.py
|
MeFaltaUnVerano21/discord-ext-ipc
|
0e69ff62aa05a1891080b2b81e6035883acf6e2d
|
[
"Apache-2.0"
] | 1
|
2021-01-07T22:45:16.000Z
|
2021-01-07T22:45:16.000Z
|
examples/basic-ipc/bot.py
|
MeFaltaUnVerano21/discord-ext-ipc
|
0e69ff62aa05a1891080b2b81e6035883acf6e2d
|
[
"Apache-2.0"
] | 1
|
2021-02-09T00:38:52.000Z
|
2021-02-09T00:38:52.000Z
|
examples/basic-ipc/bot.py
|
MeFaltaUnVerano21/discord-ext-ipc
|
0e69ff62aa05a1891080b2b81e6035883acf6e2d
|
[
"Apache-2.0"
] | 2
|
2021-01-09T20:43:10.000Z
|
2021-01-22T00:48:48.000Z
|
import discord
from discord.ext import commands, ipc
class MyBot(commands.Bot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ipc = ipc.Server(self, secret_key="my_secret_key") # create our IPC Server
async def on_ready(self):
"""Called upon the READY event"""
print("Bot is ready.")
async def on_ipc_ready(self):
"""Called upon the IPC Server being ready"""
print("Ipc is ready.")
async def on_ipc_error(self, endpoint, error):
"""Called upon an error being raised within an IPC route"""
print(endpoint, "raised", error)
my_bot = MyBot(command_prefix="!", intents=discord.Intents.all())
@my_bot.ipc.route()
async def get_member_count(data):
guild = my_bot.get_guild(
data.guild_id
) # get the guild object using parsed guild_id
return guild.member_count # return the member count to the client
if __name__ == "__main__":
my_bot.ipc.start() # start the IPC Server
my_bot.run("TOKEN")
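# Client-side counterpart, shown for context only; it runs in a separate web process,
# and the guild id below is a placeholder:
#
#   from discord.ext import ipc
#
#   ipc_client = ipc.Client(secret_key="my_secret_key")  # must match the Server's secret_key
#   member_count = await ipc_client.request("get_member_count", guild_id=1234567890)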
| 26.641026
| 88
| 0.658325
|
ed9664fbeffb2119304f380ce8a00cbd61e1f3f3
| 201
|
py
|
Python
|
CodeChef/MISS_NUM/test2.py
|
afifabroory/CompetitiveProgramming
|
231883eeab5abbd84005e80c5065dd02fd8430ef
|
[
"Unlicense"
] | null | null | null |
CodeChef/MISS_NUM/test2.py
|
afifabroory/CompetitiveProgramming
|
231883eeab5abbd84005e80c5065dd02fd8430ef
|
[
"Unlicense"
] | null | null | null |
CodeChef/MISS_NUM/test2.py
|
afifabroory/CompetitiveProgramming
|
231883eeab5abbd84005e80c5065dd02fd8430ef
|
[
"Unlicense"
] | null | null | null |
from math import sqrt, floor
def factor(n):
factorization = list()
for d in range(1, n+1):
if n%d == 0:
factorization.append(d)
return factorization
print(factor(99))
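# The unused sqrt/floor imports above suggest a faster approach; a possible
# O(sqrt(n)) sketch (not part of the submitted solution):
def factor_fast(n):
    small, large = [], []
    for d in range(1, floor(sqrt(n)) + 1):
        if n % d == 0:
            small.append(d)
            if d != n // d:
                large.append(n // d)
    return small + large[::-1]  # divisors in ascending order
print(factor_fast(99))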
| 18.272727
| 35
| 0.60199
|
05cc10ce3986c46408ad100420e91fb1f4ba99e3
| 10,862
|
py
|
Python
|
plugins/modules/oci_opsi_sql_statistics_time_series_facts.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_opsi_sql_statistics_time_series_facts.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_opsi_sql_statistics_time_series_facts.py
|
hanielburton/oci-ansible-collection
|
dfdffde637f746d346ba35569be8c3a3407022f2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2017, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_opsi_sql_statistics_time_series_facts
short_description: Fetches details about a SqlStatisticsTimeSeries resource in Oracle Cloud Infrastructure
description:
- Fetches details about a SqlStatisticsTimeSeries resource in Oracle Cloud Infrastructure
- Query SQL Warehouse to get the performance statistics time series for a given SQL across given databases for a given time period.
version_added: "2.9"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.
type: str
required: true
sql_identifier:
description:
- "Unique SQL_ID for a SQL Statement.
Example: `6rgjh9bjmy2s7`"
type: str
required: true
database_id:
description:
- Optional list of database L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
analysis_time_interval:
description:
- Specify time period in ISO 8601 format with respect to current time.
Default is last 30 days represented by P30D.
If timeInterval is specified, then timeIntervalStart and timeIntervalEnd will be ignored.
Examples P90D (last 90 days), P4W (last 4 weeks), P2M (last 2 months), P1Y (last 12 months), . Maximum value allowed is 25 months prior to
current time (P25M).
type: str
time_interval_start:
description:
- Analysis start time in UTC in ISO 8601 format(inclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
The minimum allowed value is 2 years prior to the current day.
timeIntervalStart and timeIntervalEnd parameters are used together.
If analysisTimeInterval is specified, this parameter is ignored.
type: str
time_interval_end:
description:
- Analysis end time in UTC in ISO 8601 format(exclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
timeIntervalStart and timeIntervalEnd are used together.
If timeIntervalEnd is not specified, current time is used as timeIntervalEnd.
type: str
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific sql_statistics_time_series
oci_opsi_sql_statistics_time_series_facts:
compartment_id: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
sql_identifier: 6rgjh9bjmy2s7
"""
RETURN = """
sql_statistics_time_series:
description:
- SqlStatisticsTimeSeries resource
returned: on success
type: complex
contains:
sql_identifier:
description:
- Unique SQL_ID for a SQL Statement.
returned: on success
type: string
sample: sql_identifier_example
time_interval_start:
description:
- The start timestamp that was passed into the request.
returned: on success
type: string
sample: 2020-12-06T00:00:00.000Z
time_interval_end:
description:
- The end timestamp that was passed into the request.
returned: on success
type: string
sample: 2020-12-06T00:00:00.000Z
item_duration_in_ms:
description:
- Time duration in milliseconds between data points (one hour or one day).
returned: on success
type: int
sample: 86400000
end_timestamps:
description:
- Array comprising of all the sampling period end timestamps in RFC 3339 format.
returned: on success
type: list
sample: []
items:
description:
- Array of SQL performance statistics across databases.
returned: on success
type: complex
contains:
database_details:
description:
- ""
returned: on success
type: complex
contains:
database_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the database.
returned: on success
type: string
sample: ocid1.database.oc1..xxxxxxEXAMPLExxxxxx
database_name:
description:
- The database name. The database name is unique within the tenancy.
returned: on success
type: string
sample: database_name_example
database_display_name:
description:
- The user-friendly name for the database. The name does not have to be unique.
returned: on success
type: string
sample: database_display_name_example
database_type:
description:
- Operations Insights internal representation of the database type.
returned: on success
type: string
sample: database_type_example
database_version:
description:
- The version of the database.
returned: on success
type: string
sample: database_version_example
statistics:
description:
- SQL performance statistics for a given database
returned: on success
type: complex
contains:
name:
description:
- SQL performance statistic name
returned: on success
type: string
sample: name_example
values:
description:
- SQL performance statistic value
returned: on success
type: list
sample: []
sample: {
"sql_identifier": "sql_identifier_example",
"time_interval_start": "2020-12-06T00:00:00.000Z",
"time_interval_end": "2020-12-06T00:00:00.000Z",
"item_duration_in_ms": 86400000,
"end_timestamps": [],
"items": [{
"database_details": {
"database_id": "ocid1.database.oc1..xxxxxxEXAMPLExxxxxx",
"database_name": "database_name_example",
"database_display_name": "database_display_name_example",
"database_type": "database_type_example",
"database_version": "database_version_example"
},
"statistics": [{
"name": "name_example",
"values": []
}]
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.opsi import OperationsInsightsClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class SqlStatisticsTimeSeriesFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get"""
def get_required_params_for_get(self):
return [
"compartment_id",
"sql_identifier",
]
def get_resource(self):
optional_get_method_params = [
"database_id",
"analysis_time_interval",
"time_interval_start",
"time_interval_end",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_get_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.call_with_backoff(
self.client.summarize_sql_statistics_time_series,
compartment_id=self.module.params.get("compartment_id"),
sql_identifier=self.module.params.get("sql_identifier"),
**optional_kwargs
)
SqlStatisticsTimeSeriesFactsHelperCustom = get_custom_class(
"SqlStatisticsTimeSeriesFactsHelperCustom"
)
class ResourceFactsHelper(
SqlStatisticsTimeSeriesFactsHelperCustom, SqlStatisticsTimeSeriesFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
compartment_id=dict(type="str", required=True),
sql_identifier=dict(type="str", required=True),
database_id=dict(type="list"),
analysis_time_interval=dict(type="str"),
time_interval_start=dict(type="str"),
time_interval_end=dict(type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="sql_statistics_time_series",
service_client_class=OperationsInsightsClient,
namespace="opsi",
)
result = []
if resource_facts_helper.is_get():
result = resource_facts_helper.get()
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(sql_statistics_time_series=result)
if __name__ == "__main__":
main()
| 37.19863
| 153
| 0.584607
|
098dddbc6d536d4a73496501c2e343c4822a6292
| 614
|
py
|
Python
|
clip/mangled/__torch__/torch/nn/modules/linear/___torch_mangle_9472.py
|
shawwn/CLIP
|
ba33b4eb956e6f507b4b39468b3b7336ac2260a1
|
[
"MIT"
] | 6
|
2021-01-09T14:34:17.000Z
|
2021-06-13T06:46:43.000Z
|
clip/ViT-B-32-cuda/code/__torch__/torch/nn/modules/linear/___torch_mangle_9472.py
|
shawwn/CLIP
|
ba33b4eb956e6f507b4b39468b3b7336ac2260a1
|
[
"MIT"
] | null | null | null |
clip/ViT-B-32-cuda/code/__torch__/torch/nn/modules/linear/___torch_mangle_9472.py
|
shawwn/CLIP
|
ba33b4eb956e6f507b4b39468b3b7336ac2260a1
|
[
"MIT"
] | 2
|
2021-01-09T10:30:04.000Z
|
2021-01-09T18:43:29.000Z
|
class Linear(Module):
__parameters__ = ["weight", "bias", ]
__buffers__ = []
weight : Tensor
bias : Tensor
training : bool
def forward(self: __torch__.torch.nn.modules.linear.___torch_mangle_9472.Linear,
argument_1: Tensor) -> Tensor:
_0 = self.bias
output = torch.matmul(argument_1, torch.t(self.weight))
return torch.add_(output, _0, alpha=1)
def forward1(self: __torch__.torch.nn.modules.linear.___torch_mangle_9472.Linear,
argument_1: Tensor) -> Tensor:
_1 = self.bias
output = torch.matmul(argument_1, torch.t(self.weight))
return torch.add_(output, _1, alpha=1)
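# For reference: forward/forward1 above are the traced form of a standard affine map.
# An equivalent eager-mode expression (sketch, not part of the TorchScript export):
#
#   out = torch.nn.functional.linear(x, weight, bias)   # i.e. x @ weight.T + bias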
| 36.117647
| 83
| 0.705212
|
69d4341bfb711926ce56a3d87c48dc31cd70bc18
| 794
|
py
|
Python
|
var/spack/repos/builtin/packages/py-lit/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-lit/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/py-lit/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyLit(PythonPackage):
"""lit is a portable tool for executing LLVM and Clang style test suites,
summarizing their results, and providing indication of failures. lit is
designed to be a lightweight testing tool with as simple a user
interface as possible."""
pypi = "lit/lit-0.5.0.tar.gz"
version('0.7.1', sha256='ecef2833aef7f411cb923dac109c7c9dcc7dbe7cafce0650c1e8d19c243d955f')
version('0.5.0', sha256='3ea4251e78ebeb2e07be2feb33243d1f8931d956efc96ccc2b0846ced212b58c')
depends_on('py-setuptools', type='build')
| 37.809524
| 96
| 0.75063
|
87b2200d46623b525e39e93b1080d3000853433d
| 4,065
|
py
|
Python
|
repos.py
|
mw4rf/repos.py
|
34fbdea2a3706663b045b8d61d827c86f4963b5b
|
[
"MIT"
] | null | null | null |
repos.py
|
mw4rf/repos.py
|
34fbdea2a3706663b045b8d61d827c86f4963b5b
|
[
"MIT"
] | null | null | null |
repos.py
|
mw4rf/repos.py
|
34fbdea2a3706663b045b8d61d827c86f4963b5b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# MIT License
# Copyright (c) [2017] [mw4rf]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# HowTo
#
# 1.a) Setup (mandatory)
# Fill the configuration file (default: repos.cfg) with the following syntax:
# Repo1_name | Repo1_url
# Repo2_name | Repo2_url
#
# 1.b) Setup (optional)
# Run < chmod u+x repos.py >
# Then, replace < python3 repos.py COMMAND > with < ./repos.py COMMAND >
#
# 2. Clone repositories
# Run < python3 repos.py clone >
# All repositories defined in the configuration file will be cloned. Existing repositories will be ignored.
#
# 3. Get information about repositories
# Run < python3 repos.py info >
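#
# Example configuration file (repos.cfg), following the syntax from step 1.a;
# the names and URLs below are placeholders:
# dotfiles | https://github.com/username/dotfiles.git
# notes | https://github.com/username/notes.git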
# HISTORY
#
# V. 0.1.2 (27/06/2017)
# Colored output.
#
# V. 0.1.1 (27/06/2017)
# Code cleanup & licence.
#
# v.0.1 (26/06/2017)
# Cloning repositories.
# Configuration
config_file = "repos.cfg"
# Imports
import subprocess
import string
# Classes
class Repo:
def __init__(self):
self.name = ""
self.url = ""
class bcolors:
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
ORANGE = '\033[93m'
RED = '\033[91m'
END = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# Get repositories list from config file
repos = [] # Store repositories in this array
with open(config_file) as f: # Read config file
content = (line.rstrip() for line in f) # Read all the lines, including empty ones
content = list(line for line in content if line) # Remove blank lines
content = [x.strip() for x in content] # Remove leading & trailing spaces & line feed characters (\n)
for line in content:
        # Ignore comments, i.e. lines starting with #
if(line.startswith("#")):
continue
# Explode line with | delimiter
res = line.split("|")
# Make Repo object
r = Repo()
r.name = res[0].strip()
r.url = res[1].strip()
# Append object to array
repos.append(r)
# Run shell commands
def run_command(cmd):
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ""):
yield stdout_line
popen.stdout.close()
return_code = popen.wait()
if return_code:
#raise subprocess.CalledProcessError(return_code, cmd)
pass
# Parse command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("commande")
args = parser.parse_args()
# Command = info
def info():
print(bcolors.HEADER + "Repositories list." + bcolors.END)
for repo in repos:
print(bcolors.BLUE + repo.name + ": <" + repo.url + ">" + bcolors.END)
# Command = clone
def clone():
print(bcolors.HEADER + "Cloning repositories..." + bcolors.END)
for repo in repos:
print(bcolors.BLUE + "Cloning: <" + repo.name + ">" + bcolors.END)
for op in run_command(["git", "clone", repo.url]):
print(op, end="")
# Run commands
if(args.commande == "info"):
info()
elif(args.commande == "clone"):
clone()
else:
print("Unknown command!")
| 30.111111
| 108
| 0.680197
|
515862231ae95716ca1b5d2ff29b083c8f2fa3e6
| 3,888
|
py
|
Python
|
util_kerasmodel_info.py
|
galsh17/cartwheel_train
|
a50abe18cfe8c1f0f24267c3efa8537ecf211e72
|
[
"MIT"
] | 32
|
2018-09-04T08:51:08.000Z
|
2022-02-22T02:04:38.000Z
|
util_kerasmodel_info.py
|
galsh17/cartwheel_train
|
a50abe18cfe8c1f0f24267c3efa8537ecf211e72
|
[
"MIT"
] | 5
|
2019-05-27T07:54:52.000Z
|
2022-01-11T10:14:25.000Z
|
util_kerasmodel_info.py
|
galsh17/cartwheel_train
|
a50abe18cfe8c1f0f24267c3efa8537ecf211e72
|
[
"MIT"
] | 14
|
2018-06-22T15:29:39.000Z
|
2021-09-28T12:58:37.000Z
|
#------------------------------------------------------------------------------#
# This script prints out info on the specified model.
# Can give you:
# a) Input output dimensions
# b) Sample Computation times for various input dimensions on your GPU
# c) FLOPS for the computation
#
# Author : Manohar Kuse <mpkuse@connect.ust.hk>
# Created : 4th June, 2019
#
#------------------------------------------------------------------------------#
import keras
import numpy as np
import os
import tensorflow as tf
from CustomNets import NetVLADLayer, GhostVLADLayer
from predict_utils import change_model_inputshape
from keras import backend as K
import time
import code
import TerminalColors
tcol = TerminalColors.bcolors()
import argparse
def load_keras_hdf5_model( kerasmodel_h5file, verbose=True ):
""" Loads keras model from a HDF5 file """
    assert os.path.isfile( kerasmodel_h5file ), 'The model weights file does not exist or there is a permission issue. '+"kerasmodel_file="+kerasmodel_h5file
# K.set_learning_phase(0)
model = keras.models.load_model(kerasmodel_h5file, custom_objects={'NetVLADLayer': NetVLADLayer, 'GhostVLADLayer': GhostVLADLayer} )
if verbose:
print tcol.OKGREEN+'====\n==== Original Input Model\n====', tcol.ENDC
model.summary();
print tcol.OKGREEN, 'Successfully Loaded kerasmodel_h5file: ', tcol.ENDC, kerasmodel_h5file
print tcol.OKGREEN+ '====\n==== END Original Input Model\n====', tcol.ENDC
return model
def do_inference_on_random_input( model ):
input_shape= ( 1, int(model.input.shape[1].value), int(model.input.shape[2].value), model.input.shape[3].value )
start_t = time.time()
for n in range(100):
X = np.random.random(input_shape)
y_pred = model.predict(X)
end_t = time.time()
print 'Prediction with random input with shape=%s took %4.2f ms (100 predictions including random generation time) and resulted in output vector of dimensions=%s' %( str(X.shape), 1000. * (end_t-start_t ), str(y_pred.shape) )
# print('try predict with a random input_img with shape='+str(X.shape)+'\n'+ str(y_pred) )
if __name__ == '__main__':
#---
# Parse command line
parser = argparse.ArgumentParser(description='Print Memory and FLOPS related info on the Keras hdf5 models.')
parser.add_argument('--kerasmodel_h5file', '-h5', type=str, required=True, help='The input keras modelarch_and_weights full filename')
args = parser.parse_args()
#---
# Path and filename
kerasmodel_h5file = args.kerasmodel_h5file
LOG_DIR = '/'.join( kerasmodel_h5file.split('/')[0:-1] )
print tcol.HEADER
print '##------------------------------------------------------------##'
print '## kerasmodel_h5file = ', kerasmodel_h5file
print '## LOG_DIR = ', LOG_DIR
print '##------------------------------------------------------------##'
print tcol.ENDC
#---
# Load HDF5 Keras model
model = load_keras_hdf5_model( kerasmodel_h5file, verbose=True ) #this
model.summary()
#---
# FLOPS
from CustomNets import print_model_memory_usage, print_flops_report
print tcol.OKGREEN+ '====\n==== print_model_memory_usage\n====', tcol.ENDC
print_model_memory_usage( 1, model )
print_flops_report( model )
#---
# Change Model Shape and do flops computation
print tcol.OKGREEN+ '====\n==== Memory and FLOPS for various Input Shapes\n====', tcol.ENDC
for m in [0.5, 1.0, 2.0, 4.0]:
from predict_utils import change_model_inputshape
new_input_shape= ( None, int(model.input.shape[1].value*m), int(model.input.shape[2].value*m), model.input.shape[3].value )
new_model = change_model_inputshape( model, new_input_shape=new_input_shape )
print_model_memory_usage( 1, new_model )
do_inference_on_random_input( new_model )
| 37.028571
| 229
| 0.643519
|
53290a9b944ece1789d592932cc3697c9431f2f3
| 17,510
|
py
|
Python
|
UROPcode/utlis/process.py
|
YingjieQiao/WebScraping
|
c5045e16e1ad2ac9dd85330cbacf9aa4abf08251
|
[
"MIT"
] | 2
|
2020-07-30T17:20:32.000Z
|
2021-08-12T20:03:39.000Z
|
UROPcode/utlis/process.py
|
YingjieQiao/WebScraping
|
c5045e16e1ad2ac9dd85330cbacf9aa4abf08251
|
[
"MIT"
] | null | null | null |
UROPcode/utlis/process.py
|
YingjieQiao/WebScraping
|
c5045e16e1ad2ac9dd85330cbacf9aa4abf08251
|
[
"MIT"
] | 1
|
2021-10-02T10:41:19.000Z
|
2021-10-02T10:41:19.000Z
|
import regex
import string
import unidecode
from os import path
from monty.fractions import gcd_float
from chemdataextractor.doc import Paragraph
from gensim.models.phrases import Phraser
from pymatgen.core.periodic_table import Element
from pymatgen.core.composition import Composition, CompositionError
PHRASER_PATH = path.join(path.dirname(__file__), "models/phraser.pkl")
__author__ = "Vahe Tshitoyan"
__credits__ = "John Dagdelen, Leigh Weston, Anubhav Jain"
__copyright__ = "Copyright 2018 - 2019, Materials Intelligence."
__version__ = "0.0.3"
__maintainer__ = "John Dagdelen"
__email__ = "vahe.tshitoyan@gmail.com, jdagdelen@berkeley.edu"
__date__ = "June 10, 2019"
class MaterialsTextProcessor:
"""
Materials Science Text Processing Tools.
"""
ELEMENTS = ["H", "He", "Li", "Be", "B", "C", "N", "O", "F", "Ne", "Na", "Mg", "Al", "Si", "P", "S", "Cl", "Ar", "K",
"Ca", "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn", "Ga", "Ge", "As", "Se", "Br", "Kr",
"Rb", "Sr", "Y", "Zr", "Nb", "Mo", "Tc", "Ru", "Rh", "Pd", "Ag", "Cd", "In", "Sn", "Sb", "Te", "I",
"Xe", "Cs", "Ba", "La", "Ce", "Pr", "Nd", "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho", "Er", "Tm", "Yb",
"Lu", "Hf", "Ta", "W", "Re", "Os", "Ir", "Pt", "Au", "Hg", "Tl", "Pb", "Bi", "Po", "At", "Rn", "Fr",
"Ra", "Ac", "Th", "Pa", "U", "Np", "Pu", "Am", "Cm", "Bk", "Cf", "Es", "Fm", "Md", "No", "Lr", "Rf",
"Db", "Sg", "Bh", "Hs", "Mt", "Ds", "Rg", "Cn", "Nh", "Fl", "Mc", "Lv", "Ts", "Og", "Uue"]
ELEMENT_NAMES = ["hydrogen", "helium", "lithium", "beryllium", "boron", "carbon", "nitrogen", "oxygen", "fluorine",
"neon", "sodium", "magnesium", "aluminium", "silicon", "phosphorus", "sulfur", "chlorine", "argon",
"potassium", "calcium", "scandium", "titanium", "vanadium", "chromium", "manganese", "iron",
"cobalt", "nickel", "copper", "zinc", "gallium", "germanium", "arsenic", "selenium", "bromine",
"krypton", "rubidium", "strontium", "yttrium", "zirconium", "niobium", "molybdenum", "technetium",
"ruthenium", "rhodium", "palladium", "silver", "cadmium", "indium", "tin", "antimony", "tellurium",
"iodine", "xenon", "cesium", "barium", "lanthanum", "cerium", "praseodymium", "neodymium",
"promethium", "samarium", "europium", "gadolinium", "terbium", "dysprosium", "holmium", "erbium",
"thulium", "ytterbium", "lutetium", "hafnium", "tantalum", "tungsten", "rhenium", "osmium",
"iridium", "platinum", "gold", "mercury", "thallium", "lead", "bismuth", "polonium", "astatine",
"radon", "francium", "radium", "actinium", "thorium", "protactinium", "uranium", "neptunium",
"plutonium", "americium", "curium", "berkelium", "californium", "einsteinium", "fermium",
"mendelevium", "nobelium", "lawrencium", "rutherfordium", "dubnium", "seaborgium", "bohrium",
"hassium", "meitnerium", "darmstadtium", "roentgenium", "copernicium", "nihonium", "flerovium",
"moscovium", "livermorium", "tennessine", "oganesson", "ununennium"]
ELEMENTS_AND_NAMES = ELEMENTS + ELEMENT_NAMES + [en.capitalize() for en in ELEMENT_NAMES]
ELEMENTS_NAMES_UL = ELEMENT_NAMES + [en.capitalize() for en in ELEMENT_NAMES]
    # Element with the valence state in parentheses.
ELEMENT_VALENCE_IN_PAR = regex.compile(r"^("+r"|".join(ELEMENTS_AND_NAMES) +
r")(\(([IV|iv]|[Vv]?[Ii]{0,3})\))$")
ELEMENT_DIRECTION_IN_PAR = regex.compile(r"^(" + r"|".join(ELEMENTS_AND_NAMES) + r")(\(\d\d\d\d?\))")
    # Exactly IV or VI, two consecutive I's, or a roman numeral in parentheses: not a simple formula.
VALENCE_INFO = regex.compile(r"(II+|^IV$|^VI$|\(IV\)|\(V?I{0,3}\))")
SPLIT_UNITS = ["K", "h", "V", "wt", "wt.", "MHz", "kHz", "GHz", "Hz", "days", "weeks",
"hours", "minutes", "seconds", "T", "MPa", "GPa", "at.", "mol.",
"at", "m", "N", "s-1", "vol.", "vol", "eV", "A", "atm", "bar",
"kOe", "Oe", "h.", "mWcm−2", "keV", "MeV", "meV", "day", "week", "hour",
"minute", "month", "months", "year", "cycles", "years", "fs", "ns",
"ps", "rpm", "g", "mg", "mAcm−2", "mA", "mK", "mT", "s-1", "dB",
"Ag-1", "mAg-1", "mAg−1", "mAg", "mAh", "mAhg−1", "m-2", "mJ", "kJ",
"m2g−1", "THz", "KHz", "kJmol−1", "Torr", "gL-1", "Vcm−1", "mVs−1",
"J", "GJ", "mTorr", "bar", "cm2", "mbar", "kbar", "mmol", "mol", "molL−1",
"MΩ", "Ω", "kΩ", "mΩ", "mgL−1", "moldm−3", "m2", "m3", "cm-1", "cm",
"Scm−1", "Acm−1", "eV−1cm−2", "cm-2", "sccm", "cm−2eV−1", "cm−3eV−1",
"kA", "s−1", "emu", "L", "cmHz1", "gmol−1", "kVcm−1", "MPam1",
"cm2V−1s−1", "Acm−2", "cm−2s−1", "MV", "ionscm−2", "Jcm−2", "ncm−2",
"Jcm−2", "Wcm−2", "GWcm−2", "Acm−2K−2", "gcm−3", "cm3g−1", "mgl−1",
"mgml−1", "mgcm−2", "mΩcm", "cm−2s−1", "cm−2", "ions", "moll−1",
"nmol", "psi", "mol·L−1", "Jkg−1K−1", "km", "Wm−2", "mass", "mmHg",
"mmmin−1", "GeV", "m−2", "m−2s−1", "Kmin−1", "gL−1", "ng", "hr", "w",
"mN", "kN", "Mrad", "rad", "arcsec", "Ag−1", "dpa", "cdm−2",
"cd", "mcd", "mHz", "m−3", "ppm", "phr", "mL", "ML", "mlmin−1", "MWm−2",
"Wm−1K−1", "Wm−1K−1", "kWh", "Wkg−1", "Jm−3", "m-3", "gl−1", "A−1",
"Ks−1", "mgdm−3", "mms−1", "ks", "appm", "ºC", "HV", "kDa", "Da", "kG",
"kGy", "MGy", "Gy", "mGy", "Gbps", "μB", "μL", "μF", "nF", "pF", "mF",
"A", "Å", "A˚", "μgL−1"]
NR_BASIC = regex.compile(r"^[+-]?\d*\.?\d+\(?\d*\)?+$", regex.DOTALL)
NR_AND_UNIT = regex.compile(r"^([+-]?\d*\.?\d+\(?\d*\)?+)([\p{script=Latin}|Ω|μ]+.*)", regex.DOTALL)
PUNCT = list(string.punctuation) + ["\"", "“", "”", "≥", "≤", "×"]
def __init__(self, phraser_path=PHRASER_PATH):
self.elem_name_dict = {en: es for en, es in zip(self.ELEMENT_NAMES, self.ELEMENTS)}
self.phraser = Phraser.load(phraser_path)
def tokenize(self, text, split_oxidation=True, keep_sentences=True):
"""Converts a string to a list tokens (words) using a modified chemdataextractor tokenizer.
Adds a few fixes for inorganic materials science, such as splitting common units from numbers
and splitting the valence state.
Args:
text: input text as a string
split_oxidation: if True, will split the oxidation state from the element, e.g. iron(II)
will become iron (II), same with Fe(II), etc.
keep_sentences: if False, will disregard the sentence structure and return tokens as a
single list of strings. Otherwise returns a list of lists, each sentence separately.
Returns:
A list of strings if keep_sentence is False, otherwise a list of list of strings, which each
list corresponding to a single sentence.
"""
def split_token(token, so=split_oxidation):
"""Processes a single token, in case it needs to be split up.
There are 2 cases when the token is split: A number with a common unit, or an
element with a valence state.
Args:
token: The string to be processed.
so: If True, split the oxidation (valence) string. Units are always split.
Returns:
A list of strings.
"""
elem_with_valence = self.ELEMENT_VALENCE_IN_PAR.match(token) if so else None
nr_unit = self.NR_AND_UNIT.match(token)
if nr_unit is not None and nr_unit.group(2) in self.SPLIT_UNITS:
# Splitting the unit from number, e.g. "5V" -> ["5", "V"].
return [nr_unit.group(1), nr_unit.group(2)]
elif elem_with_valence is not None:
                # Splitting element from its valence state, e.g. "Fe(II)" -> ["Fe", "(II)"].
return [elem_with_valence.group(1), elem_with_valence.group(2)]
else:
return [token]
cde_p = Paragraph(text)
tokens = cde_p.tokens
toks = []
for sentence in tokens:
if keep_sentences:
toks.append([])
for tok in sentence:
toks[-1] += split_token(tok.text, so=split_oxidation)
else:
for tok in sentence:
toks += split_token(tok.text, so=split_oxidation)
return toks
def process(self, tokens, exclude_punct=False, convert_num=True, normalize_materials=True, remove_accents=True,
make_phrases=False, split_oxidation=True):
"""Processes a pre-tokenized list of strings or a string.
Selective lower casing, material normalization, etc.
Args:
tokens: A list of strings or a string. If a string is supplied, will use the
tokenize method first to split it into a list of token strings.
exclude_punct: Bool flag to exclude all punctuation.
convert_num: Bool flag to convert numbers (selectively) to <nUm>.
normalize_materials: Bool flag to normalize all simple material formula.
remove_accents: Bool flag to remove accents, e.g. Néel -> Neel.
make_phrases: Bool flag to convert single tokens to common materials science phrases.
split_oxidation: Only used if string is supplied, see docstring for tokenize method.
Returns:
A (processed_tokens, material_list) tuple. processed_tokens is a list of strings,
whereas material_list is a list of (original_material_string, normalized_material_string)
tuples.
"""
        if not isinstance(tokens, list):  # If it's a string.
return self.process(self.tokenize(
tokens, split_oxidation=split_oxidation, keep_sentences=False),
exclude_punct=exclude_punct,
convert_num=convert_num,
normalize_materials=normalize_materials,
remove_accents=remove_accents,
make_phrases=make_phrases
)
processed, mat_list = [], []
for i, tok in enumerate(tokens):
if exclude_punct and tok in self.PUNCT: # Punctuation.
continue
elif convert_num and self.is_number(tok): # Number.
# Replace all numbers with <nUm>, except if it is a crystal direction (e.g. "(111)").
try:
if tokens[i - 1] == "(" and tokens[i + 1] == ")" \
or tokens[i - 1] == "〈" and tokens[i + 1] == "〉":
pass
else:
tok = "<nUm>"
except IndexError:
tok = "<nUm>"
elif tok in self.ELEMENTS_NAMES_UL: # Chemical element name.
# Add as a material mention.
mat_list.append((tok, self.elem_name_dict[tok.lower()]))
tok = tok.lower()
elif self.is_simple_formula(tok): # Simple chemical formula.
normalized_formula = self.normalized_formula(tok)
mat_list.append((tok, normalized_formula))
if normalize_materials:
tok = normalized_formula
elif (len(tok) == 1 or (len(tok) > 1 and tok[0].isupper() and tok[1:].islower())) \
and tok not in self.ELEMENTS and tok not in self.SPLIT_UNITS \
and self.ELEMENT_DIRECTION_IN_PAR.match(tok) is None:
# To lowercase if only first letter is uppercase (chemical elements already covered above).
tok = tok.lower()
if remove_accents:
tok = self.remove_accent(tok)
processed.append(tok)
if make_phrases:
processed = self.make_phrases(processed, reps=2)
return processed, mat_list
def make_phrases(self, sentence, reps=2):
"""Generates phrases from a sentence of words.
Args:
sentence: A list of tokens (strings).
reps: How many times to combine the words.
Returns:
A list of strings where the strings in the original list are combined
to form phrases, separated from each other with an underscore "_".
"""
while reps > 0:
sentence = self.phraser[sentence]
reps -= 1
return sentence
def is_number(self, s):
"""Determines if the supplied string is number.
Args:
s: The input string.
Returns:
True if the supplied string is a number (both . and , are acceptable), False otherwise.
"""
return self.NR_BASIC.match(s.replace(",", "")) is not None
@staticmethod
def is_element(txt):
"""Checks if the string is a chemical symbol.
Args:
txt: The input string.
Returns:
True if the string is a chemical symbol, e.g. Hg, Fe, V, etc. False otherwise.
"""
try:
Element(txt)
return True
except ValueError:
return False
def is_simple_formula(self, text):
"""Determines if the string is a simple chemical formula.
Excludes some roman numbers, e.g. IV.
Args:
text: The input string.
Returns:
True if the supplied string a simple formula, e.g. IrMn, LiFePO4, etc. More complex
formula such as LiFePxO4-x are not considered to be simple formulae.
"""
if self.VALENCE_INFO.search(text) is not None:
# 2 consecutive II, IV or VI should not be parsed as formula.
            # Related to valence state, so don't want to mix with I and V elements.
return False
elif any(char.isdigit() or char.islower() for char in text):
            # Has to contain at least one lowercase letter or at least one number (to ignore abbreviations).
            # Also ignores some materials like BN, but these are few and usually written in the same way,
            # so normalization won't be crucial.
try:
if text in ["O2", "N2", "Cl2", "F2", "H2"]:
# Including chemical elements that are diatomic at room temperature and atm pressure,
# despite them having only a single element.
return True
composition = Composition(text)
# Has to contain more than one element, single elements are handled differently.
if len(composition.keys()) < 2 or any([not self.is_element(key) for key in composition.keys()]):
return False
return True
except (CompositionError, ValueError):
return False
else:
return False
@staticmethod
    def get_ordered_integer_formula(el_amt, max_denominator=1000):
        """Converts a mapping of {element: stoichiometric value} to an alphabetically ordered string.
Given a dictionary of {element : stoichiometric value, ..}, returns a string with
elements ordered alphabetically and stoichiometric values normalized to smallest common
integer denominator.
Args:
el_amt: {element: stoichiometric value} mapping.
max_denominator: The maximum common denominator of stoichiometric values to use for
normalization. Smaller stoichiometric fractions will be converted to the same
integer stoichiometry.
Returns:
A material formula string with elements ordered alphabetically and the stoichiometry
normalized to the smallest integer fractions.
"""
g = gcd_float(list(el_amt.values()), 1 / max_denominator)
d = {k: round(v / g) for k, v in el_amt.items()}
formula = ""
for k in sorted(d):
if d[k] > 1:
formula += k + str(d[k])
elif d[k] != 0:
formula += k
return formula
def normalized_formula(self, formula, max_denominator=1000):
"""Normalizes chemical formula to smallest common integer denominator, and orders elements alphabetically.
Args:
formula: the string formula.
max_denominator: highest precision for the denominator (1000 by default).
Returns:
A normalized formula string, e.g. Ni0.5Fe0.5 -> FeNi.
"""
try:
formula_dict = Composition(formula).get_el_amt_dict()
return self.get_ordered_integer_formula(formula_dict, max_denominator)
except (CompositionError, ValueError):
return formula
@staticmethod
def remove_accent(txt):
"""Removes accents from a string.
Args:
txt: The input string.
Returns:
The de-accented string.
"""
# There is a problem with angstrom sometimes, so ignoring length 1 strings.
return unidecode.unidecode(txt) if len(txt) > 1 else txt
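# Minimal usage sketch (requires chemdataextractor and the bundled models/phraser.pkl;
# the input sentence is illustrative):
#
#   mtp = MaterialsTextProcessor()
#   tokens, materials = mtp.process("LiFePO4 was annealed at 700 K.")
#   # materials holds (original, normalized) pairs, e.g. ("LiFePO4", "FeLiO4P")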
| 49.185393
| 120
| 0.551571
|
a2daeeed69ec3b3e736b5cbe22ef9581ab66b369
| 2,987
|
py
|
Python
|
django_extensions/management/commands/mail_debug.py
|
EvgeneOskin/django-extensions
|
f40be190de1102f663573e65cbb19b4d7da83864
|
[
"MIT"
] | null | null | null |
django_extensions/management/commands/mail_debug.py
|
EvgeneOskin/django-extensions
|
f40be190de1102f663573e65cbb19b4d7da83864
|
[
"MIT"
] | null | null | null |
django_extensions/management/commands/mail_debug.py
|
EvgeneOskin/django-extensions
|
f40be190de1102f663573e65cbb19b4d7da83864
|
[
"MIT"
] | 1
|
2019-03-21T08:17:36.000Z
|
2019-03-21T08:17:36.000Z
|
# coding=utf-8
import asyncore
import sys
from logging import getLogger
from smtpd import SMTPServer
from django.core.management.base import CommandError
from django_extensions.management.utils import setup_logger, signalcommand
from django_extensions.compat import CompatibilityBaseCommand as BaseCommand
logger = getLogger(__name__)
class ExtensionDebuggingServer(SMTPServer):
"""Duplication of smtpd.DebuggingServer, but using logging instead of print."""
# Do something with the gathered message
def process_message(self, peer, mailfrom, rcpttos, data):
"""Output will be sent to the module logger at INFO level."""
inheaders = 1
lines = data.split('\n')
logger.info('---------- MESSAGE FOLLOWS ----------')
for line in lines:
# headers first
if inheaders and not line:
logger.info('X-Peer: %s' % peer[0])
inheaders = 0
logger.info(line)
logger.info('------------ END MESSAGE ------------')
class Command(BaseCommand):
help = "Starts a test mail server for development."
    args = '[optional port number or ipaddr:port]'
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument(
'--output', dest='output_file', default=None,
help='Specifies an output file to send a copy of all messages '
'(not flushed immediately).')
parser.add_argument(
'--use-settings', dest='use_settings',
action='store_true', default=False,
help='Uses EMAIL_HOST and HOST_PORT from Django settings.')
@signalcommand
def handle(self, addrport='', *args, **options):
if args:
raise CommandError('Usage is mail_debug %s' % self.args)
if not addrport:
if options.get('use_settings', False):
from django.conf import settings
addr = getattr(settings, 'EMAIL_HOST', '')
port = str(getattr(settings, 'EMAIL_PORT', '1025'))
else:
addr = ''
port = '1025'
else:
try:
addr, port = addrport.split(':')
except ValueError:
addr, port = '', addrport
if not addr:
addr = '127.0.0.1'
if not port.isdigit():
raise CommandError("%r is not a valid port number." % port)
else:
port = int(port)
# Add console handler
setup_logger(logger, stream=self.stdout, filename=options.get('output_file', None))
def inner_run():
quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
print("Now accepting mail at %s:%s -- use %s to quit" % (addr, port, quit_command))
ExtensionDebuggingServer((addr, port), None)
asyncore.loop()
try:
inner_run()
except KeyboardInterrupt:
pass
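# Typical invocations (sketch; the output path is a placeholder):
#   python manage.py mail_debug 1025
#   python manage.py mail_debug 0.0.0.0:1025 --output /tmp/mail.log
#   python manage.py mail_debug --use-settings   # read EMAIL_HOST / EMAIL_PORT from settings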
| 34.333333
| 95
| 0.586207
|
710d44f55d3ca8555c7f41ede49ea8f72d0f9aeb
| 18,455
|
py
|
Python
|
datasets.py
|
rkolli929/DexiNed
|
340909a74ebe19b889f30a20c4aa458c537ab86e
|
[
"MIT"
] | null | null | null |
datasets.py
|
rkolli929/DexiNed
|
340909a74ebe19b889f30a20c4aa458c537ab86e
|
[
"MIT"
] | null | null | null |
datasets.py
|
rkolli929/DexiNed
|
340909a74ebe19b889f30a20c4aa458c537ab86e
|
[
"MIT"
] | 2
|
2021-11-23T16:59:08.000Z
|
2021-11-23T17:01:43.000Z
|
import os
import random
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
import json
DATASET_NAMES = [
'BIPED',
'BSDS',
'BSDS-RIND',
'BSDS300',
'CID',
'DCD',
'MDBD', #5
'PASCAL',
'NYUD',
'CLASSIC'
] # 8
def dataset_info(dataset_name, is_linux=True):
if is_linux:
config = {
'BSDS': {
'img_height': 512, #321
'img_width': 512, #481
'train_list': 'train_pair.lst',
'test_list': 'test_pair.lst',
'data_dir': '/opt/dataset/BSDS', # mean_rgb
'yita': 0.5
},
'BSDS-RIND': {
'img_height': 512, # 321
'img_width': 512, # 481
'train_list': 'train_pair2.lst',
'test_list': 'test_pair.lst',
'data_dir': '/opt/dataset/BSDS-RIND', # mean_rgb
'yita': 0.5
},
'BSDS300': {
'img_height': 512, #321
'img_width': 512, #481
'test_list': 'test_pair.lst',
'train_list': None,
'data_dir': '/opt/dataset/BSDS300', # NIR
'yita': 0.5
},
'PASCAL': {
'img_height': 416, # 375
'img_width': 512, #500
'test_list': 'test_pair.lst',
'train_list': None,
'data_dir': '/opt/dataset/PASCAL', # mean_rgb
'yita': 0.3
},
'CID': {
'img_height': 512,
'img_width': 512,
'test_list': 'test_pair.lst',
'train_list': None,
'data_dir': '/opt/dataset/CID', # mean_rgb
'yita': 0.3
},
'NYUD': {
'img_height': 448,#425
'img_width': 560,#560
'test_list': 'test_pair.lst',
'train_list': None,
'data_dir': '/opt/dataset/NYUD', # mean_rgb
'yita': 0.5
},
'MDBD': {
'img_height': 720,
'img_width': 1280,
'test_list': 'test_pair.lst',
'train_list': 'train_pair.lst',
'data_dir': '/opt/dataset/MDBD', # mean_rgb
'yita': 0.3
},
'BIPED': {
'img_height': 720, #720 # 1088
'img_width': 1280, # 1280 5 1920
'test_list': 'test_pair.lst',
'train_list': 'train_rgb.lst',
'data_dir': '/opt/dataset/BIPED', # mean_rgb
'yita': 0.5
},
'CLASSIC': {
'img_height': 512,
'img_width': 512,
'test_list': None,
'train_list': None,
'data_dir': 'data', # mean_rgb
'yita': 0.5
},
'DCD': {
'img_height': 352, #240
'img_width': 480,# 360
'test_list': 'test_pair.lst',
'train_list': None,
'data_dir': '/opt/dataset/DCD', # mean_rgb
'yita': 0.2
}
}
else:
config = {
'BSDS': {'img_height': 512, # 321
'img_width': 512, # 481
'test_list': 'test_pair.lst',
'data_dir': 'C:/Users/xavysp/dataset/BSDS', # mean_rgb
'yita': 0.5},
'BSDS300': {'img_height': 512, # 321
'img_width': 512, # 481
'test_list': 'test_pair.lst',
'data_dir': 'C:/Users/xavysp/dataset/BSDS300', # NIR
'yita': 0.5},
'PASCAL': {'img_height': 375,
'img_width': 500,
'test_list': 'test_pair.lst',
'data_dir': 'C:/Users/xavysp/dataset/PASCAL', # mean_rgb
'yita': 0.3},
'CID': {'img_height': 512,
'img_width': 512,
'test_list': 'test_pair.lst',
'data_dir': 'C:/Users/xavysp/dataset/CID', # mean_rgb
'yita': 0.3},
'NYUD': {'img_height': 425,
'img_width': 560,
'test_list': 'test_pair.lst',
'data_dir': 'C:/Users/xavysp/dataset/NYUD', # mean_rgb
'yita': 0.5},
'MDBD': {'img_height': 720,
'img_width': 1280,
'test_list': 'test_pair.lst',
'train_list': 'train_pair.lst',
'data_dir': 'C:/Users/xavysp/dataset/MDBD', # mean_rgb
'yita': 0.3},
'BIPED': {'img_height': 720, # 720
'img_width': 1280, # 1280
'test_list': 'test_pair.lst',
'train_list': 'train_rgb.lst',
'data_dir': 'C:/Users/xavysp/dataset/BIPED', # WIN: '../.../dataset/BIPED/edges'
'yita': 0.5},
'CLASSIC': {'img_height': 512,
'img_width': 512,
'test_list': None,
'train_list': None,
'data_dir': 'data', # mean_rgb
'yita': 0.5},
'DCD': {'img_height': 240,
'img_width': 360,
'test_list': 'test_pair.lst',
'data_dir': 'C:/Users/xavysp/dataset/DCD', # mean_rgb
'yita': 0.2}
}
return config[dataset_name]
class TestDataset(Dataset):
def __init__(self,
data_root,
test_data,
mean_bgr,
img_height,
img_width,
test_list=None,
arg=None
):
if test_data not in DATASET_NAMES:
raise ValueError(f"Unsupported dataset: {test_data}")
self.data_root = data_root
self.test_data = test_data
self.test_list = test_list
self.args=arg
# self.arg = arg
# self.mean_bgr = arg.mean_pixel_values[0:3] if len(arg.mean_pixel_values) == 4 \
# else arg.mean_pixel_values
self.mean_bgr = mean_bgr
self.img_height = img_height
self.img_width = img_width
self.data_index = self._build_index()
print(f"mean_bgr: {self.mean_bgr}")
def _build_index(self):
sample_indices = []
if self.test_data == "CLASSIC":
# for single image testing
images_path = os.listdir(self.data_root)
labels_path = None
sample_indices = [images_path, labels_path]
else:
# image and label paths are located in a list file
if not self.test_list:
raise ValueError(
f"Test list not provided for dataset: {self.test_data}")
list_name = os.path.join(self.data_root, self.test_list)
if self.test_data.upper()=='BIPED':
with open(list_name) as f:
files = json.load(f)
for pair in files:
tmp_img = pair[0]
tmp_gt = pair[1]
sample_indices.append(
(os.path.join(self.data_root, tmp_img),
os.path.join(self.data_root, tmp_gt),))
else:
with open(list_name, 'r') as f:
files = f.readlines()
files = [line.strip() for line in files]
pairs = [line.split() for line in files]
for pair in pairs:
tmp_img = pair[0]
tmp_gt = pair[1]
sample_indices.append(
(os.path.join(self.data_root, tmp_img),
os.path.join(self.data_root, tmp_gt),))
return sample_indices
def __len__(self):
return len(self.data_index[0]) if self.test_data.upper()=='CLASSIC' else len(self.data_index)
def __getitem__(self, idx):
# get data sample
# image_path, label_path = self.data_index[idx]
if self.data_index[1] is None:
image_path = self.data_index[0][idx]
else:
image_path = self.data_index[idx][0]
label_path = None if self.test_data == "CLASSIC" else self.data_index[idx][1]
img_name = os.path.basename(image_path)
file_name = os.path.splitext(img_name)[0] + ".png"
# base dir
if self.test_data.upper() == 'BIPED':
img_dir = os.path.join(self.data_root, 'imgs', 'test')
gt_dir = os.path.join(self.data_root, 'edge_maps', 'test')
elif self.test_data.upper() == 'CLASSIC':
img_dir = self.data_root
gt_dir = None
else:
img_dir = self.data_root
gt_dir = self.data_root
# load data
image = cv2.imread(os.path.join(img_dir, image_path), cv2.IMREAD_COLOR)
if not self.test_data == "CLASSIC":
label = cv2.imread(os.path.join(
gt_dir, label_path), cv2.IMREAD_COLOR)
else:
label = None
im_shape = [image.shape[0], image.shape[1]]
image, label = self.transform(img=image, gt=label)
return dict(images=image, labels=label, file_names=file_name, image_shape=im_shape)
def transform(self, img, gt):
# gt[gt< 51] = 0 # test without gt discrimination
if self.test_data == "CLASSIC":
img_height = self.img_height
img_width = self.img_width
print(
f"actual size: {img.shape}, target size: {( img_height,img_width,)}")
# img = cv2.resize(img, (self.img_width, self.img_height))
img = cv2.resize(img, (img_width,img_height))
gt = None
# Make images and labels at least 512 by 512
elif img.shape[0] < 512 or img.shape[1] < 512:
img = cv2.resize(img, (self.args.test_img_width, self.args.test_img_height)) # 512
gt = cv2.resize(gt, (self.args.test_img_width, self.args.test_img_height)) # 512
# Make sure images and labels are divisible by 2^4=16
elif img.shape[0] % 16 != 0 or img.shape[1] % 16 != 0:
img_width = ((img.shape[1] // 16) + 1) * 16
img_height = ((img.shape[0] // 16) + 1) * 16
img = cv2.resize(img, (img_width, img_height))
gt = cv2.resize(gt, (img_width, img_height))
else:
img_width =self.args.test_img_width
img_height =self.args.test_img_height
img = cv2.resize(img, (img_width, img_height))
gt = cv2.resize(gt, (img_width, img_height))
# if self.yita is not None:
# gt[gt >= self.yita] = 1
img = np.array(img, dtype=np.float32)
# if self.rgb:
# img = img[:, :, ::-1] # RGB->BGR
img -= self.mean_bgr
img = img.transpose((2, 0, 1))
img = torch.from_numpy(img.copy()).float()
if self.test_data == "CLASSIC":
gt = np.zeros((img.shape[:2]))
gt = torch.from_numpy(np.array([gt])).float()
else:
gt = np.array(gt, dtype=np.float32)
if len(gt.shape) == 3:
gt = gt[:, :, 0]
gt /= 255.
gt = torch.from_numpy(np.array([gt])).float()
return img, gt
class BipedDataset(Dataset):
train_modes = ['train', 'test', ]
dataset_types = ['rgbr', ]
data_types = ['aug', ]
def __init__(self,
data_root,
img_height,
img_width,
mean_bgr,
train_mode='train',
dataset_type='rgbr',
# is_scaling=None,
# Whether to crop image or otherwise resize image to match image height and width.
crop_img=False,
arg=None
):
self.data_root = data_root
self.train_mode = train_mode
self.dataset_type = dataset_type
self.data_type = 'aug' # be aware that this might change in the future
self.img_height = img_height
self.img_width = img_width
self.mean_bgr = mean_bgr
self.crop_img = crop_img
self.arg = arg
self.data_index = self._build_index()
def _build_index(self):
assert self.train_mode in self.train_modes, self.train_mode
assert self.dataset_type in self.dataset_types, self.dataset_type
assert self.data_type in self.data_types, self.data_type
data_root = os.path.abspath(self.data_root)
sample_indices = []
if self.arg.train_data.lower()=='biped':
images_path = os.path.join(data_root,
'edges/imgs',
self.train_mode,
self.dataset_type,
self.data_type)
labels_path = os.path.join(data_root,
'edges/edge_maps',
self.train_mode,
self.dataset_type,
self.data_type)
for directory_name in os.listdir(images_path):
image_directories = os.path.join(images_path, directory_name)
for file_name_ext in os.listdir(image_directories):
file_name = os.path.splitext(file_name_ext)[0]
sample_indices.append(
(os.path.join(images_path, directory_name, file_name + '.jpg'),
os.path.join(labels_path, directory_name, file_name + '.png'),)
)
else:
file_path = os.path.join(data_root, self.arg.train_list)
if self.arg.train_data.lower()=='bsds':
with open(file_path, 'r') as f:
files = f.readlines()
files = [line.strip() for line in files]
pairs = [line.split() for line in files]
for pair in pairs:
tmp_img = pair[0]
tmp_gt = pair[1]
sample_indices.append(
(os.path.join(data_root,tmp_img),
os.path.join(data_root,tmp_gt),))
else:
with open(file_path) as f:
files = json.load(f)
for pair in files:
tmp_img = pair[0]
tmp_gt = pair[1]
sample_indices.append(
(os.path.join(data_root, tmp_img),
os.path.join(data_root, tmp_gt),))
return sample_indices
def __len__(self):
return len(self.data_index)
def __getitem__(self, idx):
# get data sample
image_path, label_path = self.data_index[idx]
# load data
image = cv2.imread(image_path, cv2.IMREAD_COLOR)
label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)
image, label = self.transform(img=image, gt=label)
return dict(images=image, labels=label)
def transform(self, img, gt):
gt = np.array(gt, dtype=np.float32)
if len(gt.shape) == 3:
gt = gt[:, :, 0]
gt /= 255. # for DexiNed input and BDCN
img = np.array(img, dtype=np.float32)
img -= self.mean_bgr
i_h, i_w,_ = img.shape
# data = []
# if self.scale is not None:
# for scl in self.scale:
# img_scale = cv2.resize(img, None, fx=scl, fy=scl, interpolation=cv2.INTER_LINEAR)
# data.append(torch.from_numpy(img_scale.transpose((2, 0, 1))).float())
# return data, gt
# 400 for BIPEd and 352 for BSDS check with 384
crop_size = self.img_height if self.img_height == self.img_width else None#448# MDBD=480 BIPED=480/400 BSDS=352
# # for BSDS 352/BRIND
# if i_w> crop_size and i_h>crop_size:
# i = random.randint(0, i_h - crop_size)
# j = random.randint(0, i_w - crop_size)
# img = img[i:i + crop_size , j:j + crop_size ]
# gt = gt[i:i + crop_size , j:j + crop_size ]
# for BIPED/MDBD
if np.random.random() > 0.4: #l
h,w = gt.shape
if i_w> 500 and i_h>500:
LR_img_size = crop_size #l BIPED=256, 240 200 # MDBD= 352 BSDS= 176
i = random.randint(0, h - LR_img_size)
j = random.randint(0, w - LR_img_size)
# if img.
img = img[i:i + LR_img_size , j:j + LR_img_size ]
gt = gt[i:i + LR_img_size , j:j + LR_img_size ]
else:
LR_img_size = 352#256 # l BIPED=208-352, # MDBD= 352-480- BSDS= 176-320
i = random.randint(0, h - LR_img_size)
j = random.randint(0, w - LR_img_size)
# if img.
img = img[i:i + LR_img_size, j:j + LR_img_size]
gt = gt[i:i + LR_img_size, j:j + LR_img_size]
img = cv2.resize(img, dsize=(crop_size, crop_size), )
gt = cv2.resize(gt, dsize=(crop_size, crop_size))
else:
            # New additions
img = cv2.resize(img, dsize=(crop_size, crop_size))
gt = cv2.resize(gt, dsize=(crop_size, crop_size))
# BSDS
# gt[gt>0.28]=1. # BSDS/MDBD
# gt[gt<=0.28]=0. # BSDS/MDBD
# for BIPED / BRIND
gt[gt > 0.2] += 0.6# 0.5 for BIPED/BSDS-RIND
gt = np.clip(gt, 0., 1.) # BIPED/BSDS-RIND
# # for MDBD
# gt[gt > 0.1] +=0.7
# gt = np.clip(gt, 0., 1.)
# # For RCF input
# # -----------------------------------
# gt[gt==0]=0.
# gt[np.logical_and(gt>0.,gt<0.5)] = 2.
# gt[gt>=0.5]=1.
#
# gt = gt.astype('float32')
# ----------------------------------
img = img.transpose((2, 0, 1))
img = torch.from_numpy(img.copy()).float()
gt = torch.from_numpy(np.array([gt])).float()
return img, gt
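# Consumption sketch (the paths and numbers below are placeholders; `args` must provide
# the train_data / train_list attributes referenced in _build_index):
#
#   from torch.utils.data import DataLoader
#   train_set = BipedDataset(data_root='/opt/dataset/BIPED', img_height=480, img_width=480,
#                            mean_bgr=[103.939, 116.779, 123.68], arg=args)
#   loader = DataLoader(train_set, batch_size=8, shuffle=True)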
| 37.895277
| 119
| 0.475427
|
6adef8a04726b0bb7ea377d0cdb322449a426eb4
| 50,191
|
py
|
Python
|
psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py
|
AlirezaFarnia/PsyNeuLink
|
c66f8248d1391830e76c97df4b644e12a02c2b73
|
[
"Apache-2.0"
] | null | null | null |
psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py
|
AlirezaFarnia/PsyNeuLink
|
c66f8248d1391830e76c97df4b644e12a02c2b73
|
[
"Apache-2.0"
] | null | null | null |
psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py
|
AlirezaFarnia/PsyNeuLink
|
c66f8248d1391830e76c97df4b644e12a02c2b73
|
[
"Apache-2.0"
] | null | null | null |
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# ************************************** LCControlMechanism ************************************************
"""
Contents
--------
* `LCControlMechanism_Overview`
* `LCControlMechanism_Creation`
- `LCControlMechanism_ObjectiveMechanism_Creation`
- `LCControlMechanism_Modulated_Mechanisms`
* `LCControlMechanism_Structure`
- `LCControlMechanism_Input`
• `LCControlMechanism_ObjectiveMechanism`
- `LCControlMechanism_Function`
• `LCControlMechanism_Modes_Of_Operation`
- `LCControlMechanism_Output`
* `LCControlMechanism_Execution`
* `LCControlMechanism_Examples`
* `LCControlMechanism_Class_Reference`
.. _LCControlMechanism_Overview:
Overview
--------
An LCControlMechanism is a `ControlMechanism <ControlMechanism>` that multiplicatively modulates the `function
<Mechanism_Base.function>` of one or more `Mechanisms <Mechanism>` (usually `TransferMechanisms <TransferMechanism>`).
It implements an abstract model of the `locus coeruleus (LC) <https://www.ncbi.nlm.nih.gov/pubmed/12371518>`_ that
uses an `FitzHughNagumoIntegrator` Function to generate its output. This is modulated by a `mode
<LCControlMechanism.mode_FitzHughNagumo>` parameter that regulates its function between `"tonic" and "phasic" modes of
operation <LCControlMechanism_Modes_Of_Operation>`. The Mechanisms modulated by an LCControlMechanism can be listed
using its `show <LCControlMechanism.show>` method. When used with an `AGTControlMechanism` to regulate the `mode
<FitzHughNagumoIntegrator.mode>` parameter of its `FitzHughNagumoIntegrator` Function, it implements a form of the
`Adaptive Gain Theory <http://www.annualreviews.org/doi/abs/10.1146/annurev.neuro.28.061604.135709>`_ of the locus
coeruleus-norepinephrine (LC-NE) system.
.. _LCControlMechanism_Creation:
Creating an LCControlMechanism
------------------------------
An LCControlMechanism can be created in any of the ways used to `create a ControlMechanism <ControlMechanism_Creation>`.
The following sections describe how to specify the inputs that drive the LCControlMechanism's response, and the
Mechanisms that it controls.
.. _LCControlMechanism_ObjectiveMechanism_Creation:
*ObjectiveMechanism and Monitored OutputPorts*
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Like any ControlMechanism, when an LCControlMechanism is created it may `automatically create
<ControlMechanism_ObjectiveMechanism>` an `ObjectiveMechanism` from which it receives its input. The
ObjectiveMechanism receives its input from any `OutputPorts <OutputPort>` specified in the **monitor_for_control**
argument of the constructor for the LCControlMechanism.
COMMENT:
TBI FOR COMPOSITION
(or of a `System` for which
it is assigned as a `controller <System.controller>`; see `ControlMechanism_ObjectiveMechanism`).
COMMENT
By default, the ObjectiveMechanism of an LCControlMechanism is assigned a `CombineMeans` Function as its `function
<ObjectiveMechanism.function>` (see `LCControlMechanism_ObjectiveMechanism`). The ObjectiveMechanism can be
customized using the **objective_mechanism** argument of the LCControlMechanism's constructor; however, the `value
<OutputPort.value>` of its *OUTCOME* `OutputPort` must be a scalar value (that is used as the input to the
LCControlMechanism's `function <LCControlMechanism.function>` to drive its `phasic response
<LCControlMechanism_Modes_Of_Operation>`).
.. _LCControlMechanism_Modulated_Mechanisms:
*Mechanisms to Modulate*
~~~~~~~~~~~~~~~~~~~~~~~~
The Mechanisms to be modulated by an LCControlMechanism are specified in the **modulated_mechanisms** argument of its
constructor. An LCControlMechanism controls a `Mechanism <Mechanism>` by modifying the `multiplicative_param
<Function_Modulatory_Params>` of the Mechanism's `function <Mechanism_Base.function>`. Therefore, any Mechanism
specified for control by an LCControlMechanism must be either a `TransferMechanism`, or a Mechanism that uses a
`TransferFunction` or a class of `Function <Function>` that implements a `multiplicative_param
<Function_Modulatory_Params>`. The **modulated_mechanisms** argument must be a list of such Mechanisms. The keyword
*ALL* can also be used to specify all of the eligible `ProcessingMechanisms <ProcessingMechanism>` in all of the
`Compositions <Composition>` to which the LCControlMechanism belongs. If a Mechanism specified in the
**modulated_mechanisms** argument does not implement a multiplicative_param, it is ignored. A `ControlProjection` is
automatically created that projects from the LCControlMechanism to the `ParameterPort` for the `multiplicative_param
<Function_Modulatory_Params>` of every Mechanism specified in the **modulated_mechanisms** argument. The Mechanisms
modulated by an LCControlMechanism are listed in its `modulated_mechanisms <LCControlMechanism.modulated_mechanisms>`
attribute.
.. _LCControlMechanism_Structure:
Structure
---------
.. _LCControlMechanism_Input:
*Input*
~~~~~~~
An LCControlMechanism has a single (primary) `InputPort <InputPort_Primary>`, the `value <InputPort.value>` of
which is a scalar that is provided by a `MappingProjection` from the *OUTCOME* `OutputPort <ObjectiveMechanism_Output>`
of the LCControlMechanism's `ObjectiveMechanism`. That value is used as the input to the LCControlMechanism's
`function <LCControlMechanism.function>`, which drives its `phasic response <LCControlMechanism_Modes_Of_Operation>`.
.. _LCControlMechanism_ObjectiveMechanism:
ObjectiveMechanism
^^^^^^^^^^^^^^^^^^
If an ObjectiveMechanism is `automatically created <LCControlMechanism_ObjectiveMechanism_Creation>` for an
LCControlMechanism, it receives its inputs from the `OutputPort(s) <OutputPort>` specified in the
**monitor_for_control** argument of the LCControlMechanism constructor, or the **monitored_output_ports** argument
of the LCControlMechanism's `ObjectiveMechanism <ControlMechanism_ObjectiveMechanism>`. By default, the
ObjectiveMechanism is assigned a `CombineMeans` Function with a default `operation <LinearCombination.operation>` of
*SUM*; this takes the mean of each array that the ObjectiveMechanism receives from the `value <OutputPort.value>` of
each of the OutputPorts that it monitors, and returns the sum of these means. The `value <OutputPort.value>` of
each OutputPort can be weighted (multiplicatively and/or exponentially), by specifying this in the
**monitor_for_control** argument of the LCControlMechanism (see `ControlMechanism_Monitor_for_Control` for details).
As with any ControlMechanism, its ObjectiveMechanism can be explicitly specified to customize its `function
<ObjectiveMechanism.function>` or any of its other parameters, by specifying it in the **objective_mechanism**
argument of the LCControlMechanism's constructor.
.. _LCControlMechanism_Objective_Mechanism_Function_Note:
.. note::
If an `ObjectiveMechanism` is specified in the **objective_mechanism** argument of the LCControlMechanism's
constructor, then its attribute values (including any defaults) override those used by a LCControlMechanism for
creating its `objective_mechanism <LCControlMechanism.objective_mechanism>`. In particular, whereas an
ObjectiveMechanism uses `LinearCombination` as the default for its `function <ObjectiveMechanism.function>`,
an LCControlMechanism uses `CombineMeans` as the `function <ObjectiveMechanism.function>` of its `objective_mechanism
<LCControlMechanism.objective_mechanism>`. As a consequence, if an ObjectiveMechanism is explicitly specified in
the LCControlMechanism's **objective_mechanism** argument, and its **function** argument is not also
explicitly specified as `CombineMeans`, then `LinearCombination` will be used for the ObjectiveMechanism's `function
<ObjectiveMechanism.function>`. To ensure that `CombineMeans` is used, it must be specified explicitly in the
**function** argument of the constructor for the ObjectiveMechanism (for an example of a similar condition
see example under `ControlMechanism_ObjectiveMechanism_Function`).
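For example, a minimal sketch (assuming the standard top-level ``psyneulink`` import, and using illustrative
Mechanism names; no monitored OutputPorts are specified, purely for brevity) of assigning `CombineMeans` explicitly
to an ObjectiveMechanism passed in the **objective_mechanism** argument::
>>> import psyneulink as pnl
>>> my_mech = pnl.TransferMechanism(name='my_mechanism')
>>> lc_obj_mech = pnl.ObjectiveMechanism(function=pnl.CombineMeans(),
...                                      name='LC ObjectiveMechanism')
>>> LC = pnl.LCControlMechanism(objective_mechanism=lc_obj_mech,
...                             modulated_mechanisms=[my_mech])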
The ObjectiveMechanism is listed in the LCControlMechanism's `objective_mechanism
<LCControlMechanism.objective_mechanism>` attribute. The OutputPorts it monitors are listed in the
ObjectiveMechanism's `monitored_output_ports <ObjectiveMechanism.monitored_output_ports>` attribute, as well as in the
LCControlMechanism's `monitor_for_control <LCControlMechanism.monitor_for_control>` attribute. These can be
displayed using the LCControlMechanism's `show <LCControlMechanism.show>` method.
.. _LCControlMechanism_Function:
*Function*
~~~~~~~~~~
An LCControlMechanism uses the `FitzHughNagumoIntegrator` as its `function <LCControlMechanism.function>`; this
implements a `FitzHugh-Nagumo model <https://en.wikipedia.org/wiki/FitzHugh–Nagumo_model>`_ often used to describe
the spiking of a neuron, but in this case the population activity of the LC (see `Gilzenrat et al., 2002
<http://www.sciencedirect.com/science/article/pii/S0893608002000552?via%3Dihub>`_). The `FitzHughNagumoIntegrator`
Function of an LCControlMechanism takes a scalar as its `variable <FitzHughNagumoIntegrator.variable>`, received from
the `input <LCControlMechanism_Input>` to the LCControlMechanism, and the result serves as the `control_allocation
<LCControlMechanism.control_allocation>` for the LCControlMechanism. All of the parameters of the
`FitzHughNagumoIntegrator` function are accessible as attributes of the LCControlMechanism.
.. _LCControlMechanism_Modes_Of_Operation:
LC Modes of Operation
^^^^^^^^^^^^^^^^^^^^^
The `mode <FitzHughNagumoIntegrator.mode>` parameter of the LCControlMechanism's `FitzHughNagumoIntegrator` Function
regulates its operation between `"tonic" and "phasic" modes <https://www.ncbi.nlm.nih.gov/pubmed/8027789>`_:
* in the *tonic mode* (low value of `mode <FitzHughNagumoIntegrator.mode>`), the output of the LCControlMechanism is
moderately low and constant; that is, it is relatively unaffected by its `input <LCControlMechanism_Input>`.
This blunts the response of the Mechanisms that the LCControlMechanism controls to their inputs.
* in the *phasic mode* (high value of `mode <FitzHughNagumoIntegrator.mode>`), when the `input to the
LCControlMechanism <LCControlMechanism_Input>` is low, its `output <LCControlMechanism_Output>` is even lower
than when it is in the tonic regime, and thus the response of the Mechanisms it controls to their outputs is even
more blunted. However, when the LCControlMechanism's input rises above a certain value (determined by the
`threshold <LCControlMechanism.threshold>` parameter), its output rises sharply generating a "phasic response",
and inducing a much sharper response of the Mechanisms it controls to their inputs (see the example sketched below).
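A minimal sketch (the particular value of **mode_FitzHughNagumo** is an illustrative assumption) of biasing the LC
toward its phasic mode of operation via the constructor::
>>> import psyneulink as pnl
>>> my_mech = pnl.TransferMechanism(function=pnl.Logistic, name='modulated_mechanism')
>>> phasic_LC = pnl.LCControlMechanism(modulated_mechanisms=[my_mech],
...                                    mode_FitzHughNagumo=0.9,
...                                    name='phasic_LC')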
.. _LCControlMechanism_Output:
*Output*
~~~~~~~~
An LCControlMechanism has a single `ControlSignal` that uses its `control_allocation
<LCControlMechanism.control_allocation>` (the scalar value generated by its `function <LCControlMechanism.function>`)
to modulate the function of the Mechanism(s) it controls. The ControlSignal is assigned a `ControlProjection` to the
`ParameterPort` for the `multiplicative_param <Function_Modulatory_Params>` of the `function
<Mechanism_Base.function>` for each of those Mechanisms. The Mechanisms modulated by an LCControlMechanism are listed
in its `modulated_mechanisms <LCControlMechanism.modulated_mechanisms>` attribute, and can be displayed using its
:func:`show <LCControlMechanism.show>` method.
COMMENT:
VERSION FOR MULTIPLE CONTROL SIGNALS
An LCControlMechanism has a `ControlSignal` for each Mechanism listed in its `modulated_mechanisms
<LCControlMechanism.modulated_mechanisms>` attribute. All of its ControlSignals are assigned the same value: the
result of the LCControlMechanism's `function <LCControlMechanism.function>`. Each ControlSignal is assigned a
`ControlProjection` to the `ParameterPort` for the `multiplicative_param <Function_Modulatory_Params>` of `function
<Mechanism_Base.function>` for the Mechanism in `modulated_mechanisms <LCControlMechanism.modulate_mechanisms>` to
which it corresponds. The Mechanisms modulated by an LCControlMechanism can be displayed using its :func:`show
<LCControlMechanism.show>` method.
COMMENT
.. _LCControlMechanism_Execution:
Execution
---------
An LCControlMechanism executes within a `Composition` at a point specified in the Composition's `Scheduler` or, if it
is the `controller <Composition.controller>` for a `Composition`, after all of the other Mechanisms in the Composition
have `executed <Composition_Run>` in a `TRIAL`. Its `function <LCControlMechanism.function>` takes the `value
<InputPort.value>` of the LCControlMechanism's `primary InputPort <InputPort_Primary>` as its input, and generates a
response -- under the influence of its `mode <FitzHughNagumoIntegrator.mode>` parameter -- that is assigned as the
`allocation <LCControlSignal.allocation>` of its `ControlSignals <ControlSignal>`. The latter are used by its
`ControlProjections <ControlProjection>` to modulate the response -- in the next `TRIAL` of execution -- of the
Mechanisms the LCControlMechanism controls.
.. note::
A `ParameterPort` that receives a `ControlProjection` does not update its value until its owner Mechanism
executes (see `Lazy Evaluation <LINK>` for an explanation of "lazy" updating). This means that even if a
LCControlMechanism has executed, the `multiplicative_param <Function_Modulatory_Params>` parameter of the `function
<Mechanism_Base.function>` of a Mechanism that it controls will not assume its new value until that Mechanism has
executed.
.. _LCControlMechanism_Examples:
Examples
--------
The following example generates an LCControlMechanism that modulates the function of two TransferMechanisms, one that
uses a `Linear` function and the other a `Logistic` function::
>>> import psyneulink as pnl
>>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear,
... name='my_linear_mechanism')
>>> my_mech_2 = pnl.TransferMechanism(function=pnl.Logistic,
... name='my_logistic_mechanism')
>>> LC = LCControlMechanism(modulated_mechanisms=[my_mech_1, my_mech_2],
... name='my_LC')
COMMENT:
# Calling `LC.show()` generates the following report::
#
# >>> LC.show()
# <BLANKLINE>
# ---------------------------------------------------------
# <BLANKLINE>
# my_LC
# <BLANKLINE>
# Monitoring the following Mechanism OutputPorts:
# <BLANKLINE>
# Modulating the following parameters:
# my_logistic_mechanism: gain
# my_linear_mechanism: slope
# <BLANKLINE>
# ---------------------------------------------------------
COMMENT
Calling `LC.show()` generates the following report::
my_LC
Monitoring the following Mechanism OutputPorts:
Modulating the following parameters:
my_logistic_mechanism: gain
my_linear_mechanism: slope
Note that the LCControlMechanism controls the `multiplicative_param <Function_Modulatory_Params>` of the `function
<Mechanism_Base.function>` of each Mechanism: the `slope <Linear.slope>` parameter for ``my_mech_1``, since it uses
a `Linear` Function; and the `gain <Logistic.gain>` parameter for ``my_mech_2``, since it uses a `Logistic` Function.
COMMENT:
ADDITIONAL EXAMPLES HERE OF THE DIFFERENT FORMS OF SPECIFICATION FOR
**monitor_for_control** and **modulated_mechanisms**
STRUCTURE:
MODE INPUT_PORT <- NAMED ONE, LAST?
SIGNAL INPUT_PORT(S) <- PRIMARY; MUST BE FROM PROCESSING MECHANISMS
CONTROL SIGNALS
COMMENT
.. _LCControlMechanism_Class_Reference:
Class Reference
---------------
"""
import typecheck as tc
from psyneulink.core import llvm as pnlvm
from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import FitzHughNagumoIntegrator
from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import ControlMechanism
from psyneulink.core.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism
from psyneulink.core.components.projections.modulatory.controlprojection import ControlProjection
from psyneulink.core.components.shellclasses import Mechanism, System_Base
from psyneulink.core.components.ports.outputport import OutputPort
from psyneulink.core.globals.context import Context, ContextFlags
from psyneulink.core.globals.keywords import \
ALL, CONTROL, CONTROL_PROJECTIONS, FUNCTION, INIT_EXECUTE_METHOD_ONLY, \
MULTIPLICATIVE, MULTIPLICATIVE_PARAM, PROJECTIONS
from psyneulink.core.globals.parameters import Parameter, ParameterAlias
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set
from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel
from psyneulink.core.globals.utilities import is_iterable, convert_to_list
__all__ = [
'CONTROL_SIGNAL_NAME', 'ControlMechanismRegistry', 'LCControlMechanism', 'LCControlMechanismError',
'MODULATED_MECHANISMS',
]
MODULATED_MECHANISMS = 'modulated_mechanisms'
CONTROL_SIGNAL_NAME = 'LCControlMechanism_ControlSignal'
ControlMechanismRegistry = {}
class LCControlMechanismError(Exception):
def __init__(self, error_value):
self.error_value = error_value
class LCControlMechanism(ControlMechanism):
"""
LCControlMechanism( \
modulated_mechanisms=None, \
initial_w_FitzHughNagumo=0.0, \
initial_v_FitzHughNagumo=0.0, \
time_step_size_FitzHughNagumo=0.05, \
t_0_FitzHughNagumo=0.0, \
a_v_FitzHughNagumo=-1/3, \
b_v_FitzHughNagumo=0.0, \
c_v_FitzHughNagumo=1.0, \
d_v_FitzHughNagumo=0.0, \
e_v_FitzHughNagumo=-1.0, \
f_v_FitzHughNagumo=1.0, \
threshold_FitzHughNagumo=-1.0, \
time_constant_v_FitzHughNagumo=1.0, \
a_w_FitzHughNagumo=1.0, \
b_w_FitzHughNagumo=-0.8, \
c_w_FitzHughNagumo=0.7, \
mode_FitzHughNagumo=1.0, \
uncorrelated_activity_FitzHughNagumo=0.0, \
time_constant_w_FitzHughNagumo=12.5, \
integration_method="RK4", \
base_level_gain=0.5, \
scaling_factor_gain=3.0)
Subclass of `ControlMechanism` that modulates the `multiplicative_param <Function_Modulatory_Params>` of the
`function <Mechanism_Base.function>` of one or more `Mechanisms <Mechanism>`.
See `ControlMechanism <ControlMechanism_Class_Reference>` for additional arguments and attributes.
Arguments
---------
modulated_mechanisms : List[`Mechanism <Mechanism>`] or *ALL*
specifies the Mechanisms to be modulated by the LCControlMechanism. If it is a list, every item must be a
Mechanism with a `function <Mechanism_Base.function>` that implements a `multiplicative_param
<Function_Modulatory_Params>`; alternatively the keyword *ALL* can be used to specify all of the
`ProcessingMechanisms <ProcessingMechanism>` in the Composition(s) to which the LCControlMechanism belongs.
initial_w_FitzHughNagumo : float : default 0.0
sets `initial_w <initial_w.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
initial_v_FitzHughNagumo : float : default 0.0
sets `initial_v <initial_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
time_step_size_FitzHughNagumo : float : default 0.05
sets `time_step_size <time_step_size.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
t_0_FitzHughNagumo : float : default 0.0
sets `t_0 <t_0.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
a_v_FitzHughNagumo : float : default -1/3
sets `a_v <a_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
b_v_FitzHughNagumo : float : default 0.0
sets `b_v <b_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
c_v_FitzHughNagumo : float : default 1.0
sets `c_v <c_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
d_v_FitzHughNagumo : float : default 0.0
sets `d_v <d_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
e_v_FitzHughNagumo : float : default -1.0
sets `e_v <e_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
f_v_FitzHughNagumo : float : default 1.0
sets `f_v <f_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
threshold_FitzHughNagumo : float : default -1.0
sets `threshold <threshold.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
time_constant_v_FitzHughNagumo : float : default 1.0
sets `time_constant_v <time_constant_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's
`FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
a_w_FitzHughNagumo : float : default 1.0
sets `a_w <a_w.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
b_w_FitzHughNagumo : float : default -0.8,
sets `b_w <b_w.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
c_w_FitzHughNagumo : float : default 0.7
sets `c_w <c_w.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
mode_FitzHughNagumo : float : default 1.0
sets `mode <mode.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
uncorrelated_activity_FitzHughNagumo : float : default 0.0
sets `uncorrelated_activity <uncorrelated_activity.FitzHughNagumoIntegrator>` on the LCControlMechanism's
`FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
time_constant_w_FitzHughNagumo : float : default 12.5
sets `time_constant_w <time_constant_w.FitzHughNagumoIntegrator>` on the LCControlMechanism's
`FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
integration_method : str : default "RK4"
sets `integration_method <integration_method.FitzHughNagumoIntegrator>` on the LCControlMechanism's
`FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
base_level_gain : float : default 0.5
sets the base value in the equation used to compute the time-dependent gain value that the LCControl applies
to each of the mechanisms it modulates
.. math::
g(t) = G + k w(t)
base_level_gain = G
scaling_factor_gain : float : default 3.0
sets the scaling factor in the equation used to compute the time-dependent gain value that the LCControl
applies to each of the mechanisms it modulates
.. math::
g(t) = G + k w(t)
scaling_factor_gain = k
Attributes
----------
monitor_for_control : List[OutputPort]
list of the `OutputPorts <OutputPort>` that project to `objective_mechanism
<LCControlMechanism.objective_mechanism>` (and also listed in the ObjectiveMechanism's `monitored_output_ports
<ObjectiveMechanism.monitored_output_ports>` attribute); these are used by the ObjectiveMechanism to
generate the ControlMechanism's `input <ControlMechanism_Input>`, which drives the `phasic response
<LCControlMechanism_Modes_Of_Operation>` of its `function <LCControlMechanism.function>`.
monitored_output_ports_weights_and_exponents : List[Tuple(float, float)]
each tuple in the list contains the weight and exponent associated with a corresponding item of
`monitored_output_ports <LCControlMechanism.monitored_output_ports>`; these are the same as those in
the `monitored_output_ports_weights_and_exponents
<ObjectiveMechanism.monitored_output_ports_weights_and_exponents>` attribute of the `objective_mechanism
<LCControlMechanism.objective_mechanism>`, and are used by the ObjectiveMechanism's `function
<ObjectiveMechanism.function>` to parametrize the contribution made to its output by each of the values that
it monitors (see `ObjectiveMechanism Function <ObjectiveMechanism_Function>`).
function : FitzHughNagumoIntegrator
takes the LCControlMechanism's `input <LCControlMechanism_Input>` and generates its `response
<LCControlMechanism_Output>` under
the influence of the `FitzHughNagumoIntegrator` Function's `mode <FitzHughNagumoIntegrator.mode>` attribute
(see `LCControlMechanism_Function` for additional details).
control_allocation : 2d np.array
contains a single item — the result of the LCControlMechanism's `function <LCControlMechanism.function>` —
that is assigned as the `allocation <ControlSignal.allocation>` for the LCControlMechanism's single
`ControlSignal`, listed in its `control_signals` attribute; the control_allocation is the same as the
ControlMechanism's `value <Mechanism_Base.value>` attribute.
control_signals : List[ControlSignal]
contains the LCControlMechanism's single `ControlSignal`, which sends `ControlProjections
<ControlProjection>` to the `multiplicative_param <Function_Modulatory_Params>` of each of the Mechanisms
listed in the LCControlMechanism's `modulated_mechanisms <LCControlMechanism.modulated_mechanisms>`
attribute.
control_projections : List[ControlProjection]
list of `ControlProjections <ControlProjection>` sent by the LCControlMechanism's `ControlSignal`, each of
which projects to the `ParameterPort` for the `multiplicative_param <Function_Modulatory_Params>` of the
`function <Mechanism_Base.function>` of one of the Mechanisms listed in `modulated_mechanisms
<LCControlMechanism.modulated_mechanisms>` attribute.
modulated_mechanisms : List[Mechanism]
list of `Mechanisms <Mechanism>` modulated by the LCControlMechanism.
initial_w_FitzHughNagumo : float : default 0.0
sets `initial_w <initial_w.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
initial_v_FitzHughNagumo : float : default 0.0
sets `initial_v <initial_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
time_step_size_FitzHughNagumo : float : default 0.05
sets `time_step_size <time_step_size.FitzHughNagumoIntegrator>` on the LCControlMechanism's
`FitzHughNagumoIntegrator <FitzHughNagumoIntegrator>` function
t_0_FitzHughNagumo : float : default 0.0
sets `t_0 <t_0.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
a_v_FitzHughNagumo : float : default -1/3
sets `a_v <a_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
b_v_FitzHughNagumo : float : default 0.0
sets `b_v <b_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
c_v_FitzHughNagumo : float : default 1.0
sets `c_v <c_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
d_v_FitzHughNagumo : float : default 0.0
sets `d_v <d_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
e_v_FitzHughNagumo : float : default -1.0
sets `e_v <e_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
f_v_FitzHughNagumo : float : default 1.0
sets `f_v <f_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
threshold_FitzHughNagumo : float : default -1.0
sets `threshold <threshold.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
time_constant_v_FitzHughNagumo : float : default 1.0
sets `time_constant_v <time_constant_v.FitzHughNagumoIntegrator>` on the LCControlMechanism's
`FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
a_w_FitzHughNagumo : float : default 1.0
sets `a_w <a_w.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
b_w_FitzHughNagumo : float : default -0.8,
sets `b_w <b_w.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
c_w_FitzHughNagumo : float : default 0.7
sets `c_w <c_w.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
mode_FitzHughNagumo : float : default 1.0
sets `mode <mode.FitzHughNagumoIntegrator>` on the LCControlMechanism's `FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
uncorrelated_activity_FitzHughNagumo : float : default 0.0
sets `uncorrelated_activity <uncorrelated_activity.FitzHughNagumoIntegrator>` on the LCControlMechanism's
`FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
time_constant_w_FitzHughNagumo : float : default 12.5
sets `time_constant_w <time_constant_w.FitzHughNagumoIntegrator>` on the LCControlMechanism's
`FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
integration_method : str : default "RK4"
sets `integration_method <integration_method.FitzHughNagumoIntegrator>` on the LCControlMechanism's
`FitzHughNagumoIntegrator
<FitzHughNagumoIntegrator>` function
base_level_gain : float : default 0.5
sets the base value in the equation used to compute the time-dependent gain value that the LCControl applies
to each of the mechanisms it modulates
.. math::
g(t) = G + k w(t)
base_level_gain = G
scaling_factor_gain : float : default 3.0
sets the scaling factor in the equation used to compute the time-dependent gain value that the LCControl
applies to each of the mechanisms it modulates
.. math::
g(t) = G + k w(t)
scaling_factor_gain = k
"""
componentType = "LCControlMechanism"
initMethod = INIT_EXECUTE_METHOD_ONLY
classPreferenceLevel = PreferenceLevel.TYPE
# Any preferences specified below will override those specified in TYPE_DEFAULT_PREFERENCES
# Note: only need to specify setting; level will be assigned to TYPE automatically
# classPreferences = {
# PREFERENCE_SET_NAME: 'ControlMechanismClassPreferences',
# PREFERENCE_KEYWORD<pref>: <setting>...}
class Parameters(ControlMechanism.Parameters):
"""
Attributes
----------
base_level_gain
see `base_level_gain <LCControlMechanism.base_level_gain>`
:default value: 0.5
:type: float
function
see `function <LCControlMechanism.function>`
:default value: `FitzHughNagumoIntegrator`
:type: `Function`
scaling_factor_gain
see `scaling_factor_gain <LCControlMechanism.scaling_factor_gain>`
:default value: 3.0
:type: float
"""
function = Parameter(FitzHughNagumoIntegrator, stateful=False, loggable=False)
base_level_gain = Parameter(0.5, modulable=True)
scaling_factor_gain = Parameter(3.0, modulable=True)
paramClassDefaults = ControlMechanism.paramClassDefaults.copy()
paramClassDefaults.update({FUNCTION:FitzHughNagumoIntegrator,
CONTROL_PROJECTIONS: None,
})
@tc.typecheck
def __init__(self,
default_variable=None,
system:tc.optional(System_Base)=None,
objective_mechanism:tc.optional(tc.any(ObjectiveMechanism, list))=None,
monitor_for_control:tc.optional(tc.any(is_iterable, Mechanism, OutputPort))=None,
# modulated_mechanisms:tc.optional(tc.any(list,str)) = None,
modulated_mechanisms=None,
modulation:tc.optional(str)=MULTIPLICATIVE,
integration_method="RK4",
initial_w_FitzHughNagumo=0.0,
initial_v_FitzHughNagumo=0.0,
time_step_size_FitzHughNagumo=0.05,
t_0_FitzHughNagumo=0.0,
a_v_FitzHughNagumo=-1 / 3,
b_v_FitzHughNagumo=0.0,
c_v_FitzHughNagumo=1.0,
d_v_FitzHughNagumo=0.0,
e_v_FitzHughNagumo=-1.0,
f_v_FitzHughNagumo=1.0,
time_constant_v_FitzHughNagumo=1.0,
a_w_FitzHughNagumo=1.0,
b_w_FitzHughNagumo=-0.8,
c_w_FitzHughNagumo=0.7,
threshold_FitzHughNagumo=-1.0,
time_constant_w_FitzHughNagumo=12.5,
mode_FitzHughNagumo=1.0,
uncorrelated_activity_FitzHughNagumo=0.0,
base_level_gain=0.5,
scaling_factor_gain=3.0,
params=None,
name=None,
prefs:is_pref_set=None
):
# Assign args to params and functionParams dicts
params = self._assign_args_to_param_dicts(system=system,
modulated_mechanisms=modulated_mechanisms,
modulation=modulation,
base_level_gain=base_level_gain,
scaling_factor_gain=scaling_factor_gain,
params=params)
super().__init__(system=system,
default_variable=default_variable,
objective_mechanism=objective_mechanism,
monitor_for_control=monitor_for_control,
function=FitzHughNagumoIntegrator(integration_method=integration_method,
initial_v=initial_v_FitzHughNagumo,
initial_w=initial_w_FitzHughNagumo,
time_step_size=time_step_size_FitzHughNagumo,
t_0=t_0_FitzHughNagumo,
a_v=a_v_FitzHughNagumo,
b_v=b_v_FitzHughNagumo,
c_v=c_v_FitzHughNagumo,
d_v=d_v_FitzHughNagumo,
e_v=e_v_FitzHughNagumo,
f_v=f_v_FitzHughNagumo,
time_constant_v=time_constant_v_FitzHughNagumo,
a_w=a_w_FitzHughNagumo,
b_w=b_w_FitzHughNagumo,
c_w=c_w_FitzHughNagumo,
threshold=threshold_FitzHughNagumo,
mode=mode_FitzHughNagumo,
uncorrelated_activity=uncorrelated_activity_FitzHughNagumo,
time_constant_w=time_constant_w_FitzHughNagumo,
),
modulation=modulation,
params=params,
name=name,
prefs=prefs)
def _validate_params(self, request_set, target_set=None, context=None):
"""Validate SYSTEM, MONITOR_FOR_CONTROL and CONTROL_SIGNALS
Check that all items in MONITOR_FOR_CONTROL are Mechanisms or OutputPorts for Mechanisms in self.system
Check that every item in `modulated_mechanisms <LCControlMechanism.modulated_mechanisms>` is a Mechanism
and that its function has a multiplicative_param
"""
super()._validate_params(request_set=request_set,
target_set=target_set,
context=context)
if MODULATED_MECHANISMS in target_set and target_set[MODULATED_MECHANISMS]:
spec = target_set[MODULATED_MECHANISMS]
if not isinstance(spec, list):
spec = [spec]
for mech in spec:
if isinstance (mech, str):
if not mech == ALL:
raise LCControlMechanismError("A string other than the keyword {} was specified "
"for the {} argument the constructor for {}".
format(repr(ALL), repr(MODULATED_MECHANISMS), self.name))
elif not isinstance(mech, Mechanism):
raise LCControlMechanismError("The specification of the {} argument for {} "
"contained an item ({}) that is not a Mechanism.".
format(repr(MODULATED_MECHANISMS), self.name, mech))
elif not hasattr(mech.function, MULTIPLICATIVE_PARAM):
raise LCControlMechanismError("The specification of the {} argument for {} "
"contained a Mechanism ({}) that does not have a {}.".
format(repr(MODULATED_MECHANISMS),
self.name, mech,
repr(MULTIPLICATIVE_PARAM)))
def _instantiate_output_ports(self, context=None):
"""Instantiate ControlSignals and assign ControlProjections to Mechanisms in self.modulated_mechanisms
If **modulated_mechanisms** argument of constructor was specified as *ALL*,
assign all ProcessingMechanisms in Compositions to which LCControlMechanism belongs to self.modulated_mechanisms
Instantiate ControlSignal with Projection to the ParameterPort for the multiplicative_param of every
Mechanism listed in self.modulated_mechanisms
"""
from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism_Base
# *ALL* is specified for modulated_mechanisms:
# assign all Processing Mechanisms in LCControlMechanism's Composition(s) to its modulated_mechanisms attribute
# FIX: IMPLEMENT FOR COMPOSITION
if isinstance(self.modulated_mechanisms, str) and self.modulated_mechanisms is ALL:
if self.systems:
for system in self.systems:
self.modulated_mechanisms = []
for mech in system.mechanisms:
if (mech not in self.modulated_mechanisms and
isinstance(mech, ProcessingMechanism_Base) and
not (isinstance(mech, ObjectiveMechanism) and mech._role is CONTROL) and
hasattr(mech.function, MULTIPLICATIVE_PARAM)):
self.modulated_mechanisms.append(mech)
else:
# If LCControlMechanism is not in a Process or System, defer implementing OutputPorts until it is
return
# Get the name of the multiplicative_param of each Mechanism in self.modulated_mechanisms
if self.modulated_mechanisms:
# Create (param_name, Mechanism) specification for **control_signals** argument of ControlSignal constructor
self.modulated_mechanisms = convert_to_list(self.modulated_mechanisms)
multiplicative_param_names = []
for mech in self.modulated_mechanisms:
if isinstance(mech.function.parameters.multiplicative_param, ParameterAlias):
multiplicative_param_names.append(mech.function.parameters.multiplicative_param.source.name)
else:
multiplicative_param_names.append(mech.function.parameters.multiplicative_param.name)
ctl_sig_projs = []
for mech, mult_param_name in zip(self.modulated_mechanisms, multiplicative_param_names):
ctl_sig_projs.append((mult_param_name, mech))
self.control = [{PROJECTIONS: ctl_sig_projs}]
self.parameters.control_allocation.default_value = self.value[0]
super()._instantiate_output_ports(context=context)
def _check_for_composition(self, context=None):
from psyneulink.core.compositions.composition import Composition
if self.modulated_mechanisms == ALL:
raise LCControlMechanismError(f"'ALL' not currently supported for '{MODULATED_MECHANISMS}' argument "
f"of {self.__class__.__name__} in context of a {Composition.__name__}")
def _execute(
self,
variable=None,
context=None,
runtime_params=None,
):
"""Updates LCControlMechanism's ControlSignal based on input and mode parameter value
"""
# IMPLEMENTATION NOTE: skip ControlMechanism._execute since it is a stub method that returns input_values
output_values = super(ControlMechanism, self)._execute(
variable=variable,
context=context,
runtime_params=runtime_params,
)
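# The modulatory gain applied to each controlled Mechanism is the time-dependent value
# g(t) = G + k * w(t) described in the class docstring, where w(t) is the FitzHughNagumo w
# variable returned by the integrator (output_values[1]), G is base_level_gain and k is
# scaling_factor_gain.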
gain_t = self.parameters.scaling_factor_gain._get(context) * output_values[1] \
+ self.parameters.base_level_gain._get(context)
return gain_t, output_values[0], output_values[1], output_values[2]
def _get_mech_params_type(self, ctx):
return ctx.convert_python_struct_to_llvm_ir((self.scaling_factor_gain, self.base_level_gain))
def _get_mech_params_init(self):
return (self.scaling_factor_gain, self.base_level_gain)
def _gen_llvm_function_postprocess(self, builder, ctx, mf_out):
# prepend gain type (matches output[1] type)
gain_ty = mf_out.type.pointee.elements[1]
elements = gain_ty, *mf_out.type.pointee.elements
elements_ty = pnlvm.ir.LiteralStructType(elements)
# allocate new output type
new_out = builder.alloca(elements_ty)
# Load mechanism parameters
params, _, _, _ = builder.function.args
mech_params = builder.gep(params, [ctx.int32_ty(0), ctx.int32_ty(2)])
scaling_factor_ptr = builder.gep(mech_params, [ctx.int32_ty(0), ctx.int32_ty(0)])
base_factor_ptr = builder.gep(mech_params, [ctx.int32_ty(0), ctx.int32_ty(1)])
scaling_factor = builder.load(scaling_factor_ptr)
base_factor = builder.load(base_factor_ptr)
# Apply to the entire vector
vi = builder.gep(mf_out, [ctx.int32_ty(0), ctx.int32_ty(1)])
vo = builder.gep(new_out, [ctx.int32_ty(0), ctx.int32_ty(0)])
with pnlvm.helpers.array_ptr_loop(builder, vi, "LC_gain") as (b1, index):
in_ptr = b1.gep(vi, [ctx.int32_ty(0), index])
val = b1.load(in_ptr)
val = b1.fmul(val, scaling_factor)
val = b1.fadd(val, base_factor)
out_ptr = b1.gep(vo, [ctx.int32_ty(0), index])
b1.store(val, out_ptr)
# copy the main function return value
for i, _ in enumerate(mf_out.type.pointee.elements):
ptr = builder.gep(mf_out, [ctx.int32_ty(0), ctx.int32_ty(i)])
out_ptr = builder.gep(new_out, [ctx.int32_ty(0), ctx.int32_ty(i + 1)])
val = builder.load(ptr)
builder.store(val, out_ptr)
return new_out, builder
@tc.typecheck
def _add_system(self, system, role:str):
super()._add_system(system, role)
if isinstance(self.modulated_mechanisms, str) and self.modulated_mechanisms is ALL:
# Call with ContextFlags.COMPONENT so that OutputPorts are replaced rather than added
self._instantiate_output_ports(context=Context(source=ContextFlags.COMPONENT))
@tc.typecheck
def add_modulated_mechanisms(self, mechanisms:list):
"""Add ControlProjections to the specified Mechanisms.
"""
request_set = {MODULATED_MECHANISMS:mechanisms}
target_set = {}
self._validate_params(request_set=request_set, target_set=target_set)
# Assign ControlProjection from the LCControlMechanism's ControlSignal
# to the ParameterPort for the multiplicative_param of each Mechanism in mechanisms
for mech in mechanisms:
self.modulated_mechanisms.append(mech)
parameter_port = mech._parameter_ports[mech.multiplicative_param]
ControlProjection(sender=self.control_signals[0],
receiver=parameter_port)
# self.aux_components.append(ControlProjection(sender=self.control_signals[0],
# receiver=parameter_port))
@tc.typecheck
def remove_modulated_mechanisms(self, mechanisms:list):
"""Remove the ControlProjections to the specified Mechanisms.
"""
for mech in mechanisms:
if not mech in self.modulated_mechanisms:
continue
parameter_port = mech._parameter_ports[mech.multiplicative_param]
# Get ControlProjection
for projection in parameter_port.mod_afferents:
if projection.sender.owner is self:
control_projection = projection
break
# Delete ControlProjection from ControlSignal's list of efferents
index = self.control_signals[0].efferents.index(control_projection)
del(self.control_signals[0].efferents[index])
# Delete ControlProjection from recipient ParameterPort
index = parameter_port.mod_afferents.index(control_projection)
del(parameter_port.mod_afferents[index])
# Delete Mechanism from self.modulated_mechanisms
index = self.modulated_mechanisms.index(mech)
del(self.modulated_mechanisms[index])
def show(self):
"""Display the `OutputPorts <OutputPort>` monitored by the LCControlMechanism's `objective_mechanism`
and the `multiplicative_params <Function_Modulatory_Params>` modulated by the LCControlMechanism.
"""
print("\n---------------------------------------------------------")
print("\n{0}".format(self.name))
print("\n\tMonitoring the following Mechanism OutputPorts:")
if self.objective_mechanism is None:
print("\t\tNone")
else:
for port in self.objective_mechanism.input_ports:
for projection in port.path_afferents:
monitored_port = projection.sender
monitored_port_Mech = projection.sender.owner
monitored_port_index = self.monitored_output_ports.index(monitored_port)
weight = self.monitored_output_ports_weights_and_exponents[monitored_port_index][0]
exponent = self.monitored_output_ports_weights_and_exponents[monitored_port_index][1]
print ("\t\t{0}: {1} (exp: {2}; wt: {3})".
format(monitored_port_Mech.name, monitored_port.name, weight, exponent))
print ("\n\tModulating the following parameters:".format(self.name))
# Sort for consistency of output:
port_Names_sorted = sorted(self.output_ports.names)
for port_Name in port_Names_sorted:
for projection in self.output_ports[port_Name].efferents:
print ("\t\t{0}: {1}".format(projection.receiver.owner.name, projection.receiver.name))
print ("\n---------------------------------------------------------")
| 51.636831 | 125 | 0.694886 |
222ffc81bd61cd66210c0d4f7cec86da13bee072 | 25,986 | py | Python
src/utils/image_processing.py | V1ct0reo/lightning-fast-hydra | eb86d67da7c97b611c99dcee9c7208142286d4d6 | ["MIT"] | null | null | null
src/utils/image_processing.py | V1ct0reo/lightning-fast-hydra | eb86d67da7c97b611c99dcee9c7208142286d4d6 | ["MIT"] | null | null | null
src/utils/image_processing.py | V1ct0reo/lightning-fast-hydra | eb86d67da7c97b611c99dcee9c7208142286d4d6 | ["MIT"] | null | null | null
import argparse
import numpy
import cv2
import scipy
from scipy import ndimage
from random import randint
import random
import os
import shutil
from PIL import Image
from PIL import ImageEnhance
from PIL import ExifTags
from PIL import ImageFilter
# crop a random sized (60-100% of original image) square somewhere on the image
def crop_random_square(img):
y, x = img.shape[0], img.shape[1]
crop_size = int(min([y, x]) * random.uniform(0.6, 1.0))
startx = randint(0, x - crop_size)
starty = randint(0, y - crop_size)
return img[starty:starty + crop_size, startx:startx + crop_size]
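# Minimal usage sketch; the file name and output size are hypothetical placeholders,
# not values used elsewhere in this module.
def _example_random_crop():
    img = cv2.imread('renders/screenshot1.png')  # numpy array in BGR channel order
    patch = crop_random_square(img)              # random square covering 60-100% of the shorter side
    return cv2.resize(patch, (112, 112))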
# crop a random sized (60-100% of original image) square somewhere on the image using PIL (Python Imaging Library)
def crop_random_square_pil(img):
width = img.size[0]
height = img.size[1]
crop_size = int(min([width, height]) * random.uniform(0.6, 1.0))
startx = randint(0, width - crop_size)
starty = randint(0, height - crop_size)
cropped = img.crop(
(
startx,
starty,
startx + crop_size,
starty + crop_size
)
)
return cropped
# crop a random sized (30% of original image) square somewhere on the image
def crop_random_background_square(img):
y, x = img.shape[0], img.shape[1]
if (min(img.shape[0], img.shape[1]) > 227):
crop_size = int(max([0.3 * min([y, x]), 227]))
else:
crop_size = min([img.shape[0], img.shape[1]])
startx = randint(0, x - crop_size)
starty = randint(0, y - crop_size)
return img[starty:starty + crop_size, startx:startx + crop_size]
# crop a square in the center of the image. Square will be as big as it can be
def crop_square(img):
y, x = img.shape[0], img.shape[1]
crop_size = min([y, x])
startx = x // 2 - (crop_size // 2)
starty = y // 2 - (crop_size // 2)
return img[starty:starty + crop_size, startx:startx + crop_size]
# crop a square in the center of the image using PIL (Python Imaging Library). Square will be as big as it can be
def crop_square_pil(img):
width = img.size[0]
height = img.size[1]
crop_size = min([width, height])
cropped = img.crop(
(
width / 2 - crop_size / 2,
height / 2 - crop_size / 2,
width / 2 + crop_size / 2,
height / 2 + crop_size / 2
)
)
return cropped
# crop one quarter of a (square) image; index selects the quarter:
# 0 = top-left, 1 = bottom-left, 2 = top-right, 3 = bottom-right
def crop_quarter_square_pil(img, index):
width = img.size[0]
height = img.size[1]
i = index % 4
if i < 2:
crop_width_1 = 0
crop_width_2 = width/2
else:
crop_width_1 = width/2
crop_width_2 = width
if i == 0 or i == 2:
crop_height_1 = 0
crop_height_2 = height/2
else:
crop_height_1 = height/2
crop_height_2 = height
cropped = img.crop(
(
crop_width_1,
crop_height_1,
crop_width_2,
crop_height_2
)
)
return cropped
# rotate image randomly between -15 to 15 degrees
def rotate_radomly(img):
angle = randint(-15, 15)
return ndimage.rotate(img, angle, reshape=False)
# rotate image randomly between -15 to 15 degrees using PIL (Python Imaging Library)
def rotate_randomly_pil(img):
return img.rotate(randint(-15, 15))
# preprocess training images (without adding background!) and use sobel operator for edge detection
# NOTE: no background will be added.
def detect_train_sobel_edges_in_images(im_dir, sav_dir):
for x in range(1, 2):
print(x)
image_string = im_dir + 'screenshot' + str(x) + '.png'
save_string = sav_dir + 'preprocessed' + str(x) + '.jpg'
im = scipy.misc.imread(image_string)
resize_width, resize_height = int(0.5 * im.shape[0]), int(0.5 * im.shape[1])
im = scipy.misc.imresize(im, [resize_width, resize_height])
im = rotate_radomly(im)
im = crop_square(im)
im = crop_random_square(im)
im = scipy.misc.imresize(im, [112, 112])
im = im.astype('int32')
dx = ndimage.sobel(im, 0) # horizontal derivative
dy = ndimage.sobel(im, 1) # vertical derivative
mag = numpy.hypot(dx, dy) # magnitude
mag *= 255.0 / numpy.max(mag) # normalize (Q&D)
scipy.misc.imsave(save_string, mag)
# preprocess test images and use sobel operator for edge detection
def detect_test_sobel_edges_in_images():
for x in range(1, 5506):
image_string = 'zylinder_images/leo_images/Systembediengeraet_A110/merged' + str(x) + '.jpg'
save_string = 'zylinder_images/nur_vorne_kanten/Systembediengeraet_A110/sobel' + str(x) + '.jpg'
im = scipy.misc.imread(image_string)
im = crop_square(im)
im = scipy.misc.imresize(im, [112, 112])
im = im.astype('int32')
dx = ndimage.sobel(im, 0) # horizontal derivative
dy = ndimage.sobel(im, 1) # vertical derivative
mag = numpy.hypot(dx, dy) # magnitude
mag *= 255.0 / numpy.max(mag) # normalize (Q&D)
scipy.misc.imsave(save_string, mag)
if x % 55 == 0:
print('\r' + str(int(x / 55)) + '% done', end='')
# os.remove(image_string)
# do a preprocessing for image without doing any edge detection and without adding background to the image
# NOTE: No background will be added to the image
def preprocess_images_without_sobel(im_dir, sav_dir):
for x in range(1, 536):
image_string = im_dir + 'screenshot' + str(x) + '.png'
im = scipy.misc.imread(image_string)
resize_width, resize_height = int(0.5 * im.shape[0]), int(
0.5 * im.shape[1]) # reduce image_size to 0.5 * image_size for faster processing
im = scipy.misc.imresize(im, [resize_width, resize_height]) # reduce image_size for faster processing
for y in range(1, 11):
save_string = sav_dir + 'preprocessed' + str(x) + '_' + str(y) + '.jpg'
image = rotate_radomly(im)
image = crop_square(image)
image = crop_random_square(image)
image = scipy.misc.imresize(image, [112, 112])
image = image.astype('int32')
scipy.misc.imsave(save_string, image)
print('\rpreprocessing:' + str(round(x / 5.34, 2)) + '% completed', end='')
# do a preprocessing for image without doing any edge detection
def preprocess_background_images(im_dir, sav_dir):
for x in range(1, 2469):
for y in range(1, 10):
image_string = im_dir + 'background' + str(x) + '.jpg'
save_string = sav_dir + 'preprocessed' + str(x) + '_' + str(y) + '.jpg'
im = scipy.misc.imread(image_string)
# resize_width, resize_height = int(0.5 * im.shape[0]), int(
# 0.5 * im.shape[1]) # reduce image_size to 0.5 * image_size for faster processing
# im = scipy.misc.imresize(im,
# [resize_width, resize_height]) # reduce image_size for faster processing
im = crop_random_background_square(im)
im = scipy.misc.imresize(im, [299, 299])
im = im.astype('int32')
scipy.misc.imsave(save_string, im)
if (x % 10 == 0 or x % 24 == 0):
print('\rpreprocessing:' + str(round(x / 24.68, 2)) + '% completed', end='')
# edge detection with prewitt filter
def detect_prewitt_edges_in_images():
for x in range(303, 310):
print(x)
image_string = 'zylinder_images/5045/IMG_0' + str(x) + '.png'
save_string = 'Prewitt_Bilder/prewitt' + str(x) + '.jpg'
im = scipy.misc.imread(image_string)
im = im.astype('int32')
dx = ndimage.prewitt(im, 0) # horizontal derivative
dy = ndimage.prewitt(im, 1) # vertical derivative
mag = numpy.hypot(dx, dy) # magnitude
mag *= 255.0 / numpy.max(mag) # normalize (Q&D)
scipy.misc.imsave(save_string, mag)
# edge detection with canny filter
def detect_canny_edges_in_images():
for x in range(1, 30):
if x % 55 == 0:
print('\r' + str(int(x / 55)) + '% done', end='')
image_string = 'zylinder_images/nur_vorne_kanten/preprocessed/betriebsstufenbediengeraet_A4/test/test_resized' + str(
x) + '.jpg'
save_string = 'zylinder_images/nur_vorne_kanten/preprocessed/betriebsstufenbediengeraet_A4/test/canny' + str(
x) + '.jpg'
img = cv2.imread(image_string, 0)
v = numpy.median(img)
sigma = 0.33
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edges = cv2.Canny(img, lower, upper)
cv2.imwrite(save_string, edges)
os.remove(image_string)
# resize images to a size of 112x112 so they can be used for the neural network
def resize():
for x in range(1, 100):
print(x)
image_string = 'zylinder_images/zylinder/preprocessed/druckspeicher/test/test' + str(x) + '.jpg'
save_string = 'zylinder_images/zylinder/preprocessed/druckspeicher/test/test' + str(x) + '.jpg'
im = scipy.misc.imread(image_string)
im = crop_square(im)
im = scipy.misc.imresize(im, [112, 112])
scipy.misc.imsave(save_string, im)
# os.remove(image_string)
# crop images to a square and then resize them to 112x112
def reshape_and_resize():
for x in range(1, 364):
print(str(x))
image_string = '/Users/adminsitrator/denkbares/deep_learning_stuff/bilder_deep_learning/zylinder/Hydraulik_Druckspeicher/screenshot' + str(
x) + '.png'
save_string = '/Users/adminsitrator/denkbares/deep_learning_stuff/bilder_deep_learning/zylinder_images/zylinder/unprocessed_299/Hydraulikdruckspeicher/screenshot' + str(
x) + '.jpg'
im = scipy.misc.imread(image_string)
im = crop_square(im)
im = scipy.misc.imresize(im, [299, 299])
scipy.misc.imsave(save_string, im)
if (x % 10 == 0):
print('\rpreprocessing:' + str(round(x / 20.57, 2)) + '% completed', end='')
# use a video as input and extract single frames as images
def extract_images_from_video(video_path, save_dir, width, height):
vidcap = cv2.VideoCapture(video_path)
#success, image = vidcap.read()
count = 0
success = True
while success:
success, image = vidcap.read()
if image is not None:
# image = cv2.flip(image, -1)
image = cv2.resize(image, (width, height))
print('Read a new frame %d: ' % count, success)
cv2.imwrite(save_dir + "frame%d.jpg" % count, image) # save frame as JPEG file
count += 1
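# Minimal usage sketch; the paths and frame size are hypothetical placeholders,
# not values used elsewhere in this module.
def _example_extract_frames():
    extract_images_from_video(video_path='videos/component_scan.mp4',
                              save_dir='frames/',
                              width=299, height=299)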
# remove grey background from training images and make it transparent
def scrap_background(img, save):
img = Image.open(img)
img = img.convert("RGBA")
datas = img.getdata()
newData = []
for item in datas: # if it's grey like the background -> make it transparent
if (210 <= item[0] <= 250 and 210 <= item[1] <= 250 and 210 <= item[2] <= 250):
# if (227 <= item[0] <= 231 and 227 <= item[1] <= 231 and 227 <= item[2] <= 231):
newData.append((item[0], item[1], item[2], 0)) # set alpha-value 0
else:
newData.append(item)
img.putdata(newData)
img.save(save, "PNG")
# merge two images into one: the first is used as the background, the second is pasted on top as the foreground
def merge_images(im1, im2, im_size):
background = Image.open(im1).resize([im_size, im_size])
foreground = Image.open(im2).resize([im_size, im_size])
background.paste(foreground, (0, 0), foreground)
return background
def merge_image_with_random_noise(image, im_size):
random_noise = numpy.dstack((255 * numpy.random.random((im_size, im_size)), 255 * numpy.random.random((im_size, im_size)), 255 * numpy.random.random((im_size, im_size))))
background = Image.fromarray(numpy.uint8(random_noise))
foreground = Image.open(image).resize([im_size, im_size])
background.paste(foreground, (0, 0), foreground)
return background
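# Minimal usage sketch; the file names and the 299 px size are hypothetical placeholders.
# The foreground must have an alpha channel (e.g. a transparent PNG produced by
# scrap_background), because it is also used as the paste mask.
def _example_merge():
    composite = merge_images('backgrounds/background1.jpg',
                             'renders/screenshot1_transparent.png', 299)
    composite.save('merged/merged1.jpg', 'JPEG')
    noisy = merge_image_with_random_noise('renders/screenshot1_transparent.png', 299)
    noisy.save('merged/merged1_noise.jpg', 'JPEG')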
# merge a batch of images with background images
def create_merged_images():
for i in range(1, 5351):
for j in range(0, 5):
im1 = 'clutter_backgrounds/preprocessed/preprocessed' + str(i + j * 3000) + '.jpg'
im2 = 'zylinder_images/leo_images_nur_vorne/preprocessed/Systembediengeraet_A110/transparent' + str(
i) + '.jpg'
sav = 'zylinder_images/leo_images/Systembediengeraet_A110/merged' + str(i) + '_' + str(j) + '.jpg'
# merge_images() expects an image size and returns the composited PIL image;
# the 112 px size here is an assumed target size, and the result is saved explicitly
merged = merge_images(im1, im2, 112)
merged.save(sav, "JPEG")
if i % 53 == 0:
print('\r' + str(int(i / 53.4)) + '% done', end='')
# horizontally flip images
def flip_images():
for x in range(1, 32101):
image_string = 'zylinder_images/leo_images/Systembediengeraet_A110/preprocessed' + str(x) + '.jpg'
save_string = 'zylinder_images/leo_images/Systembediengeraet_A110/preprocessed' + str(32100 + x) + '.jpg'
im = scipy.misc.imread(image_string)
horizontal_im = cv2.flip(im, 1)  # flipCode=1 mirrors around the vertical axis (horizontal flip)
scipy.misc.imsave(save_string, horizontal_im)
if x % 32 == 0:
print('\r' + str(int(x / 320)) + '% done', end='')
def change_brightness(image, save):
# manipulate brightness of the image
brightness = ImageEnhance.Brightness(image)
brightness_manipulated = brightness.enhance(random.uniform(0.6, 1.5))
rgb_im = brightness_manipulated.convert('RGB')
rgb_im.save(save, "JPEG")
# manipulate HSV-channels of the whole image
def manipulate_hsv(image):
im = image.convert('HSV')
im_arr = numpy.array(im)
h_vals = random.uniform(0.7, 1.6) * (im_arr[..., 0])
s_vals = random.uniform(0.3, 2.6) * (im_arr[..., 1] + randint(1,
30)) # components have lots of grey colors -> grey means saturation == 0 -> give a little more saturation, so that manipulation is successful
v_vals = random.uniform(0.7, 1.6) * im_arr[..., 2]
# S and V channels should not be greater than 255. H channel can be greater, because it starts from beginning and beginning is the continuous successor of the end -> see HSV cone
s_vals[s_vals > 255] = 255
v_vals[v_vals > 255] = 255
im_arr[..., 0] = h_vals
im_arr[..., 1] = s_vals
im_arr[..., 2] = v_vals
manipulated_image = Image.fromarray(im_arr, mode='HSV')
return manipulated_image.convert('RGB')
# manipulate HSV-channels of the whole image variante 2
def manipulate_hsv_addition(image):
im = image.convert('HSV')
im_arr = numpy.array(im, dtype=numpy.uint16)
h_vals = im_arr[..., 0]
s_vals = im_arr[..., 1]
v_vals = im_arr[..., 2]
h_vals = h_vals + randint(-20, 20)
s_vals = s_vals + randint(-40, 40)
v_vals = v_vals + randint(-40, 40)
s_vals[s_vals < 0] = 0
s_vals[s_vals > 255] = 255
v_vals[v_vals < 0] = 0
v_vals[v_vals > 255] = 255
im_arr[..., 0] = h_vals
im_arr[..., 1] = s_vals
im_arr[..., 2] = v_vals
im_arr = numpy.array(im_arr, dtype=numpy.uint8) # Pillow needs an 8bit array to form a picture from the array
manipulated_image = Image.fromarray(im_arr, mode='HSV')
# manipulated_image.show()
return manipulated_image.convert('RGB')
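# Minimal usage sketch; the file names are hypothetical placeholders.
def _example_hsv_jitter():
    im = Image.open('merged/merged1.jpg')
    jittered = manipulate_hsv_addition(im)  # random +/- shifts of the H, S and V channels
    jittered.save('merged/merged1_hsv.jpg', 'JPEG')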
# manipulate every single pixel's HSV-values
def manipulate_every_pixels_hsv(image):
# image = Image.open(image)
hsv_im = image.convert('HSV')
im_arr = numpy.array(hsv_im)
height, width, _ = im_arr.shape
for j in range(width):
for i in range(height):
im_arr[i][j][0] = min(random.uniform(0.7, 1.6) * im_arr[i][j][0], 255) # H-value
im_arr[i][j][1] = min(random.uniform(0.7, 1.6) * im_arr[i][j][1], 255) # S-value
im_arr[i][j][2] = min(random.uniform(0.7, 1.6) * im_arr[i][j][2], 255) # V-value
manipulated_image = Image.fromarray(im_arr, mode='HSV')
return manipulated_image.convert('RGB')
def manipulate_rgb(image):
rgb_im = image.convert('RGB')
im_arr = numpy.array(rgb_im, dtype=numpy.uint16) # we need 16bit int, because 8bit only works until 255
r_vals = im_arr[..., 0]
g_vals = im_arr[..., 1]
b_vals = im_arr[..., 2]
r_vals = r_vals + randint(-20, 20)
g_vals = g_vals + randint(-20, 20)
b_vals = b_vals + randint(-20, 20)
r_vals[r_vals < 0] = 0
r_vals[r_vals > 255] = 255
g_vals[g_vals < 0] = 0
g_vals[g_vals > 255] = 255
b_vals[b_vals < 0] = 0
b_vals[b_vals > 255] = 255
im_arr[..., 0] = r_vals
im_arr[..., 1] = g_vals
im_arr[..., 2] = b_vals
im_arr = numpy.array(im_arr, dtype=numpy.uint8) # Pillow needs an 8bit array to form a picture from the array
im = Image.fromarray(im_arr, mode='RGB')
# im.show()
return im  # return the jittered image, in line with the other manipulate_* helpers
# equalize the histogram of the luminance (Y-channel) of an image
def equalize_luminance(image):
pil_im = image.convert('RGB')
img = numpy.array(pil_im)
img = img[:, :, ::-1].copy()
img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
# equalize the histogram of the Y channel
img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
# convert the YUV image back to RGB format
img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2RGB)
cv2_im = img_output
pil_im = Image.fromarray(cv2_im)
# pil_im.show()
return pil_im
def blur_images(im_dir):
for root, dirs, files in os.walk(im_dir):
for idx, file in enumerate(files):
if file.endswith(".jpg"):
try:
image_path = os.path.join(root, file)
print(image_path)
image = Image.open(image_path)
image = image.filter(ImageFilter.GaussianBlur(radius=1))
image.save(image_path, "JPEG")
except Exception as e:
print(e)
# convert image from *.png to *.jpg
def convert_to_jpg(image):
im = Image.open(image)
# im = im.resize([224, 224])
rgb_im = im.convert('RGB')
rgb_im.save(image.replace('.png', '.jpg'))
def resizeImages(im_dir):
for root, dirs, files in os.walk(im_dir):
for idx, file in enumerate(files):
if file.endswith(".JPG"):
try:
image_path = os.path.join(root, file)
print(image_path)
image = Image.open(image_path)
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation': break
exif = dict(image._getexif().items())
if exif[orientation] == 3:
image = image.rotate(180, expand=True)
elif exif[orientation] == 6:
image = image.rotate(270, expand=True)
elif exif[orientation] == 8:
image = image.rotate(90, expand=True)
basewidth = 1200
wpercent = (basewidth / float(image.size[0]))
hsize = int((float(image.size[1]) * float(wpercent)))
image.thumbnail((basewidth, hsize), Image.ANTIALIAS)
image.save(image_path, "JPEG")
except Exception as e:
print(e)
# do the whole preprocessing process for one image
# preprocess image and merge it with several backgrounds
def preprocess_all(image_path, sav_dir, image_size, background_directory):
sav_dir = sav_dir + os.path.basename(os.path.dirname(image_path))
if not os.path.isdir(sav_dir): # if directory doesn't exist, create it
os.mkdir(sav_dir)
# uri = os.path.dirname(image_path) + '/uri.txt' // uncomment uri path
# shutil.copyfile(uri, sav_dir + '/uri.txt') // uncomment uri path
# read image
im = Image.open(image_path)
width, height = im.size
# im.resize([int(0.5 * width), int(0.5 * height)]) # resize image to 50% to accelerate computation
for y in range(0, 6):
save_string = sav_dir + "/" + os.path.basename(image_path)[:-4] + '_' + str(y) + '.jpg'
image = rotate_randomly_pil(im)
image = crop_square_pil(image)
image = crop_random_square_pil(image)
# image = crop_quarter_square_pil(image, y)
image = image.resize([image_size, image_size])
        image.save(save_string, "PNG")  # note: the data is saved PNG-encoded even though the filename ends in .jpg
for y in range(0, 6):
foreground = sav_dir + "/" + os.path.basename(image_path)[:-4] + '_' + str(y) + '.jpg'
for z in range(0, 1):
background = background_directory + random.choice(
os.listdir(background_directory)) # randomly choose a background image
save_string_merged = sav_dir + "/" + os.path.basename(image_path)[:-4] + '_' + str(y) + '.jpg'
merged_image = merge_images(background, foreground, image_size)
# merged_image = merge_image_with_random_noise(foreground, image_size)
# hsv_manipulated_image = manipulate_every_pixels_hsv(merged_image)
hsv_manipulated_image = manipulate_hsv_addition(merged_image)
# equalized_image = equalize_luminance(hsv_manipulated_image)
change_brightness(hsv_manipulated_image, save_string_merged)
# initialize preprocessing for a directory.
# The directory should contain one subdirectory per class, each holding that class's images.
# The structure should be as follows:
#
# im_dir (<- the one you use as parameter)
# │
# │
# └───Class1
# │ │ image1.png
# │ │ image2.png
# │ │ ...
# │
# └───Class2
# │ image1.png
# │ image2.png
# │ ...
def do_preprocessing_for_dir(im_dir, sav_dir, image_size, background_directory):
if not os.path.exists(sav_dir):
os.mkdir(sav_dir)
for root, dirs, files in os.walk(im_dir):
for idx, file in enumerate(files):
if file.endswith(".png"):
image_path = os.path.join(root, file)
start_image_preprocessing(image_path, sav_dir, image_size, background_directory)
printProgressBar(idx + 1, len(files), prefix='Progress:', suffix='Complete')
# print('\rpreprocessing:' + str(round(idx / 4.84, 2)) + '% completed', end='')
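# Example call (illustrative sketch, not part of the original script; the paths below
# are placeholders):
#
#   do_preprocessing_for_dir('/data/raw_images/', '/data/preprocessed/', 299,
#                            '/data/backgrounds/preprocessed_299/')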
# start the image preprocessing and handle any ValueErrors that may occur
def start_image_preprocessing(image_path, sav_dir, image_size, background_directory):
try:
preprocess_all(image_path, sav_dir, image_size, background_directory)
    except ValueError as e:  # quick-and-dirty workaround: converting to HSV sometimes raises a ValueError and retrying always helps, so catch it here and try again
if str(e) == "conversion from L to HSV not supported":
print(e)
start_image_preprocessing(image_path, sav_dir, image_size, background_directory)
else:
raise
# Print iterations progress
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='')
# Print New Line on Complete
if iteration == total:
print()
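# Example usage of printProgressBar (illustrative sketch, not part of the original script):
#
#   items = ['a.png', 'b.png', 'c.png']
#   for idx, item in enumerate(items):
#       # ... per-item work here ...
#       printProgressBar(idx + 1, len(items), prefix='Progress:', suffix='Complete', length=40)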
if __name__ == '__main__':
'''
image_size = 299
# background_directory = "/Users/adminsitrator/denkbares/deep_learning_stuff/bilder_deep_learning/clutter_backgrounds/preprocessed_299/"
parser = argparse.ArgumentParser()
parser.add_argument("--image_dir", help="image directory with images to be processed")
parser.add_argument("--save_dir", help="directory to save the preprocessed images")
parser.add_argument("--size", type=int,
help="size of the images after preprocessing. Images are processed as squares, so only one value is needed. Standard size = 299")
parser.add_argument("--background_directory",
help="directory with background images. Background Images have to be same size as processed images. Optionally change parameter --size")
args = parser.parse_args()
if args.size:
image_size = args.size
if args.background_directory:
background_directory = args.background_directory
if args.image_dir and args.save_dir:
im_dir = args.image_dir
sav_dir = args.save_dir
do_preprocessing_for_dir(im_dir, sav_dir, image_size, background_directory)
else:
print('Please specify image directory and output directory')
'''
i = 0
for path, subdirs, files in os.walk('/Users/tobi/denkbares/deep_learning_stuff/Bilder/videos_AXION_test_16_klassen/'):
for name in files:
if name.endswith('.MOV'):
i+=1
print(os.path.basename(path))
save_dir = '/Users/tobi/denkbares/deep_learning_stuff/Bilder/bilder_AXION_test_16_klassen/' + os.path.basename(path) + '/'
if not os.path.isdir(os.path.dirname(os.path.dirname(save_dir))):
os.mkdir(os.path.dirname(os.path.dirname(save_dir)))
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
i = 0
extract_images_from_video(os.path.join(path, name), save_dir + str(i), 299, 299)
# blur_images('/home/deep/workspace/deepbares/images/blurred_leo_can2019_02_11')
| 38.554896
| 208
| 0.619564
|
52b02cdbefdcf8b7245a961b1148c798c5219efe
| 8,391
|
py
|
Python
|
cirq/value/value_equality.py
|
jlmayfield/Cirq
|
dc1294f54118a9a4f92546ca13780b91615dd675
|
[
"Apache-2.0"
] | 2
|
2019-04-02T10:22:21.000Z
|
2019-06-19T04:54:04.000Z
|
cirq/value/value_equality.py
|
jlmayfield/Cirq
|
dc1294f54118a9a4f92546ca13780b91615dd675
|
[
"Apache-2.0"
] | 4
|
2019-03-27T22:51:42.000Z
|
2019-04-03T22:41:36.000Z
|
cirq/value/value_equality.py
|
jlmayfield/Cirq
|
dc1294f54118a9a4f92546ca13780b91615dd675
|
[
"Apache-2.0"
] | 1
|
2019-03-27T21:30:44.000Z
|
2019-03-27T21:30:44.000Z
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines `@cirq.value_equality`, for easy __eq__/__hash__ methods."""
from typing import Union, Callable, overload, Any
from typing_extensions import Protocol
import cirq.protocols
class _SupportsValueEquality(Protocol):
"""An object decorated with the value equality decorator."""
def _value_equality_values_(self) -> Any:
"""Returns a value or values that define the identity of this object.
For example, a Point2D would be defined by the tuple (x, y) and so it
would return `(x, y)` from this method.
The decorated class is responsible for implementing this method.
Returns:
Values used when determining if the receiving object is equal to
another object.
"""
pass
def _value_equality_approximate_values_(self) -> Any:
"""Returns value or values used for approximate equality.
Approximate equality does element-wise comparison of iterable types; if
decorated class is composed of a set of primitive types (or types
supporting `SupportsApproximateEquality` protocol) then they can be
given as an iterable.
If this method is not defined by decorated class,
`_value_equality_values_` is going to be used instead.
Returns:
Any type supported by `cirq.approx_eq()`.
"""
# coverage: ignore
return self._value_equality_values_()
def _value_equality_values_cls_(self) -> Any:
"""Automatically implemented by the `cirq.value_equality` decorator.
This method encodes the logic used to determine whether or not objects
that have the same equivalence values but different types are considered
to be equal. By default, this returns the decorated type. But there is
an option (`distinct_child_types`) to make it return `type(self)`
instead.
Returns:
Type used when determining if the receiving object is equal to
another object.
"""
pass
def _value_equality_eq(self: _SupportsValueEquality,
other: _SupportsValueEquality) -> bool:
cls_self = self._value_equality_values_cls_()
if not isinstance(other, cls_self):
return NotImplemented
cls_other = other._value_equality_values_cls_()
if cls_self != cls_other:
return False
return self._value_equality_values_() == other._value_equality_values_()
def _value_equality_ne(self: _SupportsValueEquality,
other: _SupportsValueEquality) -> bool:
return not self == other
def _value_equality_hash(self: _SupportsValueEquality) -> int:
return hash((self._value_equality_values_cls_(),
self._value_equality_values_()))
def _value_equality_approx_eq(self: _SupportsValueEquality,
other: _SupportsValueEquality,
atol: float) -> bool:
# Preserve regular equality type-comparison logic.
cls_self = self._value_equality_values_cls_()
if not isinstance(other, cls_self):
return NotImplemented
cls_other = other._value_equality_values_cls_()
if cls_self != cls_other:
return False
# Delegate to cirq.approx_eq for approximate equality comparison.
return cirq.protocols.approx_eq(
self._value_equality_approximate_values_(),
other._value_equality_approximate_values_(),
atol=atol
)
# pylint: disable=function-redefined
@overload
def value_equality(cls: type,
*,
unhashable: bool = False,
distinct_child_types: bool = False,
approximate: bool = False
) -> type:
pass
@overload
def value_equality(*,
unhashable: bool = False,
distinct_child_types: bool = False,
approximate: bool = False
) -> Callable[[type], type]:
pass
def value_equality(cls: type = None,
*,
unhashable: bool = False,
distinct_child_types: bool = False,
approximate: bool = False
) -> Union[Callable[[type], type], type]:
"""Implements __eq__/__ne__/__hash__ via a _value_equality_values_ method.
_value_equality_values_ is a method that the decorated class must implement.
_value_equality_approximate_values_ is a method that the decorated class
might implement if special support for approximate equality is required.
This is only used when approximate argument is set. When approximate
argument is set and _value_equality_approximate_values_ is not defined,
_value_equality_values_ values are used for approximate equality.
For example, this can be used to compare periodic values like angles: the
angle value can be wrapped with `PeriodicValue`. When returned as part of
approximate values a special normalization will be done automatically to
guarantee correctness.
Note that the type of the decorated value is included as part of the value
equality values. This is so that completely separate classes with identical
equality values (e.g. a Point2D and a Vector2D) don't compare as equal.
Further note that this means that child types of the decorated type will be
considered equal to each other, though this behavior can be changed via
the 'distinct_child_types` argument. The type logic is implemented behind
the scenes by a `_value_equality_values_cls_` method added to the class.
Args:
cls: The type to decorate. Automatically passed in by python when using
the @cirq.value_equality decorator notation on a class.
unhashable: When set, the __hash__ method will be set to None instead of
to a hash of the equality class and equality values. Useful for
mutable types such as dictionaries.
distinct_child_types: When set, classes that inherit from the decorated
class will not be considered equal to it. Also, different child
classes will not be considered equal to each other. Useful for when
the decorated class is an abstract class or trait that is helping to
define equality for many conceptually distinct concrete classes.
approximate: When set, the decorated class will be enhanced with
`_approx_eq_` implementation and thus start to support the
`SupportsApproximateEquality` protocol.
"""
# If keyword arguments were specified, python invokes the decorator method
# without a `cls` argument, then passes `cls` into the result.
if cls is None:
return lambda deferred_cls: value_equality(
deferred_cls,
unhashable=unhashable,
distinct_child_types=distinct_child_types,
approximate=approximate)
values_getter = getattr(cls, '_value_equality_values_', None)
if values_getter is None:
raise TypeError('The @cirq.value_equality decorator requires a '
'_value_equality_values_ method to be defined.')
if distinct_child_types:
setattr(cls, '_value_equality_values_cls_', lambda self: type(self))
else:
setattr(cls, '_value_equality_values_cls_', lambda self: cls)
setattr(cls, '__hash__', None if unhashable else _value_equality_hash)
setattr(cls, '__eq__', _value_equality_eq)
setattr(cls, '__ne__', _value_equality_ne)
if approximate:
if not hasattr(cls, '_value_equality_approximate_values_'):
setattr(cls, '_value_equality_approximate_values_', values_getter)
setattr(cls, '_approx_eq_', _value_equality_approx_eq)
return cls
# pylint: enable=function-redefined
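# Illustrative usage of the decorator (added sketch, not part of cirq; it mirrors the
# Point2D example from the docstring above and only runs when executed directly).
if __name__ == '__main__':
    @value_equality
    class Point2D:
        def __init__(self, x: float, y: float) -> None:
            self.x = x
            self.y = y
        def _value_equality_values_(self) -> Any:
            return self.x, self.y
    assert Point2D(1, 2) == Point2D(1, 2)              # same type and values
    assert Point2D(1, 2) != Point2D(2, 1)              # same type, different values
    assert hash(Point2D(1, 2)) == hash(Point2D(1, 2))  # hash agrees with equality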
| 40.536232
| 80
| 0.687522
|
cc6e6b98363c8da89f9a58df0ea45636812d7df2
| 487
|
py
|
Python
|
elasticapm/instrumentation/packages/zlib.py
|
lyrixderaven/apm-agent-python
|
e21b306da70995ca1582666378b7059495ff1bee
|
[
"BSD-3-Clause"
] | 2
|
2019-02-15T20:23:39.000Z
|
2019-02-15T20:26:06.000Z
|
elasticapm/instrumentation/packages/zlib.py
|
lyrixderaven/apm-agent-python
|
e21b306da70995ca1582666378b7059495ff1bee
|
[
"BSD-3-Clause"
] | null | null | null |
elasticapm/instrumentation/packages/zlib.py
|
lyrixderaven/apm-agent-python
|
e21b306da70995ca1582666378b7059495ff1bee
|
[
"BSD-3-Clause"
] | null | null | null |
from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
from elasticapm.traces import capture_span
class ZLibInstrumentation(AbstractInstrumentedModule):
name = "zlib"
instrument_list = [("zlib", "compress"), ("zlib", "decompress")]
def call(self, module, method, wrapped, instance, args, kwargs):
wrapped_name = module + "." + method
with capture_span(wrapped_name, "compression.zlib"):
return wrapped(*args, **kwargs)
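# Illustrative effect (added note, not part of the original module): once this
# instrumentation is applied, a call such as zlib.compress(payload) is routed through
# call() above, i.e. it executes inside
#     with capture_span("zlib.compress", "compression.zlib"): ...
# and is therefore recorded as a span on the active Elastic APM transaction.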
| 37.461538
| 79
| 0.714579
|
c8a6ab6dc6cce760b76de003929be3f339d3db45
| 28,527
|
py
|
Python
|
tests/unit/task/scenarios/neutron/test_network.py
|
jogeo/rally-openstack
|
83437e7c5925d5d647cd28f1821b6d51687b0123
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/task/scenarios/neutron/test_network.py
|
jogeo/rally-openstack
|
83437e7c5925d5d647cd28f1821b6d51687b0123
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/task/scenarios/neutron/test_network.py
|
jogeo/rally-openstack
|
83437e7c5925d5d647cd28f1821b6d51687b0123
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014: Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from rally import exceptions as rally_exceptions
from rally_openstack.task.scenarios.neutron import network
from tests.unit import test
BASE = "rally_openstack.task.scenarios.neutron.network"
@ddt.ddt
class NeutronNetworksTestCase(test.ScenarioTestCase):
@ddt.data(
{"network_create_args": {}},
{"network_create_args": {"name": "given-name"}},
{"network_create_args": {"provider:network_type": "vxlan"}}
)
@ddt.unpack
@mock.patch("%s.CreateAndListNetworks._list_networks" % BASE)
@mock.patch("%s.CreateAndListNetworks._create_network" % BASE)
def test_create_and_list_networks(self,
mock__create_network,
mock__list_networks,
network_create_args):
scenario = network.CreateAndListNetworks(self.context)
scenario.run(network_create_args=network_create_args)
mock__create_network.assert_called_once_with(network_create_args)
mock__list_networks.assert_called_once_with()
mock__create_network.reset_mock()
mock__list_networks.reset_mock()
@ddt.data(
{"network_create_args": {}},
{"network_create_args": {"name": "given-name"}},
)
@ddt.unpack
@mock.patch("%s.CreateAndShowNetwork._show_network" % BASE)
@mock.patch("%s.CreateAndShowNetwork._create_network" % BASE)
def test_create_and_show_network(self,
mock__create_network,
mock__show_network,
network_create_args):
scenario = network.CreateAndShowNetwork(self.context)
mock_net = mock.Mock()
mock__create_network.return_value = mock_net
scenario.run(network_create_args=network_create_args)
mock__create_network.assert_called_once_with(network_create_args)
mock__show_network.assert_called_once_with(mock_net)
mock__create_network.reset_mock()
mock__show_network.reset_mock()
@mock.patch("%s.CreateAndUpdateNetworks._update_network" % BASE)
@mock.patch("%s.CreateAndUpdateNetworks._create_network" % BASE,
return_value={
"network": {
"id": "network-id",
"name": "network-name",
"admin_state_up": False
}
})
def test_create_and_update_networks(self,
mock__create_network,
mock__update_network):
scenario = network.CreateAndUpdateNetworks(self.context)
network_update_args = {"name": "_updated", "admin_state_up": True}
# Default options
scenario.run(network_update_args=network_update_args)
mock__create_network.assert_called_once_with({})
mock__update_network.assert_has_calls(
[mock.call(
mock__create_network.return_value, network_update_args
)])
mock__create_network.reset_mock()
mock__update_network.reset_mock()
# Explicit network name is specified
network_create_args = {
"name": "network-name",
"admin_state_up": False
}
scenario.run(network_create_args=network_create_args,
network_update_args=network_update_args)
mock__create_network.assert_called_once_with(network_create_args)
mock__update_network.assert_has_calls(
[mock.call(mock__create_network.return_value,
network_update_args)])
@mock.patch("%s.CreateAndDeleteNetworks._delete_network" % BASE)
@mock.patch("%s.CreateAndDeleteNetworks._create_network" % BASE)
def test_create_and_delete_networks(self,
mock__create_network,
mock__delete_network):
scenario = network.CreateAndDeleteNetworks(self.context)
# Default options
network_create_args = {}
scenario.run()
mock__create_network.assert_called_once_with(network_create_args)
self.assertTrue(mock__delete_network.call_count)
mock__create_network.reset_mock()
mock__delete_network.reset_mock()
# Explicit network name is specified
network_create_args = {"name": "given-name"}
scenario.run(network_create_args=network_create_args)
mock__create_network.assert_called_once_with(network_create_args)
self.assertTrue(mock__delete_network.call_count)
def test_create_and_list_subnets(self):
network_create_args = {"router:external": True}
subnet_create_args = {"allocation_pools": []}
subnet_cidr_start = "default_cidr"
subnets_per_network = 5
net = mock.MagicMock()
scenario = network.CreateAndListSubnets(self.context)
scenario._create_network = mock.Mock(return_value=net)
scenario._create_subnets = mock.Mock()
scenario._list_subnets = mock.Mock()
scenario.run(network_create_args=network_create_args,
subnet_create_args=subnet_create_args,
subnet_cidr_start=subnet_cidr_start,
subnets_per_network=subnets_per_network)
scenario._create_network.assert_called_once_with(
network_create_args)
scenario._create_subnets.assert_called_once_with(
net, subnet_create_args, subnet_cidr_start, subnets_per_network)
scenario._list_subnets.assert_called_once_with()
def test_create_and_show_subnets(self):
network_create_args = {"router:external": True}
subnet_create_args = {"allocation_pools": []}
subnet_cidr_start = "1.1.0.0/30"
subnets_per_network = 5
net = mock.MagicMock()
scenario = network.CreateAndShowSubnets(self.context)
scenario._get_or_create_network = mock.Mock(return_value=net)
scenario._create_subnets = mock.MagicMock()
scenario._show_subnet = mock.Mock()
scenario.run(network_create_args=network_create_args,
subnet_create_args=subnet_create_args,
subnet_cidr_start=subnet_cidr_start,
subnets_per_network=subnets_per_network)
scenario._get_or_create_network.assert_called_once_with(
network_create_args)
scenario._create_subnets.assert_called_once_with(
net, subnet_create_args, subnet_cidr_start, subnets_per_network)
for subnet in scenario._create_subnets.return_value:
scenario._show_subnet.assert_called_with(subnet)
def test_set_and_clear_router_gateway(self):
network_create_args = {"router:external": True}
router_create_args = {"admin_state_up": True}
enable_snat = True
ext_net = mock.MagicMock()
router = mock.MagicMock()
scenario = network.SetAndClearRouterGateway(self.context)
scenario._create_network = mock.Mock(return_value=ext_net)
scenario._create_router = mock.Mock(return_value=router)
scenario._add_gateway_router = mock.Mock()
scenario._remove_gateway_router = mock.Mock()
scenario.run(enable_snat, network_create_args, router_create_args)
scenario._create_network.assert_called_once_with(
network_create_args)
scenario._create_router.assert_called_once_with(router_create_args)
scenario._add_gateway_router.assert_called_once_with(router, ext_net,
enable_snat)
scenario._remove_gateway_router.assert_called_once_with(router)
def test_create_and_update_subnets(self):
network_create_args = {"router:external": True}
subnet_create_args = {"allocation_pools": []}
subnet_update_args = {"enabled_dhcp": True}
subnet_cidr_start = "default_cidr"
subnets_per_network = 5
net = mock.MagicMock()
subnets = [mock.MagicMock() for _ in range(subnets_per_network)]
scenario = network.CreateAndUpdateSubnets(self.context)
scenario._create_network = mock.Mock(return_value=net)
scenario._create_subnets = mock.Mock(return_value=subnets)
scenario._update_subnet = mock.Mock()
scenario.run(subnet_update_args,
network_create_args=network_create_args,
subnet_create_args=subnet_create_args,
subnet_cidr_start=subnet_cidr_start,
subnets_per_network=subnets_per_network)
scenario._create_network.assert_called_once_with(
network_create_args)
scenario._create_subnets.assert_called_once_with(
net, subnet_create_args, subnet_cidr_start, subnets_per_network)
scenario._update_subnet.assert_has_calls(
[mock.call(s, subnet_update_args) for s in subnets])
def test_create_and_delete_subnets(self):
network_create_args = {"router:external": True}
subnet_create_args = {"allocation_pools": []}
subnet_cidr_start = "default_cidr"
subnets_per_network = 5
net = mock.MagicMock()
subnets = [mock.MagicMock() for _ in range(subnets_per_network)]
scenario = network.CreateAndDeleteSubnets(self.context)
scenario._get_or_create_network = mock.Mock(return_value=net)
scenario._create_subnets = mock.Mock(return_value=subnets)
scenario._delete_subnet = mock.Mock()
scenario.run(network_create_args=network_create_args,
subnet_create_args=subnet_create_args,
subnet_cidr_start=subnet_cidr_start,
subnets_per_network=subnets_per_network)
scenario._get_or_create_network.assert_called_once_with(
network_create_args)
scenario._create_subnets.assert_called_once_with(
net, subnet_create_args, subnet_cidr_start, subnets_per_network)
scenario._delete_subnet.assert_has_calls(
[mock.call(s) for s in subnets])
def test_create_and_list_routers(self):
network_create_args = {"router:external": True}
subnet_create_args = {"allocation_pools": []}
subnet_cidr_start = "default_cidr"
subnets_per_network = 5
router_create_args = {"admin_state_up": True}
scenario = network.CreateAndListRouters(self.context)
scenario._create_network_structure = mock.Mock()
scenario._list_routers = mock.Mock()
scenario.run(network_create_args=network_create_args,
subnet_create_args=subnet_create_args,
subnet_cidr_start=subnet_cidr_start,
subnets_per_network=subnets_per_network,
router_create_args=router_create_args)
scenario._create_network_structure.assert_called_once_with(
network_create_args, subnet_create_args, subnet_cidr_start,
subnets_per_network, router_create_args)
scenario._list_routers.assert_called_once_with()
def test_list_agents(self):
agent_args = {
"F": "id",
"sort-dir": "asc"
}
scenario = network.ListAgents(self.context)
scenario._list_agents = mock.Mock()
scenario.run(agent_args=agent_args)
scenario._list_agents.assert_called_once_with(**agent_args)
def test_create_and_update_routers(self):
router_update_args = {"admin_state_up": False}
network_create_args = {"router:external": True}
subnet_create_args = {"allocation_pools": []}
subnet_cidr_start = "default_cidr"
subnets_per_network = 5
router_create_args = {"admin_state_up": True}
net = mock.MagicMock()
subnets = [mock.MagicMock() for i in range(subnets_per_network)]
routers = [mock.MagicMock() for i in range(subnets_per_network)]
scenario = network.CreateAndUpdateRouters(self.context)
scenario._create_network_structure = mock.Mock(
return_value=(net, subnets, routers))
scenario._update_router = mock.Mock()
scenario.run(router_update_args,
network_create_args=network_create_args,
subnet_create_args=subnet_create_args,
subnet_cidr_start=subnet_cidr_start,
subnets_per_network=subnets_per_network,
router_create_args=router_create_args)
scenario._create_network_structure.assert_called_once_with(
network_create_args, subnet_create_args, subnet_cidr_start,
subnets_per_network, router_create_args)
update_calls = [mock.call(router, router_update_args)
for router in routers]
scenario._update_router.assert_has_calls(update_calls)
def test_create_and_delete_routers(self):
network_create_args = {"router:external": True}
subnet_create_args = {"allocation_pools": []}
subnet_cidr_start = "default_cidr"
subnets_per_network = 5
router_create_args = {"admin_state_up": True}
net = mock.MagicMock()
subnets = [mock.MagicMock() for i in range(subnets_per_network)]
routers = [mock.MagicMock() for i in range(subnets_per_network)]
scenario = network.CreateAndDeleteRouters(self.context)
scenario._create_network_structure = mock.Mock(
return_value=(net, subnets, routers))
scenario._remove_interface_router = mock.Mock()
scenario._delete_router = mock.Mock()
scenario.run(network_create_args=network_create_args,
subnet_create_args=subnet_create_args,
subnet_cidr_start=subnet_cidr_start,
subnets_per_network=subnets_per_network,
router_create_args=router_create_args)
scenario._create_network_structure.assert_called_once_with(
network_create_args, subnet_create_args, subnet_cidr_start,
subnets_per_network, router_create_args)
scenario._remove_interface_router.assert_has_calls([
mock.call(subnets[i]["subnet"], routers[i]["router"])
for i in range(subnets_per_network)])
scenario._delete_router.assert_has_calls(
[mock.call(router) for router in routers])
def test_create_and_show_routers(self):
network_create_args = {"router:external": True}
subnet_create_args = {"allocation_pools": []}
subnet_cidr_start = "default_cidr"
subnets_per_network = 5
router_create_args = {"admin_state_up": True}
net = mock.MagicMock()
subnets = [mock.MagicMock() for i in range(subnets_per_network)]
routers = [mock.MagicMock() for i in range(subnets_per_network)]
scenario = network.CreateAndShowRouters(self.context)
scenario._create_network_structure = mock.Mock(
return_value=(net, subnets, routers))
scenario._show_router = mock.Mock()
scenario.run(network_create_args=network_create_args,
subnet_create_args=subnet_create_args,
subnet_cidr_start=subnet_cidr_start,
subnets_per_network=subnets_per_network,
router_create_args=router_create_args)
scenario._create_network_structure.assert_called_once_with(
network_create_args, subnet_create_args, subnet_cidr_start,
subnets_per_network, router_create_args)
scenario._show_router.assert_has_calls(
[mock.call(router) for router in routers])
def test_create_and_list_ports(self):
port_create_args = {"allocation_pools": []}
ports_per_network = 10
network_create_args = {"router:external": True}
net = mock.MagicMock()
scenario = network.CreateAndListPorts(self.context)
scenario._get_or_create_network = mock.Mock(return_value=net)
scenario._create_port = mock.MagicMock()
scenario._list_ports = mock.Mock()
scenario.run(network_create_args=network_create_args,
port_create_args=port_create_args,
ports_per_network=ports_per_network)
scenario._get_or_create_network.assert_called_once_with(
network_create_args)
scenario._create_port.assert_has_calls(
[mock.call(net, port_create_args)
for _ in range(ports_per_network)])
scenario._list_ports.assert_called_once_with()
def test_create_and_update_ports(self):
        port_update_args = {"admin_state_up": False}
port_create_args = {"allocation_pools": []}
ports_per_network = 10
network_create_args = {"router:external": True}
net = mock.MagicMock()
ports = [mock.MagicMock() for _ in range(ports_per_network)]
scenario = network.CreateAndUpdatePorts(self.context)
scenario._get_or_create_network = mock.Mock(return_value=net)
scenario._create_port = mock.Mock(side_effect=ports)
scenario._update_port = mock.Mock()
scenario.run(port_update_args,
network_create_args=network_create_args,
port_create_args=port_create_args,
ports_per_network=ports_per_network)
scenario._get_or_create_network.assert_called_once_with(
network_create_args)
scenario._create_port.assert_has_calls(
[mock.call(net, port_create_args)
for _ in range(ports_per_network)])
scenario._update_port.assert_has_calls(
[mock.call(p, port_update_args) for p in ports])
def test_create_and_bind_ports(self):
ports_per_network = 2
ports = [mock.Mock() for _ in range(ports_per_network)]
port_update_args = {
"device_owner": "compute:nova",
"device_id": "ba805478-85ff-11e9-a2e4-2b8dea218fc8",
"binding:host_id": "fake-host",
}
context = {
"tenant": {"id": "fake-tenant-id"},
"tenants": {
"fake-tenant-id": {
"networks": [
mock.Mock()
],
},
},
"networking_agents": [{
"host": "fake-host",
"alive": True,
"admin_state_up": True,
"agent_type": "Open vSwitch agent",
}],
}
scenario = network.CreateAndBindPorts(context)
scenario._create_network = mock.Mock()
scenario._create_subnet = mock.Mock()
scenario._create_port = mock.Mock(
side_effect=ports)
scenario._update_port = mock.Mock()
scenario.run(
ports_per_network=ports_per_network)
scenario._update_port.assert_has_calls(
[mock.call(p, port_update_args=port_update_args) for p in ports])
def test_create_and_show_ports_positive(self):
port_create_args = {"allocation_pools": []}
ports_per_network = 1
network_create_args = {"router:external": True}
net = mock.MagicMock()
scenario = network.CreateAndShowPorts(self.context)
scenario._get_or_create_network = mock.MagicMock(return_value=net)
scenario._create_port = mock.MagicMock()
scenario._show_port = mock.MagicMock()
port = {"port": {"id": 1, "name": "f"}}
port_info = {"port": {"id": 1, "name": "f", "status": "ACTIVE"}}
scenario._show_port.return_value = port_info
# Positive case:
scenario._create_port.return_value = port
scenario.run(network_create_args=network_create_args,
port_create_args=port_create_args,
ports_per_network=ports_per_network)
scenario._get_or_create_network.assert_called_once_with(
network_create_args)
scenario._create_port.assert_called_with(net, port_create_args)
scenario._show_port.assert_called_with(port)
def test_create_and_show_ports_negative(self):
port_create_args = {"allocation_pools": []}
ports_per_network = 1
network_create_args = {"router:external": True}
net = mock.MagicMock()
scenario = network.CreateAndShowPorts(self.context)
scenario._get_or_create_network = mock.MagicMock(return_value=net)
scenario._create_port = mock.MagicMock()
scenario._show_port = mock.MagicMock()
# Negative case1: port isn't created
scenario._create_port.return_value = None
self.assertRaises(rally_exceptions.RallyAssertionError,
scenario.run,
network_create_args,
port_create_args,
ports_per_network)
scenario._get_or_create_network.assert_called_once_with(
network_create_args)
scenario._create_port.assert_called_once_with(net, port_create_args)
# Negative case2: port isn't show
port = {"port": {"id": 1, "name": "f1"}}
port_info = {"port": {"id": 2, "name": "f2", "status": "ACTIVE"}}
scenario._show_port.return_value = port_info
scenario._create_port.return_value = port
self.assertRaises(rally_exceptions.RallyAssertionError,
scenario.run,
network_create_args,
port_create_args,
ports_per_network)
scenario._get_or_create_network.assert_called_with(
network_create_args)
scenario._create_port.assert_called_with(net, port_create_args)
scenario._show_port.assert_called_with(port)
def test_create_and_delete_ports(self):
port_create_args = {"allocation_pools": []}
ports_per_network = 10
network_create_args = {"router:external": True}
net = mock.MagicMock()
ports = [mock.MagicMock() for _ in range(ports_per_network)]
scenario = network.CreateAndDeletePorts(self.context)
scenario._get_or_create_network = mock.Mock(return_value=net)
scenario._create_port = mock.Mock(side_effect=ports)
scenario._delete_port = mock.Mock()
scenario.run(network_create_args=network_create_args,
port_create_args=port_create_args,
ports_per_network=ports_per_network)
scenario._get_or_create_network.assert_called_once_with(
network_create_args)
scenario._create_port.assert_has_calls(
[mock.call(net, port_create_args)
for _ in range(ports_per_network)])
scenario._delete_port.assert_has_calls(
[mock.call(p) for p in ports])
@ddt.data(
{"floating_network": "ext-net"},
{"floating_network": "ext-net",
"floating_ip_args": {"floating_ip_address": "1.1.1.1"}},
)
@ddt.unpack
def test_create_and_list_floating_ips(self, floating_network=None,
floating_ip_args=None):
scenario = network.CreateAndListFloatingIps(self.context)
floating_ip_args = floating_ip_args or {}
scenario._create_floatingip = mock.Mock()
scenario._list_floating_ips = mock.Mock()
scenario.run(floating_network=floating_network,
floating_ip_args=floating_ip_args)
scenario._create_floatingip.assert_called_once_with(
floating_network, **floating_ip_args)
scenario._list_floating_ips.assert_called_once_with()
@ddt.data(
{"floating_network": "ext-net"},
{"floating_network": "ext-net",
"floating_ip_args": {"floating_ip_address": "1.1.1.1"}},
)
@ddt.unpack
def test_create_and_delete_floating_ips(self, floating_network=None,
floating_ip_args=None):
scenario = network.CreateAndDeleteFloatingIps(self.context)
floating_ip_args = floating_ip_args or {}
fip = {"floatingip": {"id": "floating-ip-id"}}
scenario._create_floatingip = mock.Mock(return_value=fip)
scenario._delete_floating_ip = mock.Mock()
scenario.run(floating_network=floating_network,
floating_ip_args=floating_ip_args)
scenario._create_floatingip.assert_called_once_with(
floating_network, **floating_ip_args)
scenario._delete_floating_ip.assert_called_once_with(
scenario._create_floatingip.return_value["floatingip"])
def test_associate_and_dissociate_floating_ips(self):
scenario = network.AssociateAndDissociateFloatingIps(self.context)
fip = {"floatingip": {"id": "floating-ip-id"}}
subnet = {"subnet": {}}
port = {"port": {"id": "port-id"}}
router = {"router": {}}
scenario._create_floatingip = mock.Mock(return_value=fip)
scenario._create_network = mock.Mock()
scenario._create_subnet = mock.Mock(return_value=subnet)
scenario._create_port = mock.Mock(return_value=port)
scenario._create_router = mock.Mock(return_value=router)
scenario._get_network_id = mock.Mock()
scenario._add_gateway_router = mock.Mock()
scenario._add_interface_router = mock.Mock()
scenario._associate_floating_ip = mock.Mock()
scenario._dissociate_floating_ip = mock.Mock()
scenario.run(floating_network="public")
scenario._associate_floating_ip.assert_called_once_with(
floatingip=fip["floatingip"], port=port["port"])
scenario._dissociate_floating_ip.assert_called_once_with(
floatingip=fip["floatingip"])
@mock.patch("%s.DeleteSubnets._delete_subnet" % BASE)
def test_delete_subnets(self, mock__delete_subnet):
# do not guess what user will be used
self.context["user_choice_method"] = "round_robin"
# if it is the 4th iteration, the second user from the second tenant
# should be taken, which means that the second subnets from each
# tenant network should be removed.
self.context["iteration"] = 4
        # in case of `round_robin` the user will be selected from the list of
        # available users of a particular tenant, not from the list of all
        # tenants (i.e. random choice). BUT to trigger selecting the user and
        # tenant, the `users` key must be present in the context dict
self.context["users"] = []
self.context["tenants"] = {
# this should not be used
"uuid-1": {
"id": "uuid-1",
"networks": [{"subnets": ["subnet-1"]}],
"users": [{"id": "user-1", "credential": mock.MagicMock()},
{"id": "user-2", "credential": mock.MagicMock()}]
},
# this is expected user
"uuid-2": {
"id": "uuid-2",
"networks": [
{"subnets": ["subnet-2", "subnet-3"]},
{"subnets": ["subnet-4", "subnet-5"]}],
"users": [{"id": "user-3", "credential": mock.MagicMock()},
{"id": "user-4", "credential": mock.MagicMock()}]
}
}
scenario = network.DeleteSubnets(self.context)
self.assertEqual("user-4", scenario.context["user"]["id"],
"Unexpected user is taken. The wrong subnets can be "
"affected(removed).")
scenario.run()
self.assertEqual(
[
mock.call({"subnet": {"id": "subnet-3"}}),
mock.call({"subnet": {"id": "subnet-5"}})
],
mock__delete_subnet.call_args_list)
| 42.833333
| 78
| 0.645879
|
edef1bc001a89b12b0cdfc92c2f11923c0c862bf
| 1,426
|
py
|
Python
|
model-optimizer/extensions/ops/Enter.py
|
fujunwei/dldt
|
09497b7724de4be92629f7799b8538b483d809a2
|
[
"Apache-2.0"
] | 1
|
2021-07-30T17:03:50.000Z
|
2021-07-30T17:03:50.000Z
|
model-optimizer/extensions/ops/Enter.py
|
fujunwei/dldt
|
09497b7724de4be92629f7799b8538b483d809a2
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/extensions/ops/Enter.py
|
fujunwei/dldt
|
09497b7724de4be92629f7799b8538b483d809a2
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import networkx as nx
import numpy as np
from mo.graph.graph import Node, Graph
from mo.ops.op import Op
from mo.utils.error import Error
class Enter(Op):
op = "Enter"
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'type': __class__.op,
'op': __class__.op,
'in_ports_count': 1,
'infer': Enter.enter_infer,
}
super().__init__(graph, mandatory_props, attrs)
@staticmethod
def enter_infer(node: Node):
output_shape = node.in_node(0).shape
output_value = node.in_node(0).value
for _, out_node in node.graph.out_edges(node.id):
node.graph.node[out_node]['shape'] = np.array(output_shape)
node.graph.node[out_node]['value'] = None if output_value is None else np.array(output_value)
| 31.688889
| 105
| 0.685835
|
07e070e48b5f398342da7cb13c9421968e6c43f0
| 3,269
|
py
|
Python
|
openprocurement/auctions/geb/tests/base.py
|
andrey484/openprocurement.auctions.geb
|
05c9ea3db1b1d290521b1430286ff2e5064819cd
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auctions/geb/tests/base.py
|
andrey484/openprocurement.auctions.geb
|
05c9ea3db1b1d290521b1430286ff2e5064819cd
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auctions/geb/tests/base.py
|
andrey484/openprocurement.auctions.geb
|
05c9ea3db1b1d290521b1430286ff2e5064819cd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from StringIO import StringIO
import json
import os
from openprocurement.auctions.core.tests.base import (
BaseWebTest as CoreBaseWebTest,
)
from openprocurement.auctions.core.tests.base import MOCK_CONFIG as BASE_MOCK_CONFIG
from openprocurement.auctions.core.utils import connection_mock_config
from openprocurement.auctions.geb.tests.constants import (
PARTIAL_MOCK_CONFIG
)
MOCK_CONFIG = connection_mock_config(PARTIAL_MOCK_CONFIG,
base=BASE_MOCK_CONFIG,
connector=('plugins', 'api', 'plugins',
'auctions.core', 'plugins'))
class BaseWebTest(CoreBaseWebTest):
"""
Base Web Test to test openprocurement.auctions.geb.
It setups the database before each test and delete it after.
"""
relative_to = os.path.dirname(__file__)
mock_config = MOCK_CONFIG
class BaseWebDocsTest(BaseWebTest):
"""
Base Web Docs Test to dump test results to files
"""
def setUp(self):
super(BaseWebDocsTest, self).setUp()
if not os.environ.get('DOCSTEST'):
self.skipTest('not docs test')
def construct_request(self, request):
buff = StringIO()
lines = []
url = request.url.split(request.host_url)[-1]
format_url = '{} {}'.format(request.method, url)
lines.append(format_url)
auth = request.authorization
format_auth = 'Authorization: {} {}'.format(auth[0], auth[1])
lines.append(format_auth)
if request.content_type:
format_content_type = 'Content-Type: {}'.format(request.content_type)
lines.append(format_content_type)
if request.body:
format_body = json.dumps(json.loads(request.body), indent=2, ensure_ascii=False).encode('utf8')
content = '\n'.join(lines)
buff.write(content)
if request.body:
buff.write('\n\n')
buff.write(format_body)
return buff.getvalue()
def construct_response(self, response, request):
buff = StringIO()
lines = []
format_status = '{}'.format(response.status)
lines.append(format_status)
format_content_type = 'Content-Type: {}'.format(response.content_type)
lines.append(format_content_type)
if response.location:
location = response.location.split(request.host_url)[-1]
format_location = 'Location: {}'.format(location)
lines.append(format_location)
format_body = json.dumps(json.loads(response.body), indent=2, ensure_ascii=False).encode('utf8')
content = '\n'.join(lines)
buff.write(content)
buff.write('\n\n')
buff.write(format_body)
return buff.getvalue()
def dump_to_file(self, request, response, filename):
with open(filename, 'w') as fd:
fd.write(request)
fd.write('\n\n\n')
fd.write(response)
def dump(self, request, response, filename):
format_request = self.construct_request(request)
format_response = self.construct_response(response, request)
self.dump_to_file(format_request, format_response, filename)
| 30.839623
| 107
| 0.631386
|
0f753579159f1b6ea4b7987573167c66c3f5bae7
| 1,183
|
py
|
Python
|
names/database.py
|
PuchatekwSzortach/names_database_builder
|
bdfdd293abb7889bf2f63ce1e9e278b0fe567bcb
|
[
"MIT"
] | 1
|
2021-07-28T05:14:14.000Z
|
2021-07-28T05:14:14.000Z
|
names/database.py
|
PuchatekwSzortach/names_database_builder
|
bdfdd293abb7889bf2f63ce1e9e278b0fe567bcb
|
[
"MIT"
] | null | null | null |
names/database.py
|
PuchatekwSzortach/names_database_builder
|
bdfdd293abb7889bf2f63ce1e9e278b0fe567bcb
|
[
"MIT"
] | null | null | null |
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.orm
import sqlalchemy.event
def enforce_foreign_key_constraint(connection, record):
connection.execute('pragma foreign_keys=ON')
engine = sqlalchemy.create_engine('sqlite:///names.db', echo=False)
sqlalchemy.event.listen(engine, 'connect', enforce_foreign_key_constraint)
Base = sqlalchemy.ext.declarative.declarative_base()
class Gender(Base):
__tablename__ = 'genders'
gender = sqlalchemy.Column(sqlalchemy.Unicode, primary_key=True)
def __repr__(self):
return self.gender
class Name(Base):
__tablename__ = 'names'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
hiragana = sqlalchemy.Column(sqlalchemy.Unicode, nullable=False)
kanji = sqlalchemy.Column(sqlalchemy.Unicode, nullable=False)
gender = sqlalchemy.Column(
sqlalchemy.Unicode, sqlalchemy.ForeignKey('genders.gender'), nullable=False)
def __init__(self, japanese_name):
self.hiragana = japanese_name.hiragana
self.kanji = japanese_name.kanji
self.gender = japanese_name.gender
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
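# Illustrative usage (added sketch, not part of the original module). SimpleNamespace
# stands in for whatever japanese_name object is normally passed to Name(); only the
# .hiragana/.kanji/.gender attributes are required. Guarded so it never runs on import.
if __name__ == '__main__':
    from types import SimpleNamespace
    session = sqlalchemy.orm.sessionmaker(bind=engine)()
    session.add(Gender(gender=u'female'))
    session.add(Name(SimpleNamespace(hiragana=u'さくら', kanji=u'桜', gender=u'female')))
    session.commit()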
| 26.288889
| 84
| 0.754861
|
9fb1c22d750c42d4e739d4deef45c0e8ce9f3496
| 375
|
py
|
Python
|
setup.py
|
sonhal/homemade-crypto-library
|
bb385d786d22f7adccfe1b84fa089e2d3848e16f
|
[
"MIT"
] | null | null | null |
setup.py
|
sonhal/homemade-crypto-library
|
bb385d786d22f7adccfe1b84fa089e2d3848e16f
|
[
"MIT"
] | null | null | null |
setup.py
|
sonhal/homemade-crypto-library
|
bb385d786d22f7adccfe1b84fa089e2d3848e16f
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='homemade_crypto',
version='0.0.1',
packages=find_packages(where="src"),
package_dir={"": "src"},
url='https://github.com/sonhal/homemade-crypto-library',
license='MIT',
author='sondre',
author_email='',
description='A homemade cryptography library made for educational purposes'
)
| 26.785714
| 79
| 0.688
|
add194a3e774db45b161873a435654c7910496e4
| 288
|
py
|
Python
|
apps/test_readfile/readTest.py
|
mjysh/smarties
|
e739680fc3195ca4107b64f773158fbec42753fb
|
[
"MIT"
] | null | null | null |
apps/test_readfile/readTest.py
|
mjysh/smarties
|
e739680fc3195ca4107b64f773158fbec42753fb
|
[
"MIT"
] | null | null | null |
apps/test_readfile/readTest.py
|
mjysh/smarties
|
e739680fc3195ca4107b64f773158fbec42753fb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 9 15:47:00 2021
@author: yusheng
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
FILE = "./train/restarted_agent_00_net_"
ftype = np.float32
W = np.fromfile(FILE +"weights.raw", dtype=ftype)
| 20.571429
| 56
| 0.684028
|
23b5e23af7194c7b2d15d72e0bc7cd29cc3afd0c
| 4,839
|
py
|
Python
|
utils/FinancesUtil.py
|
klpdotorg/disereports
|
5d6290dbd684c4bb4093562c4882f21b818ec81f
|
[
"MIT"
] | null | null | null |
utils/FinancesUtil.py
|
klpdotorg/disereports
|
5d6290dbd684c4bb4093562c4882f21b818ec81f
|
[
"MIT"
] | null | null | null |
utils/FinancesUtil.py
|
klpdotorg/disereports
|
5d6290dbd684c4bb4093562c4882f21b818ec81f
|
[
"MIT"
] | null | null | null |
import csv
import traceback
import codecs
import sys, os,traceback
def getFinancesText(data,lang,constype):
transDict = {}
f = codecs.open(os.path.join(os.getcwd(),'translations/fin_translations_text.csv'),'r','utf-8')
for line in f.readlines():
text = line.split('|')
if lang == 2:
transDict[str(text[0])] = text[1]
else:
transDict[str(text[0])] = text[2]
if constype == 4:
transDict['1']=transDict['32']
transDict['19']=transDict['35']
transDict['24']=transDict['38']
transDict['11']=transDict['41']
elif constype == 5:
transDict['1']=transDict['33']
transDict['19']=transDict['36']
transDict['24']=transDict['39']
transDict['11']=transDict['42']
elif constype == 6:
transDict['1']=transDict['34']
transDict['19']=transDict['37']
transDict['24']=transDict['40']
transDict['11']=transDict['43']
intro_txt_str=transDict['1'] + transDict['18']
summary_txt_str=transDict['17']
annual_txt_str=transDict['17']
mtnc_txt_str=transDict['17']
tlm_txt_str=transDict['17']
neighbours_txt_str=transDict['17']
#School Text
if int(data["inst_counts"]["abs_schcount"]) > 0:
data['intro_txt'] = intro_txt_str
#--------------------- SUMMARY
total_grant = data["total_tlm"] + data["total_annual"] + data["total_mntc"]
summary_txt_str = transDict['24'] + formatIndian(total_grant) + '. ' + transDict['25'] + formatIndian(data["total_tlm"]) + '. '
summary_txt_str = summary_txt_str + transDict['26'] + formatIndian(data["total_annual"]) + '. ' + transDict['27'] + formatIndian(data["total_mntc"]) + '. '
data['summary_txt'] = summary_txt_str
#---------------------- TLM
tlm_txt_str = transDict['19'] + str(data['tlmgrant_sch']['teacher_count']) + transDict['20'] + formatIndian(data["total_tlm"]) + transDict['8'] + '. '
data['tlm_txt'] = tlm_txt_str
#---------------------- Annual
annual_txt_str = transDict['23']
totalsg = 0
for key in data["annualgrant_sch"]:
annual_txt_str = ' ' + annual_txt_str + transDict[key.upper()] + formatIndian(data["annualgrant_sch"][key][1]) + ', '
totalsg = totalsg + int(data["annualgrant_sch"][key][0])
annual_txt_str = annual_txt_str.rstrip(', ') + transDict['8'] + '. '
annual_txt_str = annual_txt_str + transDict['30'] + str(totalsg) + transDict['31']
data['annual_txt'] = annual_txt_str
#---------------------- Maintenance
mtnc_txt_str = transDict['21'] + formatIndian(data["mtncgrant_sch"].get("With 3 classrooms or fewer ",['0','0'])[1])
mtnc_txt_str = mtnc_txt_str + transDict['22'] + formatIndian(data["mtncgrant_sch"].get("With more than 3 classrooms ",['0','0'])[1]) + transDict['8'] + '. '
mtnc_txt_str = mtnc_txt_str + transDict['28'] + str(int(data["mtncgrant_sch"].get("With 3 classrooms or fewer ",['0','0'])[0]) + int(data["mtncgrant_sch"].get("With more than 3 classrooms ",['0','0'])[0]))
mtnc_txt_str = mtnc_txt_str +'/'+str(totalsg)+ transDict['29']
data['mtnc_txt'] = mtnc_txt_str
#---------------------- Neighbours
neighbours = None
if "neighbours_grant" in data.keys():
neighbours = data["neighbours_grant"].keys()
if neighbours:
neighbours.remove(data['const_name'])
neighbours_txt_str = ' ' + str(data['const_name']) + ' '
neighbours_txt_str = neighbours_txt_str + transDict['11']
neighbours_txt_str = neighbours_txt_str + ', '.join([str(x) for x in neighbours]) + '. '
neighbours_txt_str = neighbours_txt_str + transDict['12']
data['neighbours_txt'] = neighbours_txt_str + transDict['15']
data['source_txt'] = transDict['16']
return data
def formatIndian(inputNum) :
prefStr = ''
outputString = ''
minus = ''
suf = ''
lastThree = ''
try:
inputString = str(inputNum)
if '.' in inputString:
numberArray = inputString.split('.', 2)
pref = int(numberArray[0])
suf = numberArray[1]
else:
pref = inputString
suf = ''
outputString = ''
minus = ''
if pref < 0:
minus = '-'
prefStr = str(pref)
if len(prefStr) > 3 :
lastThree = prefStr[len(prefStr)-3: len(prefStr)]
prefStr = prefStr[0: len(prefStr)-3]
if len(prefStr) % 2 > 0 :
outputString = outputString + prefStr[0:1] + ','
prefStr = prefStr[1: len(prefStr)]
while (len(prefStr) >= 2) :
outputString = outputString + prefStr[0:2] + ','
prefStr = prefStr[2:len(prefStr)]
outputString = minus + outputString + lastThree + suf
return outputString
except:
print 'Error occurred'
print "Unexpected error:", sys.exc_info()
traceback.print_exc(file=sys.stdout)
return 'NaN'
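# Illustrative outputs (added note, not part of the original module):
#   formatIndian(4839)    -> '4,839'
#   formatIndian(1234567) -> '12,34,567'   (Indian lakh/crore digit grouping)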
| 41.358974
| 211
| 0.600537
|
5d21b53988815315ed39adc30c9e83efb6a7ab12
| 6,877
|
py
|
Python
|
src/third_party/wiredtiger/lang/python/wiredtiger/packing.py
|
EdwardPrentice/wrongo
|
1e7c9136f5fab7040b5bd5df51b4946876625c88
|
[
"Apache-2.0"
] | 25
|
2016-12-07T09:39:51.000Z
|
2021-12-16T11:17:37.000Z
|
src/third_party/wiredtiger/lang/python/wiredtiger/packing.py
|
EdwardPrentice/wrongo
|
1e7c9136f5fab7040b5bd5df51b4946876625c88
|
[
"Apache-2.0"
] | 1
|
2022-03-05T02:55:28.000Z
|
2022-03-05T05:28:00.000Z
|
src/third_party/wiredtiger/lang/python/wiredtiger/packing.py
|
EdwardPrentice/wrongo
|
1e7c9136f5fab7040b5bd5df51b4946876625c88
|
[
"Apache-2.0"
] | 23
|
2017-01-22T03:35:26.000Z
|
2021-12-16T11:17:39.000Z
|
#!/usr/bin/env python
#
# Public Domain 2014-2016 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# WiredTiger variable-length packing and unpacking functions
"""Packing and unpacking functions
The format string uses the following conversions:
Format Python Notes
x N/A pad byte, no associated value
b int signed byte
B int unsigned byte
h int signed 16-bit
H int unsigned 16-bit
i int signed 32-bit
I int unsigned 32-bit
l int signed 32-bit
L int unsigned 32-bit
q int signed 64-bit
Q int unsigned 64-bit
r int record number
s str fixed-length string
S str NUL-terminated string
t int fixed-length bit field
u str raw byte array
"""
from intpacking import pack_int, unpack_int
def __get_type(fmt):
if not fmt:
return None, fmt
# Variable-sized encoding is the default (and only supported format in v1)
if fmt[0] in '.@<>':
tfmt = fmt[0]
fmt = fmt[1:]
else:
tfmt = '.'
return tfmt, fmt
def __unpack_iter_fmt(fmt):
size = 0
havesize = 0
for offset, char in enumerate(fmt):
if char.isdigit():
size = (size * 10) + int(char)
havesize = 1
else:
if not havesize:
size = 1
yield offset, havesize, size, char
size = 0
havesize = 0
def unpack(fmt, s):
tfmt, fmt = __get_type(fmt)
if not fmt:
return ()
if tfmt != '.':
raise ValueError('Only variable-length encoding is currently supported')
result = []
for offset, havesize, size, f in __unpack_iter_fmt(fmt):
if f == 'x':
s = s[size:]
# Note: no value, don't increment i
elif f in 'SsUu':
if not havesize:
if f == 's':
pass
elif f == 'S':
size = s.find('\0')
elif f == 'u' and offset == len(fmt) - 1:
size = len(s)
else:
# Note: 'U' is used internally, and may be exposed to us.
# It indicates that the size is always stored unless there
# is a size in the format.
size, s = unpack_int(s)
result.append(s[:size])
if f == 'S' and not havesize:
size += 1
s = s[size:]
elif f in 't':
# bit type, size is number of bits
result.append(ord(s[0:1]))
s = s[1:]
elif f in 'Bb':
# byte type
for i in xrange(size):
v = ord(s[0:1])
if f != 'B':
v -= 0x80
result.append(v)
s = s[1:]
else:
# integral type
for j in xrange(size):
v, s = unpack_int(s)
result.append(v)
return result
def __pack_iter_fmt(fmt, values):
index = 0
for offset, havesize, size, char in __unpack_iter_fmt(fmt):
if char == 'x': # padding no value
yield offset, havesize, size, char, None
elif char in 'SsUut':
yield offset, havesize, size, char, values[index]
index += 1
else: # integral type
size = size if havesize else 1
for i in xrange(size):
value = values[index]
yield offset, havesize, 1, char, value
index = index + 1
def pack(fmt, *values):
tfmt, fmt = __get_type(fmt)
if not fmt:
return ()
if tfmt != '.':
raise ValueError('Only variable-length encoding is currently supported')
result = ''
i = 0
for offset, havesize, size, f, val in __pack_iter_fmt(fmt, values):
if f == 'x':
if not havesize:
result += '\0'
else:
result += '\0' * size
# Note: no value, don't increment i
elif f in 'SsUu':
if f == 'S' and '\0' in val:
l = val.find('\0')
else:
l = len(val)
if havesize or f == 's':
if l > size:
l = size
elif (f == 'u' and offset != len(fmt) - 1) or f == 'U':
result += pack_int(l)
if type(val) is unicode and f in 'Ss':
result += str(val[:l])
else:
result += val[:l]
if f == 'S' and not havesize:
result += '\0'
elif size > l:
result += '\0' * (size - l)
elif f in 't':
# bit type, size is number of bits
if not havesize:
size = 1
if size > 8:
raise ValueError("bit count cannot be greater than 8 for 't' encoding")
mask = (1 << size) - 1
if (mask & val) != val:
raise ValueError("value out of range for 't' encoding")
result += chr(val)
elif f in 'Bb':
# byte type
if not havesize:
size = 1
for i in xrange(size):
if f == 'B':
v = val
else:
# Translate to maintain ordering with the sign bit.
v = val + 0x80
if v > 255 or v < 0:
raise ValueError("value out of range for 'B' encoding")
result += chr(v)
else:
# integral type
result += pack_int(val)
return result
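# Illustrative self-check (added sketch, not part of the original WiredTiger module):
# round-trips a few values through pack()/unpack() using the conversion characters
# documented in the module docstring above. Only runs when executed directly.
if __name__ == '__main__':
    _packed = pack('Sii', 'hello', 1, -2)   # NUL-terminated string plus two signed ints
    assert unpack('Sii', _packed) == ['hello', 1, -2]
    _raw = pack('u', 'raw-bytes')           # trailing raw byte array, no length prefix
    assert unpack('u', _raw) == ['raw-bytes']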
| 34.21393
| 87
| 0.518685
|
bf67d417114fa5ceceb01f63c396afd82dc19a10
| 2,460
|
py
|
Python
|
userbot/modules/rastick.py
|
Daylight23/oub-remix
|
3a85cbc956af0bd17bb18f8712df4fea25125203
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/rastick.py
|
Daylight23/oub-remix
|
3a85cbc956af0bd17bb18f8712df4fea25125203
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/rastick.py
|
Daylight23/oub-remix
|
3a85cbc956af0bd17bb18f8712df4fea25125203
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2020-09-08T16:22:51.000Z
|
2020-09-08T16:22:51.000Z
|
import random
import re
from userbot import CMD_HELP, bot
from userbot.events import register
from asyncio import sleep
EMOJI_PATTERN = re.compile(
"["
"\U0001F1E0-\U0001F1FF" # flags (iOS)
"\U0001F300-\U0001F5FF" # symbols & pictographs
"\U0001F600-\U0001F64F" # emoticons
"\U0001F680-\U0001F6FF" # transport & map symbols
"\U0001F700-\U0001F77F" # alchemical symbols
"\U0001F780-\U0001F7FF" # Geometric Shapes Extended
"\U0001F800-\U0001F8FF" # Supplemental Arrows-C
"\U0001F900-\U0001F9FF" # Supplemental Symbols and Pictographs
"\U0001FA00-\U0001FA6F" # Chess Symbols
"\U0001FA70-\U0001FAFF" # Symbols and Pictographs Extended-A
"\U00002702-\U000027B0" # Dingbats
"]+"
)
def deEmojify(inputString: str) -> str:
return re.sub(EMOJI_PATTERN, "", inputString)
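# Illustrative only (not in the original module): deEmojify drops any character
# matched by EMOJI_PATTERN, e.g. deEmojify("hi 🙂🚀") -> "hi "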
@register(outgoing=True, pattern=r"^\.rst(?: |$)(.*)")
async def rastick(animu):
text = animu.pattern_match.group(1)
if not text:
if animu.is_reply:
text = (await animu.get_reply_message()).message
else:
await animu.answer("`No text given, hence no stickers.`")
return
animus = list(range(1, 64))
sticcers = await bot.inline_query(
"stickerizerbot", f"#{random.choice(animus)}{(deEmojify(text))}"
)
try:
await sticcers[0].click(
animu.chat_id,
reply_to=animu.reply_to_msg_id,
silent=True if animu.is_reply else False,
hide_via=True,
)
except Exception:
return await animu.edit(
"`You cannot send inline results in this chat (caused by SendInlineBotResultRequest)`"
)
await sleep(5)
await animu.delete()
| 20.847458
| 98
| 0.492276
|
25f2640ec6acd08c0f8453a9351ee554cc35f058
| 3,162
|
py
|
Python
|
tensorflow_federated/python/research/optimization/emnist_ae/dataset.py
|
matech96/federated
|
b30a26d66162bd02a89a12f119e17925d161a26b
|
[
"Apache-2.0"
] | 1
|
2020-06-22T03:08:16.000Z
|
2020-06-22T03:08:16.000Z
|
tensorflow_federated/python/research/optimization/emnist_ae/dataset.py
|
matech96/federated
|
b30a26d66162bd02a89a12f119e17925d161a26b
|
[
"Apache-2.0"
] | 7
|
2020-04-03T05:32:28.000Z
|
2020-05-15T01:28:25.000Z
|
tensorflow_federated/python/research/optimization/emnist_ae/dataset.py
|
matech96/federated
|
b30a26d66162bd02a89a12f119e17925d161a26b
|
[
"Apache-2.0"
] | 2
|
2020-04-28T17:46:13.000Z
|
2022-02-10T02:40:40.000Z
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for loading EMNIST training and testing data."""
import tensorflow as tf
import tensorflow_federated as tff
EMNIST_TRAIN_DIGITS_ONLY_SIZE = 341873
EMNIST_TRAIN_FULL_SIZE = 671585
TEST_BATCH_SIZE = 500
MAX_CLIENT_DATASET_SIZE = 418
def reshape_emnist_element(element):
x = 1 - tf.reshape(element['pixels'], (-1, 28 * 28))
return (x, x)
def get_emnist_datasets(client_batch_size,
client_epochs_per_round,
only_digits=False):
"""Loads and preprocesses EMNIST training and testing sets."""
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
only_digits=only_digits)
def preprocess_train_dataset(dataset):
"""Preprocess EMNIST training dataset."""
return (dataset
# Shuffle according to the largest client dataset
.shuffle(buffer_size=MAX_CLIENT_DATASET_SIZE)
# Repeat to do multiple local epochs
.repeat(client_epochs_per_round)
# Batch to a fixed client batch size
.batch(client_batch_size, drop_remainder=False)
# Preprocessing step
.map(
reshape_emnist_element,
num_parallel_calls=tf.data.experimental.AUTOTUNE))
def preprocess_test_dataset(dataset):
"""Preprocess EMNIST testing dataset."""
return (dataset.batch(TEST_BATCH_SIZE, drop_remainder=False).map(
reshape_emnist_element,
num_parallel_calls=tf.data.experimental.AUTOTUNE).cache())
emnist_train = emnist_train.preprocess(preprocess_train_dataset)
emnist_test = preprocess_test_dataset(
emnist_test.create_tf_dataset_from_all_clients()).cache()
return emnist_train, emnist_test
def get_centralized_emnist_datasets(batch_size, only_digits=False):
"""Loads and preprocesses centralized EMNIST training and testing sets."""
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
only_digits=only_digits)
def preprocess(dataset, batch_size, buffer_size=10000, shuffle_data=True):
if shuffle_data:
dataset = dataset.shuffle(buffer_size=buffer_size)
return (dataset.batch(batch_size).map(
reshape_emnist_element,
num_parallel_calls=tf.data.experimental.AUTOTUNE).cache())
train_dataset = preprocess(
emnist_train.create_tf_dataset_from_all_clients(),
batch_size,
shuffle_data=True)
test_dataset = preprocess(
emnist_test.create_tf_dataset_from_all_clients(),
TEST_BATCH_SIZE,
shuffle_data=False)
return train_dataset, test_dataset
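# Hedged usage sketch (not part of the original module); assumes the TFF EMNIST
# data can be downloaded in this environment:
#   fed_train, central_test = get_emnist_datasets(
#       client_batch_size=20, client_epochs_per_round=1, only_digits=False)
#   # fed_train is federated ClientData whose client datasets yield (x, x)
#   # pairs of flattened 28*28 pixel vectors for autoencoder training;
#   # central_test is a single batched tf.data.Dataset built from all clients.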
| 37.2
| 76
| 0.732448
|
e3c162691c2247844e2163cf960a5423e112c6bc
| 5,946
|
py
|
Python
|
blog/admin_app/views.py
|
skrsinghrahul/blog
|
ffa1ae6d9b411b2c6ff90fb00ecc0263f436ac18
|
[
"MIT"
] | null | null | null |
blog/admin_app/views.py
|
skrsinghrahul/blog
|
ffa1ae6d9b411b2c6ff90fb00ecc0263f436ac18
|
[
"MIT"
] | 3
|
2020-06-05T19:00:26.000Z
|
2021-06-01T22:31:16.000Z
|
blog/admin_app/views.py
|
skrsinghrahul/blog
|
ffa1ae6d9b411b2c6ff90fb00ecc0263f436ac18
|
[
"MIT"
] | null | null | null |
import time
from django.db.models import Q
from django.utils.text import slugify
from rest_framework import status, serializers
from rest_framework.generics import ListAPIView, CreateAPIView, UpdateAPIView
from rest_framework.response import Response
from admin_app.models import Blog, Topic
from admin_app.serializers import BlogSerializer, TopicSerializer
from base.permissions import IsSuperUser
from base.utils import pagination
class BlogListView(ListAPIView, CreateAPIView):
permission_classes = (IsSuperUser,)
serializer_class = BlogSerializer
def get_filters(self):
filters = {}
query_params = self.request.query_params
reported_date_from = query_params.get('reported_date_from')
reported_date_to = query_params.get('reported_date_to')
if reported_date_from and reported_date_to:
reported_date_to = int(reported_date_to) + 86400
filters.update({"published_time__range": [reported_date_from, reported_date_to]})
topic = query_params.get('topic')
if topic:
filters.update({"related_topics": topic})
self.q = query_params.get('q')
self.page_size = query_params.get('page_size', 10)
return filters
def get(self, request):
queryset = Blog.objects.filter(**self.get_filters())
if self.q:
queryset = queryset.filter(Q(title__icontains=self.q) | Q(description__icontains=self.q) |
Q(related_topics__topic__icontains=self.q)).distinct()
paginator, result = pagination(queryset, request, page_size=self.page_size)
serializer = self.get_serializer(result, many=True)
response_data = serializer.data
return paginator.get_paginated_response(response_data)
def create(self, request, *args, **kwargs):
try:
data = request.data
data['slug'] = slugify(data['title'])
data['published_time'] = int(time.time())
serializer = self.get_serializer(data=data)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
except serializers.ValidationError as e:
return Response(e.detail, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
print(e)
return Response({"detail": "Something went wrong"}, status=status.HTTP_400_BAD_REQUEST)
class BlogView(ListAPIView, UpdateAPIView):
permission_classes = (IsSuperUser,)
serializer_class = BlogSerializer
def get(self, request, id):
if not id:
return Response({"detail": "Invalid request"}, status=status.HTTP_400_BAD_REQUEST)
try:
queryset = Blog.objects.get(pk=id)
except Blog.DoesNotExist:
return Response({"detail": "Invalid id provided"}, status=status.HTTP_400_BAD_REQUEST)
serializer = self.get_serializer(queryset)
return Response(serializer.data, status=status.HTTP_200_OK)
def put(self, request, id):
if not id:
return Response({"detail": "Invalid request"}, status=status.HTTP_400_BAD_REQUEST)
try:
data = request.data
data['slug'] = slugify(data['title'])
instance = Blog.objects.get(pk=id, creator=request.user)
serializer = self.get_serializer(instance, data=data)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
except Blog.DoesNotExist:
return Response({"detail": "Invalid id provided"}, status=status.HTTP_400_BAD_REQUEST)
except serializers.ValidationError as e:
return Response(e.detail, status=status.HTTP_400_BAD_REQUEST)
except Exception:
return Response({"detail": "Something went wrong"}, status=status.HTTP_400_BAD_REQUEST)
class TopicView(ListAPIView, CreateAPIView, UpdateAPIView):
permission_classes = (IsSuperUser,)
serializer_class = TopicSerializer
def get(self, request):
queryset = Topic.objects.all()
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
def create(self, request, *args, **kwargs):
try:
data = request.data
data['slug'] = slugify(data['topic'])
serializer = self.get_serializer(data=data)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
except serializers.ValidationError as e:
return Response(e.detail, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
print(e)
return Response({"detail": "Something went wrong"}, status=status.HTTP_400_BAD_REQUEST)
def put(self, request):
try:
data = request.data
id = data.get('id')
if not id:
return Response({"detail": "Invalid request"}, status=status.HTTP_400_BAD_REQUEST)
data['slug'] = slugify(data['topic'])
instance = Topic.objects.get(pk=id)
serializer = self.get_serializer(instance, data=data)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
except Topic.DoesNotExist:
return Response({"detail": "Invalid id provided"}, status=status.HTTP_400_BAD_REQUEST)
except serializers.ValidationError as e:
return Response(e.detail, status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
print(e)
return Response({"detail": "Something went wrong"}, status=status.HTTP_400_BAD_REQUEST)
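# Illustrative request (not part of the original module; the URL path is
# hypothetical):
#   GET /api/blogs/?reported_date_from=1590000000&reported_date_to=1590086400&topic=3&q=django&page_size=5
#   -> get_filters() builds a published_time__range spanning one extra day past
#      reported_date_to plus a related_topics filter, and the queryset is then
#      narrowed by a case-insensitive search over title, description and topic.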
| 44.044444
| 102
| 0.659267
|
a645a8f724bb429d6811002925d90aacd8a1b978
| 7,770
|
py
|
Python
|
src/eascheduler/jobs/job_base_datetime.py
|
spacemanspiff2007/eascheduler
|
849fe8f43b7bbcb8db3e76c0dda2811eb935cf39
|
[
"Apache-2.0"
] | null | null | null |
src/eascheduler/jobs/job_base_datetime.py
|
spacemanspiff2007/eascheduler
|
849fe8f43b7bbcb8db3e76c0dda2811eb935cf39
|
[
"Apache-2.0"
] | 3
|
2021-04-08T11:02:31.000Z
|
2022-02-14T06:07:56.000Z
|
src/eascheduler/jobs/job_base_datetime.py
|
spacemanspiff2007/eascheduler
|
849fe8f43b7bbcb8db3e76c0dda2811eb935cf39
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from datetime import datetime
from datetime import time as dt_time
from datetime import timedelta
from random import uniform
from typing import Callable, Optional, Tuple, Union
from pendulum import DateTime, from_timestamp, instance
from pendulum import now as get_now
from pendulum import UTC
from eascheduler.const import FAR_FUTURE, local_tz, SKIP_EXECUTION
from eascheduler.errors import BoundaryFunctionError, JobAlreadyCanceledException
from eascheduler.executors.executor import ExecutorBase
from eascheduler.jobs.job_base import get_first_timestamp, ScheduledJobBase
from eascheduler.schedulers import AsyncScheduler
try:
from typing import Literal
except ImportError:
from typing import Type
Literal = Type
class DateTimeJobBase(ScheduledJobBase):
def __init__(self, parent: AsyncScheduler, func: ExecutorBase):
super().__init__(parent, func)
# base time when the job gets executed
self._next_run_base: float = FAR_FUTURE
self._last_run_base: float = 0
# adjusting of the boundaries is running
self._adjusting: bool = False
# boundaries
self._earliest: Optional[dt_time] = None
self._latest: Optional[dt_time] = None
self._offset: Optional[timedelta] = None
self._jitter: Optional[Tuple[float, float]] = None
self._boundary_func: Optional[Callable[[datetime], datetime]] = None
def _schedule_first_run(self, first_run: Union[None, int, float, timedelta, dt_time, datetime]):
self._next_run_base = get_first_timestamp(first_run)
self._set_next_run(self._next_run_base)
def _advance_time(self, utc_dt: DateTime) -> DateTime:
raise NotImplementedError()
def _execute(self):
self._last_run_base = self._next_run_base
super()._execute()
def earliest(self, time_obj: Optional[dt_time]) -> DateTimeJobBase:
"""Set earliest boundary as time of day. ``None`` will disable boundary.
:param time_obj: time obj, scheduler will not run earlier
"""
assert isinstance(time_obj, dt_time) or time_obj is None, type(time_obj)
if self._parent is None:
raise JobAlreadyCanceledException()
changed = self._earliest != time_obj
self._earliest = time_obj
if changed and not self._adjusting:
self._apply_boundaries()
return self
def latest(self, time_obj: Optional[dt_time]) -> DateTimeJobBase:
"""Set latest boundary as time of day. ``None`` will disable boundary.
:param time_obj: time obj, scheduler will not run later
"""
assert isinstance(time_obj, dt_time) or time_obj is None, type(time_obj)
if self._parent is None:
raise JobAlreadyCanceledException()
changed = self._latest != time_obj
self._latest = time_obj
if changed and not self._adjusting:
self._apply_boundaries()
return self
def offset(self, timedelta_obj: Optional[timedelta]) -> DateTimeJobBase:
"""Set a constant offset to the calculation of the next run. ``None`` will disable the offset.
:param timedelta_obj: constant offset
"""
assert isinstance(timedelta_obj, timedelta) or timedelta_obj is None, type(timedelta_obj)
if self._parent is None:
raise JobAlreadyCanceledException()
changed = self._offset != timedelta_obj
self._offset = timedelta_obj
if changed and not self._adjusting:
self._apply_boundaries()
return self
def jitter(self, start: Optional[Union[int, float]], stop: Optional[Union[int, float]] = None) -> DateTimeJobBase:
"""Add a random jitter per call in the interval [start <= secs <= stop] to the next run.
If stop is omitted start must be positive and the interval will be [-start <= secs <= start]
Passing ``None`` as start will disable jitter.
:param start: Interval start or ``None`` to disable jitter
:param stop: Interval stop or ``None`` to build interval based on start
"""
assert isinstance(start, (int, float)) or start is None, type(start)
assert isinstance(stop, (int, float)) or stop is None, type(stop)
if self._parent is None:
raise JobAlreadyCanceledException()
jitter = None
if start is not None:
if stop is None:
stop = abs(start)
start = stop * -1
assert start < stop, f'{start} < {stop}'
jitter = (start, stop)
changed = self._jitter != jitter
self._jitter = jitter
if changed and not self._adjusting:
self._apply_boundaries()
return self
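# Illustrative calls (not part of the original class; "job" is a hypothetical
# DateTimeJobBase instance):
#   job.jitter(30)       # random offset in [-30, +30] seconds per run
#   job.jitter(10, 60)   # random offset in [10, 60] seconds
#   job.jitter(None)     # disables the jitter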
def boundary_func(self, func: Optional[Callable[[datetime], datetime]]) -> DateTimeJobBase:
"""Add a function which will be called when the datetime changes. Use this to implement custom boundaries.
Use ``None`` to disable the boundary function.
:param func: Function which returns a datetime obj, arg is a datetime with the next run time. Return
``SKIP_EXECUTION`` together with a reoccurring job to skip the proposed run time.
"""
if self._parent is None:
raise JobAlreadyCanceledException()
changed = self._boundary_func != func
self._boundary_func = func
if changed:
if self._adjusting:
raise BoundaryFunctionError('Can not change the boundary function from inside the boundary function!')
self._apply_boundaries()
return self
def _apply_boundaries(self):
self._adjusting = True
# Starting point is always the next call in local time
next_run_local: DateTime = from_timestamp(self._next_run_base, local_tz)
while True:
# custom boundaries first
if self._boundary_func is not None:
naive_obj = next_run_local.in_timezone(local_tz).naive()
custom_obj = self._boundary_func(naive_obj)
if custom_obj is SKIP_EXECUTION:
next_run_local = self._advance_time(next_run_local.in_timezone(UTC)).in_timezone(local_tz)
continue
next_run_local = instance(custom_obj, local_tz).astimezone(local_tz)
if self._offset is not None:
next_run_local += self._offset # offset doesn't have to be localized
if self._jitter is not None:
next_run_local += timedelta(seconds=uniform(self._jitter[0], self._jitter[1]))
if self._earliest is not None:
earliest = next_run_local.set(hour=self._earliest.hour, minute=self._earliest.minute,
second=self._earliest.second, microsecond=self._earliest.microsecond)
if next_run_local < earliest:
next_run_local = earliest
if self._latest is not None:
latest = next_run_local.set(hour=self._latest.hour, minute=self._latest.minute,
second=self._latest.second, microsecond=self._latest.microsecond)
if next_run_local > latest:
next_run_local = latest
# if we are in the future we have the next run
next_run = next_run_local.in_timezone(UTC)
if get_now(UTC) < next_run:
break
# Otherwise we advance a step in the future
next_run_local = self._advance_time(next_run).in_timezone(local_tz)
self._adjusting = False
self._set_next_run(next_run.timestamp())
return next_run_local
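# Hedged usage sketch (not part of the original class; "job" is a hypothetical
# instance): the boundary setters all return self, so they can be chained, and
# _apply_boundaries() re-evaluates the next run whenever one of them changes:
#   job.earliest(dt_time(8, 0)).latest(dt_time(20, 0)) \
#      .offset(timedelta(minutes=5)).jitter(30)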
| 39.846154
| 118
| 0.65148
|
af24a76ad69c3e81d2fd97da59cad5aa7f2d3243
| 12,449
|
py
|
Python
|
zomato_distribution_api/zomato_wrapper.py
|
chetanrrk/Python_API_for_Zomato
|
7047e498dd6aa11a4a6ba09cd3881f692b193d04
|
[
"MIT"
] | null | null | null |
zomato_distribution_api/zomato_wrapper.py
|
chetanrrk/Python_API_for_Zomato
|
7047e498dd6aa11a4a6ba09cd3881f692b193d04
|
[
"MIT"
] | null | null | null |
zomato_distribution_api/zomato_wrapper.py
|
chetanrrk/Python_API_for_Zomato
|
7047e498dd6aa11a4a6ba09cd3881f692b193d04
|
[
"MIT"
] | null | null | null |
import requests
import ast
import json
base_url = "https://developers.zomato.com/api/v2.1/"
class Zomato:
"""
Wrapper class to the zomato web api.
original source: https://github.com/sharadbhat/Zomatopy
"""
def __init__(self, key):
self.user_key = key
def get_categories(self):
"""
@params: None.
@return: Returns a dictionary of IDs and their respective category names.
"""
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = requests.get(base_url + "categories", headers=headers).content.decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
categories = {}
for category in a['categories']:
categories.update({category['categories']['id']: category['categories']['name']})
return categories
def get_city_id(self, city_name=None, state_name=None):
"""
@params: string, city_name.
@return:
Returns the ID for the city given as input.
If no parameters are passed, returns city id based on current location
"""
if city_name is None or state_name is None:
lat, lon = self.get_geo_coords()
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = requests.get(base_url + "cities?lat=" + str(lat) + "&lon=" + str(lon), headers=headers).\
content.decode("utf-8")
a = json.loads(r)
if len(a['location_suggestions']) == 0:
raise Exception("current city's ID cannot be found!")
else:
return a['location_suggestions'][0]['id']
if not city_name.isalpha():
raise ValueError('InvalidCityName')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = requests.get(base_url + "cities?q=" + city_name, headers=headers).content.decode("utf-8")
a = json.loads(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
if len(a['location_suggestions']) == 0:
raise Exception('invalid_city_name')
elif 'name' in a['location_suggestions'][0]:
city_state = a['location_suggestions'][0]['name'].lower()
city, state = str(city_state.split(',')[0]), str(city_state.split(',')[1])
if city == str(city_name).lower() and state.strip() == str(state_name).lower():
return a['location_suggestions'][0]['id']
else:
raise ValueError('InvalidCityId')
def get_city_name(self, city_id):
"""
@params: City ID int or str.
@return: the name of the city ID.
"""
self.is_valid_city_id(city_id)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = requests.get(base_url + "cities?city_ids=" + str(city_id), headers=headers).content.decode("utf-8")
a = json.loads(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
if a['location_suggestions'][0]['country_name'] == "":
raise ValueError('InvalidCityId')
else:
temp_city_id = a['location_suggestions'][0]['id']
if temp_city_id == str(city_id):
return a['location_suggestions'][0]['name']
def get_collections(self, city_id, limit=None):
"""
@param
city_id: int/str City ID as input.
limit: optional parameter. displays limit number of result.
@return
python dictionary of Zomato restaurant collections in a city and their respective URLs.
"""
self.is_valid_city_id(city_id)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
if limit is None:
r = requests.get(base_url + "collections?city_id=" + str(city_id), headers=headers).content.decode(
"utf-8")
else:
if str(limit).isalpha():
raise ValueError('LimitNotInteger')
else:
r = (requests.get(base_url + "collections?city_id=" + str(city_id) + "&count=" + str(limit),
headers=headers).content).decode("utf-8")
a = json.loads(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
collections = {}
for collection in a['collections']:
collections.update({collection['collection']['title']: collection['collection']['url']})
return collections
def get_cuisines(self, city_id):
"""
@params: City ID int/str
@return:
a sorted dictionary by ID of all cuisine IDs and their respective cuisine names.
key: cuisine name
value: dictionary
"""
self.is_valid_city_id(city_id)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = requests.get(base_url + "cuisines?city_id=" + str(city_id), headers=headers).content.decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
if len(a['cuisines']) == 0:
raise ValueError('InvalidCityId')
temp_cuisines = {}
cuisines = {}
for cuisine in a['cuisines']:
# temp_cuisines.update({cuisine['cuisine']['cuisine_id']: cuisine['cuisine']['cuisine_name']})
temp_cuisines.update({cuisine['cuisine']['cuisine_name']: cuisine['cuisine']['cuisine_id']})
for cuisine in sorted(temp_cuisines):
cuisines.update({cuisine: temp_cuisines[cuisine]})
return cuisines
def get_establishment_types(self, city_id):
"""
@params: City ID (int/str).
@return: sorted dictionary of all establishment type IDs and their respective establishment type names.
"""
self.is_valid_city_id(city_id)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = requests.get(base_url + "establishments?city_id=" + str(city_id), headers=headers).content.decode("utf-8")
a = ast.literal_eval(r)
self.is_key_invalid(a)
self.is_rate_exceeded(a)
temp_establishment_types = {}
establishment_types = {}
if 'establishments' in a:
for establishment_type in a['establishments']:
temp_establishment_types.update(
{establishment_type['establishment']['id']: establishment_type['establishment']['name']})
for establishment_type in sorted(temp_establishment_types):
establishment_types.update({establishment_type: temp_establishment_types[establishment_type]})
return establishment_types
else:
raise ValueError('InvalidCityId')
def get_nearby_restaurants(self, latitude="", longitude=""):
"""
@params: latitude and longitude of current or interested location.
@return: a dictionary of Restaurant IDs and their corresponding Zomato URLs.
"""
"""obtains the current location's latitude and longitude if none is provided"""
if latitude == "" or longitude == "":
latitude, longitude = self.get_geo_coords()
try:
float(latitude)
float(longitude)
except ValueError:
raise ValueError('InvalidLatitudeOrLongitude')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(base_url + "geocode?lat=" + str(latitude) + "&lon=" + str(longitude),
headers=headers).content).decode("utf-8")
a = json.loads(r)
nearby_restaurants = {}
for nearby_restaurant in a['nearby_restaurants']:
nearby_restaurants.update({nearby_restaurant['restaurant']['id']: nearby_restaurant['restaurant']['url']})
return nearby_restaurants
def get_restaurant(self, restaurant_id):
"""
@params: Restaurant ID (int/str) as input.
@return: a dictionary of restaurant details.
"""
self.is_valid_restaurant_id(restaurant_id)
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = requests.get(base_url + "restaurant?res_id=" + str(restaurant_id), headers=headers).content.decode(
"utf-8")
a = json.loads(r)
if 'code' in a:
if a['code'] == 404:
raise ValueError('InvalidRestaurantId')
restaurant_details = {}
restaurant_details.update({"name": a['name']})
restaurant_details.update({"url": a['url']})
restaurant_details.update({"location": a['location']['address']})
restaurant_details.update({"city": a['location']['city']})
restaurant_details.update({"city_ID": a['location']['city_id']})
restaurant_details.update({"user_rating": a['user_rating']['aggregate_rating']})
restaurant_details = DotDict(restaurant_details)
return restaurant_details
def restaurant_search(self, query="", latitude="", longitude="", radius="", cuisines="", limit=5):
"""
@params
query: string keyword to query.
latitude: latitude of interested place.
longitude: longitude of interested place.
radius: search restaurants within a radius in meters
cuisines: multiple cuisines as input in string format.
@return: a list of Restaurants.
"""
cuisines = "%2C".join(cuisines.split(","))
"""obtains the current location's latitude and longitude if none is provided"""
if latitude == "" or longitude == "":
latitude, longitude = self.get_geo_coords()
if str(limit).isalpha():
raise ValueError('LimitNotInteger')
headers = {'Accept': 'application/json', 'user-key': self.user_key}
r = (requests.get(
base_url + "search?q=" + str(query) + "&count=" + str(limit) + "&lat=" + str(latitude) + "&lon=" + str(
longitude) + "&radius=" + str(radius) + "&cuisines=" + str(cuisines), headers=headers).content).decode("utf-8")
a = json.loads(r)
if a['results_found'] == 0:
return []
else:
return a # dictionary of all restaurants
def is_valid_restaurant_id(self, restaurant_ID):
"""
Checks if the Restaurant ID is valid or invalid.
If invalid, throws a InvalidRestaurantId Exception.
@param: id of a restaurant
@return:
None
"""
restaurant_ID = str(restaurant_ID)
if not restaurant_ID.isnumeric():
raise ValueError('InvalidRestaurantId')
def is_valid_city_id(self, city_ID):
"""
Checks if the City ID is valid or invalid.
If invalid, throws a InvalidCityId Exception.
@param: id of a city
@return: None
"""
city_ID = str(city_ID)
if not city_ID.isnumeric():
raise ValueError('InvalidCityId')
def is_key_invalid(self, a):
"""
Checks if the API key provided is valid or invalid.
If invalid, throws a InvalidKey Exception.
@params: return of the web request in 'json'
@return: None
"""
if 'code' in a:
if a['code'] == 403:
raise ValueError('InvalidKey')
def is_rate_exceeded(self, a):
"""
Checks if the request limit for the API key is exceeded or not.
If exceeded, throws a ApiLimitExceeded Exception.
@params: return of the web request in 'json'
@return: None
"""
if 'code' in a:
if a['code'] == 440:
raise Exception('ApiLimitExceeded')
def get_geo_coords(self):
"""
captures latitude and longitude based on current location
@params: None
@return: latitude, longitude
"""
ip_request = requests.get('https://get.geojs.io/v1/ip.json')
my_ip = ip_request.json()['ip']
geo_request_url = 'https://get.geojs.io/v1/ip/geo/' + my_ip + '.json'
geo_request = requests.get(geo_request_url)
geo_data = geo_request.json()
return geo_data['latitude'], geo_data['longitude'] # latitude, longitude
class DotDict(dict):
"""
Dot notation access to dictionary attributes
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
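# Hedged usage sketch (not part of the original module; the API key is a
# placeholder and results depend on the live Zomato API):
#   z = Zomato("YOUR_API_KEY")
#   nearby = z.get_nearby_restaurants()        # geolocates via get_geo_coords()
#   first_id = next(iter(nearby))              # dict maps restaurant id -> URL
#   details = z.get_restaurant(first_id)
#   print(details.name, details.user_rating)   # DotDict allows attribute access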
| 36.294461
| 127
| 0.594506
|
42f14f540405455089767bc46be4dc000695d2d2
| 60,036
|
bzl
|
Python
|
tensorflow/workspace.bzl
|
bilisun/tensorflow
|
4585e33550e84051ec2f0f1cedcc24894cc42b2f
|
[
"Apache-2.0"
] | 1
|
2022-02-22T08:41:05.000Z
|
2022-02-22T08:41:05.000Z
|
tensorflow/workspace.bzl
|
bilisun/tensorflow
|
4585e33550e84051ec2f0f1cedcc24894cc42b2f
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/workspace.bzl
|
bilisun/tensorflow
|
4585e33550e84051ec2f0f1cedcc24894cc42b2f
|
[
"Apache-2.0"
] | null | null | null |
# TensorFlow external dependencies that can be loaded in WORKSPACE files.
load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")
load("//third_party/gpus:rocm_configure.bzl", "rocm_configure")
load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure")
load("//third_party/nccl:nccl_configure.bzl", "nccl_configure")
load("//third_party/mkl:build_defs.bzl", "mkl_repository")
load("//third_party/git:git_configure.bzl", "git_configure")
load("//third_party/py:python_configure.bzl", "python_configure")
load("//third_party/sycl:sycl_configure.bzl", "sycl_configure")
load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure")
load("//third_party/toolchains/remote:configure.bzl", "remote_execution_configure")
load("//third_party/toolchains/clang6:repo.bzl", "clang6_configure")
load("//third_party/toolchains/cpus/arm:arm_compiler_configure.bzl", "arm_compiler_configure")
load("//third_party/toolchains/embedded/arm-linux:arm_linux_toolchain_configure.bzl", "arm_linux_toolchain_configure")
load("//third_party:repo.bzl", "tf_http_archive")
load("//third_party/clang_toolchain:cc_configure_clang.bzl", "cc_download_clang_toolchain")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")
load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external")
load(
"//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl",
"def_file_filter_configure",
)
load("//third_party/FP16:workspace.bzl", FP16 = "repo")
load("//third_party/aws:workspace.bzl", aws = "repo")
load("//third_party/clog:workspace.bzl", clog = "repo")
load("//third_party/cpuinfo:workspace.bzl", cpuinfo = "repo")
load("//third_party/dlpack:workspace.bzl", dlpack = "repo")
load("//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")
load("//third_party/hexagon:workspace.bzl", hexagon_nn = "repo")
load("//third_party/highwayhash:workspace.bzl", highwayhash = "repo")
load("//third_party/hwloc:workspace.bzl", hwloc = "repo")
load("//third_party/icu:workspace.bzl", icu = "repo")
load("//third_party/jpeg:workspace.bzl", jpeg = "repo")
load("//third_party/nasm:workspace.bzl", nasm = "repo")
load("//third_party/opencl_headers:workspace.bzl", opencl_headers = "repo")
load("//third_party/kissfft:workspace.bzl", kissfft = "repo")
load("//third_party/pasta:workspace.bzl", pasta = "repo")
load("//third_party/psimd:workspace.bzl", psimd = "repo")
load("//third_party/ruy:workspace.bzl", ruy = "repo")
load("//third_party/sobol_data:workspace.bzl", sobol_data = "repo")
load("//third_party/vulkan_headers:workspace.bzl", vulkan_headers = "repo")
load("//third_party/toolchains/remote_config:configs.bzl", "initialize_rbe_configs")
def initialize_third_party():
""" Load third party repositories. See above load() statements. """
FP16()
aws()
clog()
cpuinfo()
dlpack()
flatbuffers()
hexagon_nn()
highwayhash()
hwloc()
icu()
kissfft()
jpeg()
nasm()
opencl_headers()
pasta()
psimd()
sobol_data()
vulkan_headers()
ruy()
# Sanitize a dependency so that it works correctly from code that includes
# TensorFlow as a submodule.
def clean_dep(dep):
return str(Label(dep))
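# Illustrative effect (assuming TensorFlow is vendored as the external
# repository "@org_tensorflow"): clean_dep("//third_party:png.BUILD") resolves
# to "@org_tensorflow//third_party:png.BUILD" rather than to the including
# workspace's own //third_party:png.BUILD, because Label() is evaluated
# relative to the repository that defines this .bzl file.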
# If TensorFlow is linked as a submodule.
# path_prefix is no longer used.
# tf_repo_name is thought to be under consideration.
def tf_workspace(path_prefix = "", tf_repo_name = ""):
tf_repositories(path_prefix, tf_repo_name)
tf_bind()
# Toolchains & platforms required by Tensorflow to build.
def tf_toolchains():
native.register_execution_platforms("@local_execution_config_platform//:platform")
native.register_toolchains("@local_execution_config_python//:py_toolchain")
# Define all external repositories required by TensorFlow
def tf_repositories(path_prefix = "", tf_repo_name = ""):
"""All external dependencies for TF builds."""
# Initialize toolchains and platforms.
tf_toolchains()
# Loads all external repos to configure RBE builds.
initialize_rbe_configs()
# Note that we check the minimum bazel version in WORKSPACE.
clang6_configure(name = "local_config_clang6")
cc_download_clang_toolchain(name = "local_config_download_clang")
cuda_configure(name = "local_config_cuda")
tensorrt_configure(name = "local_config_tensorrt")
nccl_configure(name = "local_config_nccl")
git_configure(name = "local_config_git")
sycl_configure(name = "local_config_sycl")
syslibs_configure(name = "local_config_syslibs")
python_configure(name = "local_config_python")
rocm_configure(name = "local_config_rocm")
remote_execution_configure(name = "local_config_remote_execution")
initialize_third_party()
# For windows bazel build
# TODO: Remove def file filter when TensorFlow can export symbols properly on Windows.
def_file_filter_configure(name = "local_config_def_file_filter")
# Point //external/local_config_arm_compiler to //external/arm_compiler
arm_compiler_configure(
name = "local_config_arm_compiler",
build_file = clean_dep("//third_party/toolchains/cpus/arm:BUILD"),
remote_config_repo_arm = "../arm_compiler",
remote_config_repo_aarch64 = "../aarch64_compiler",
)
# TFLite crossbuild toolchain for embedded Linux
arm_linux_toolchain_configure(
name = "local_config_embedded_arm",
build_file = clean_dep("//third_party/toolchains/embedded/arm-linux:BUILD"),
aarch64_repo = "../aarch64_linux_toolchain",
armhf_repo = "../armhf_linux_toolchain",
)
mkl_repository(
name = "mkl_linux",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "a936d6b277a33d2a027a024ea8e65df62bd2e162c7ca52c48486ed9d5dc27160",
strip_prefix = "mklml_lnx_2019.0.5.20190502",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.21/mklml_lnx_2019.0.5.20190502.tgz",
"https://github.com/intel/mkl-dnn/releases/download/v0.21/mklml_lnx_2019.0.5.20190502.tgz",
],
)
mkl_repository(
name = "mkl_windows",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "33cc27652df3b71d7cb84b26718b5a2e8965e2c864a502347db02746d0430d57",
strip_prefix = "mklml_win_2020.0.20190813",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.21/mklml_win_2020.0.20190813.zip",
"https://github.com/intel/mkl-dnn/releases/download/v0.21/mklml_win_2020.0.20190813.zip",
],
)
mkl_repository(
name = "mkl_darwin",
build_file = clean_dep("//third_party/mkl:mkl.BUILD"),
sha256 = "2fbb71a0365d42a39ea7906568d69b1db3bfc9914fee75eedb06c5f32bf5fa68",
strip_prefix = "mklml_mac_2019.0.5.20190502",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/mkl-dnn/releases/download/v0.21/mklml_mac_2019.0.5.20190502.tgz",
"https://github.com/intel/mkl-dnn/releases/download/v0.21/mklml_mac_2019.0.5.20190502.tgz",
],
)
if path_prefix:
print("path_prefix was specified to tf_workspace but is no longer used " +
"and will be removed in the future.")
tf_http_archive(
name = "XNNPACK",
sha256 = "742eb377e0d304a0bfcb64fccfee2b3fe27932a2d5a95a22bfbc7a6fb4459e1a",
strip_prefix = "XNNPACK-0af63ab36b899559bd1a92bbc327f8137e53c15c",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/XNNPACK/archive/0af63ab36b899559bd1a92bbc327f8137e53c15c.zip",
"https://github.com/google/XNNPACK/archive/0af63ab36b899559bd1a92bbc327f8137e53c15c.zip",
],
)
tf_http_archive(
name = "FXdiv",
sha256 = "ab7dfb08829bee33dca38405d647868fb214ac685e379ec7ef2bebcd234cd44d",
strip_prefix = "FXdiv-b408327ac2a15ec3e43352421954f5b1967701d1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/Maratyszcza/FXdiv/archive/b408327ac2a15ec3e43352421954f5b1967701d1.zip",
"https://github.com/Maratyszcza/FXdiv/archive/b408327ac2a15ec3e43352421954f5b1967701d1.zip",
],
)
tf_http_archive(
name = "pthreadpool",
sha256 = "03312bd7d8d9e379d685258963ee8820767158b5946cdd00336ff17dae851001",
strip_prefix = "pthreadpool-029c88620802e1361ccf41d1970bd5b07fd6b7bb",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/Maratyszcza/pthreadpool/archive/029c88620802e1361ccf41d1970bd5b07fd6b7bb.zip",
"https://github.com/Maratyszcza/pthreadpool/archive/029c88620802e1361ccf41d1970bd5b07fd6b7bb.zip",
],
)
# Important: If you are upgrading MKL-DNN, then update the version numbers
# in third_party/mkl_dnn/mkldnn.BUILD. In addition, the new version of
# MKL-DNN might require upgrading MKL ML libraries also. If they need to be
# upgraded then update the version numbers on all three versions above
# (Linux, Mac, Windows).
tf_http_archive(
name = "mkl_dnn",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn.BUILD"),
sha256 = "a0211aeb5e7dad50b97fa5dffc1a2fe2fe732572d4164e1ee8750a2ede43fbec",
strip_prefix = "oneDNN-0.21.3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/oneapi-src/oneDNN/archive/v0.21.3.tar.gz",
"https://github.com/oneapi-src/oneDNN/archive/v0.21.3.tar.gz",
],
)
tf_http_archive(
name = "mkl_dnn_v1",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn_v1.BUILD"),
sha256 = "aef4d2a726f76f5b98902491a1a4ac69954039aa8e5a1d67ef6ce58ed00e23a6",
strip_prefix = "oneDNN-1.5.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/oneapi-src/oneDNN/archive/v1.5.1.tar.gz",
"https://github.com/oneapi-src/oneDNN/archive/v1.5.1.tar.gz",
],
)
tf_http_archive(
name = "com_google_absl",
build_file = clean_dep("//third_party:com_google_absl.BUILD"),
# TODO: Remove the patch when https://github.com/abseil/abseil-cpp/issues/326 is resolved
# and when TensorFlow is built against CUDA 10.2
patch_file = clean_dep("//third_party:com_google_absl_fix_mac_and_nvcc_build.patch"),
sha256 = "f368a8476f4e2e0eccf8a7318b98dafbe30b2600f4e3cf52636e5eb145aba06a", # SHARED_ABSL_SHA
strip_prefix = "abseil-cpp-df3ea785d8c30a9503321a3d35ee7d35808f190d",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz",
"https://github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz",
],
)
tf_http_archive(
name = "eigen_archive",
build_file = clean_dep("//third_party:eigen.BUILD"),
patch_file = clean_dep("//third_party/eigen3:gpu_packet_math.patch"),
sha256 = "a3c10a8c14f55e9f09f98b0a0ac6874c21bda91f65b7469d9b1f6925990e867b", # SHARED_EIGEN_SHA
strip_prefix = "eigen-d10b27fe37736d2944630ecd7557cefa95cf87c9",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/gitlab.com/libeigen/eigen/-/archive/d10b27fe37736d2944630ecd7557cefa95cf87c9/eigen-d10b27fe37736d2944630ecd7557cefa95cf87c9.tar.gz",
"https://gitlab.com/libeigen/eigen/-/archive/d10b27fe37736d2944630ecd7557cefa95cf87c9/eigen-d10b27fe37736d2944630ecd7557cefa95cf87c9.tar.gz",
],
)
tf_http_archive(
name = "arm_compiler",
build_file = clean_dep("//:arm_compiler.BUILD"),
sha256 = "b9e7d50ffd9996ed18900d041d362c99473b382c0ae049b2fce3290632d2656f",
strip_prefix = "rpi-newer-crosstools-eb68350c5c8ec1663b7fe52c742ac4271e3217c5/x64-gcc-6.5.0/arm-rpi-linux-gnueabihf/",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz",
"https://github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz",
],
)
tf_http_archive(
# This is the latest `aarch64-none-linux-gnu` compiler provided by ARM
# See https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-a/downloads
# The archive contains GCC version 9.2.1
name = "aarch64_compiler",
build_file = "//:arm_compiler.BUILD",
sha256 = "8dfe681531f0bd04fb9c53cf3c0a3368c616aa85d48938eebe2b516376e06a66",
strip_prefix = "gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/-/media/Files/downloads/gnu-a/9.2-2019.12/binrel/gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz",
"https://developer.arm.com/-/media/Files/downloads/gnu-a/9.2-2019.12/binrel/gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz",
],
)
tf_http_archive(
name = "aarch64_linux_toolchain",
build_file = clean_dep("//third_party/toolchains/embedded/arm-linux:aarch64-linux-toolchain.BUILD"),
sha256 = "8ce3e7688a47d8cd2d8e8323f147104ae1c8139520eca50ccf8a7fa933002731",
strip_prefix = "gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz",
"https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz",
],
)
tf_http_archive(
name = "armhf_linux_toolchain",
build_file = clean_dep("//third_party/toolchains/embedded/arm-linux:armhf-linux-toolchain.BUILD"),
sha256 = "d4f6480ecaa99e977e3833cc8a8e1263f9eecd1ce2d022bb548a24c4f32670f5",
strip_prefix = "gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf.tar.xz",
"https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf.tar.xz",
],
)
tf_http_archive(
name = "libxsmm_archive",
build_file = clean_dep("//third_party:libxsmm.BUILD"),
sha256 = "9c0af4509ea341d1ee2c6c19fc6f19289318c3bd4b17844efeb9e7f9691abf76",
strip_prefix = "libxsmm-1.14",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/hfp/libxsmm/archive/1.14.tar.gz",
"https://github.com/hfp/libxsmm/archive/1.14.tar.gz",
],
)
tf_http_archive(
name = "com_googlesource_code_re2",
sha256 = "d070e2ffc5476c496a6a872a6f246bfddce8e7797d6ba605a7c8d72866743bf9",
strip_prefix = "re2-506cfa4bffd060c06ec338ce50ea3468daa6c814",
system_build_file = clean_dep("//third_party/systemlibs:re2.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
"https://github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
],
)
tf_http_archive(
name = "com_github_google_crc32c",
sha256 = "6b3b1d861bb8307658b2407bc7a4c59e566855ef5368a60b35c893551e4788e9",
build_file = "@com_github_googlecloudplatform_google_cloud_cpp//bazel:crc32c.BUILD",
strip_prefix = "crc32c-1.0.6",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/crc32c/archive/1.0.6.tar.gz",
"https://github.com/google/crc32c/archive/1.0.6.tar.gz",
],
)
tf_http_archive(
name = "com_github_googlecloudplatform_google_cloud_cpp",
sha256 = "ff82045b9491f0d880fc8e5c83fd9542eafb156dcac9ff8c6209ced66ed2a7f0",
strip_prefix = "google-cloud-cpp-1.17.1",
repo_mapping = {
"@com_github_curl_curl": "@curl",
"@com_github_nlohmann_json": "@nlohmann_json_lib",
},
system_build_file = clean_dep("//third_party/systemlibs:google_cloud_cpp.BUILD"),
system_link_files = {
"//third_party/systemlibs:google_cloud_cpp.google.cloud.bigtable.BUILD": "google/cloud/bigtable/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/google-cloud-cpp/archive/v1.17.1.tar.gz",
"https://github.com/googleapis/google-cloud-cpp/archive/v1.17.1.tar.gz",
],
)
tf_http_archive(
name = "com_github_googlecloudplatform_tensorflow_gcp_tools",
sha256 = "5e9ebe17eaa2895eb7f77fefbf52deeda7c4b63f5a616916b823eb74f3a0c542",
strip_prefix = "tensorflow-gcp-tools-2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/GoogleCloudPlatform/tensorflow-gcp-tools/archive/2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5.tar.gz",
"https://github.com/GoogleCloudPlatform/tensorflow-gcp-tools/archive/2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5.tar.gz",
],
)
tf_http_archive(
name = "com_google_googleapis",
build_file = clean_dep("//third_party/googleapis:googleapis.BUILD"),
sha256 = "7ebab01b06c555f4b6514453dc3e1667f810ef91d1d4d2d3aa29bb9fcb40a900",
strip_prefix = "googleapis-541b1ded4abadcc38e8178680b0677f65594ea6f",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/googleapis/archive/541b1ded4abadcc38e8178680b0677f65594ea6f.zip",
"https://github.com/googleapis/googleapis/archive/541b1ded4abadcc38e8178680b0677f65594ea6f.zip",
],
)
tf_http_archive(
name = "gemmlowp",
sha256 = "43146e6f56cb5218a8caaab6b5d1601a083f1f31c06ff474a4378a7d35be9cfb", # SHARED_GEMMLOWP_SHA
strip_prefix = "gemmlowp-fda83bdc38b118cc6b56753bd540caa49e570745",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/gemmlowp/archive/fda83bdc38b118cc6b56753bd540caa49e570745.zip",
"https://github.com/google/gemmlowp/archive/fda83bdc38b118cc6b56753bd540caa49e570745.zip",
],
)
tf_http_archive(
name = "farmhash_archive",
build_file = clean_dep("//third_party:farmhash.BUILD"),
sha256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0", # SHARED_FARMHASH_SHA
strip_prefix = "farmhash-816a4ae622e964763ca0862d9dbd19324a1eaf45",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
"https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
],
)
tf_http_archive(
name = "png",
build_file = clean_dep("//third_party:png.BUILD"),
patch_file = clean_dep("//third_party:png_fix_rpi.patch"),
sha256 = "ca74a0dace179a8422187671aee97dd3892b53e168627145271cad5b5ac81307",
strip_prefix = "libpng-1.6.37",
system_build_file = clean_dep("//third_party/systemlibs:png.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
"https://github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
],
)
tf_http_archive(
name = "org_sqlite",
build_file = clean_dep("//third_party:sqlite.BUILD"),
sha256 = "e9cec01d4519e2d49b3810615237325263fe1feaceae390ee12b4a29bd73dbe2",
strip_prefix = "sqlite-amalgamation-3320300",
system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3320300.zip",
"https://www.sqlite.org/2020/sqlite-amalgamation-3320300.zip",
],
)
tf_http_archive(
name = "gif",
build_file = clean_dep("//third_party:gif.BUILD"),
patch_file = clean_dep("//third_party:gif_fix_strtok_r.patch"),
sha256 = "31da5562f44c5f15d63340a09a4fd62b48c45620cd302f77a6d9acf0077879bd",
strip_prefix = "giflib-5.2.1",
system_build_file = clean_dep("//third_party/systemlibs:gif.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
"https://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
],
)
tf_http_archive(
name = "six_archive",
build_file = clean_dep("//third_party:six.BUILD"),
sha256 = "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
strip_prefix = "six-1.15.0",
system_build_file = clean_dep("//third_party/systemlibs:six.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/source/s/six/six-1.15.0.tar.gz",
"https://pypi.python.org/packages/source/s/six/six-1.15.0.tar.gz",
],
)
tf_http_archive(
name = "astor_archive",
build_file = clean_dep("//third_party:astor.BUILD"),
sha256 = "95c30d87a6c2cf89aa628b87398466840f0ad8652f88eb173125a6df8533fb8d",
strip_prefix = "astor-0.7.1",
system_build_file = clean_dep("//third_party/systemlibs:astor.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
"https://pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
],
)
tf_http_archive(
name = "astunparse_archive",
build_file = clean_dep("//third_party:astunparse.BUILD"),
sha256 = "5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872",
strip_prefix = "astunparse-1.6.3/lib",
system_build_file = clean_dep("//third_party/systemlibs:astunparse.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/files.pythonhosted.org/packages/f3/af/4182184d3c338792894f34a62672919db7ca008c89abee9b564dd34d8029/astunparse-1.6.3.tar.gz",
"https://files.pythonhosted.org/packages/f3/af/4182184d3c338792894f34a62672919db7ca008c89abee9b564dd34d8029/astunparse-1.6.3.tar.gz",
],
)
filegroup_external(
name = "astunparse_license",
licenses = ["notice"], # PSFL
sha256_urls = {
"92fc0e4f4fa9460558eedf3412b988d433a2dcbb3a9c45402a145a4fab8a6ac6": [
"https://storage.googleapis.com/mirror.tensorflow.org/raw.githubusercontent.com/simonpercivall/astunparse/v1.6.2/LICENSE",
"https://raw.githubusercontent.com/simonpercivall/astunparse/v1.6.2/LICENSE",
],
},
)
tf_http_archive(
name = "functools32_archive",
build_file = clean_dep("//third_party:functools32.BUILD"),
sha256 = "f6253dfbe0538ad2e387bd8fdfd9293c925d63553f5813c4e587745416501e6d",
strip_prefix = "functools32-3.2.3-2",
system_build_file = clean_dep("//third_party/systemlibs:functools32.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
"https://pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
],
)
tf_http_archive(
name = "gast_archive",
build_file = clean_dep("//third_party:gast.BUILD"),
sha256 = "b881ef288a49aa81440d2c5eb8aeefd4c2bb8993d5f50edae7413a85bfdb3b57",
strip_prefix = "gast-0.3.3",
system_build_file = clean_dep("//third_party/systemlibs:gast.BUILD"),
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/12/59/eaa15ab9710a20e22225efd042cd2d6a0b559a0656d5baba9641a2a4a921/gast-0.3.3.tar.gz",
"https://files.pythonhosted.org/packages/12/59/eaa15ab9710a20e22225efd042cd2d6a0b559a0656d5baba9641a2a4a921/gast-0.3.3.tar.gz",
],
)
tf_http_archive(
name = "termcolor_archive",
build_file = clean_dep("//third_party:termcolor.BUILD"),
sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
strip_prefix = "termcolor-1.1.0",
system_build_file = clean_dep("//third_party/systemlibs:termcolor.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
"https://pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
],
)
tf_http_archive(
name = "typing_extensions_archive",
build_file = clean_dep("//third_party:typing_extensions.BUILD"),
sha256 = "79ee589a3caca649a9bfd2a8de4709837400dfa00b6cc81962a1e6a1815969ae",
strip_prefix = "typing_extensions-3.7.4.2/src_py3",
system_build_file = clean_dep("//third_party/systemlibs:typing_extensions.BUILD"),
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/6a/28/d32852f2af6b5ead85d396249d5bdf450833f3a69896d76eb480d9c5e406/typing_extensions-3.7.4.2.tar.gz",
"https://files.pythonhosted.org/packages/6a/28/d32852f2af6b5ead85d396249d5bdf450833f3a69896d76eb480d9c5e406/typing_extensions-3.7.4.2.tar.gz",
],
)
filegroup_external(
name = "typing_extensions_license",
licenses = ["notice"], # PSFL
sha256_urls = {
"ff17ce94e102024deb68773eb1cc74ca76da4e658f373531f0ac22d68a6bb1ad": [
"http://mirror.tensorflow.org/raw.githubusercontent.com/python/typing/master/typing_extensions/LICENSE",
"https://raw.githubusercontent.com/python/typing/master/typing_extensions/LICENSE",
],
},
)
tf_http_archive(
name = "opt_einsum_archive",
build_file = clean_dep("//third_party:opt_einsum.BUILD"),
sha256 = "d3d464b4da7ef09e444c30e4003a27def37f85ff10ff2671e5f7d7813adac35b",
strip_prefix = "opt_einsum-2.3.2",
system_build_file = clean_dep("//third_party/systemlibs:opt_einsum.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
"https://pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
],
)
tf_http_archive(
name = "absl_py",
sha256 = "603febc9b95a8f2979a7bdb77d2f5e4d9b30d4e0d59579f88eba67d4e4cc5462",
strip_prefix = "abseil-py-pypi-v0.9.0",
system_build_file = clean_dep("//third_party/systemlibs:absl_py.BUILD"),
system_link_files = {
"//third_party/systemlibs:absl_py.absl.BUILD": "absl/BUILD",
"//third_party/systemlibs:absl_py.absl.flags.BUILD": "absl/flags/BUILD",
"//third_party/systemlibs:absl_py.absl.testing.BUILD": "absl/testing/BUILD",
"//third_party/systemlibs:absl_py.absl.logging.BUILD": "absl/logging/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-py/archive/pypi-v0.9.0.tar.gz",
"https://github.com/abseil/abseil-py/archive/pypi-v0.9.0.tar.gz",
],
)
tf_http_archive(
name = "enum34_archive",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
"https://pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
],
sha256 = "8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1",
build_file = clean_dep("//third_party:enum34.BUILD"),
system_build_file = clean_dep("//third_party/systemlibs:enum34.BUILD"),
strip_prefix = "enum34-1.1.6/enum",
)
tf_http_archive(
name = "org_python_pypi_backports_weakref",
build_file = clean_dep("//third_party:backports_weakref.BUILD"),
sha256 = "8813bf712a66b3d8b85dc289e1104ed220f1878cf981e2fe756dfaabe9a82892",
strip_prefix = "backports.weakref-1.0rc1/src",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
"https://pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
],
)
tf_http_archive(
name = "dill_archive",
build_file = clean_dep("//third_party:dill.BUILD"),
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/c7/11/345f3173809cea7f1a193bfbf02403fff250a3360e0e118a1630985e547d/dill-0.3.1.1.tar.gz",
"https://files.pythonhosted.org/packages/c7/11/345f3173809cea7f1a193bfbf02403fff250a3360e0e118a1630985e547d/dill-0.3.1.1.tar.gz",
],
sha256 = "42d8ef819367516592a825746a18073ced42ca169ab1f5f4044134703e7a049c",
strip_prefix = "dill-0.3.1.1",
)
tf_http_archive(
name = "tblib_archive",
build_file = clean_dep("//third_party:tblib.BUILD"),
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/ec/c4/8c651f3240a73c28a218194f3d527eb2be5a173d08501060cdee84ade33f/tblib-1.3.2.tar.gz",
"https://files.pythonhosted.org/packages/ec/c4/8c651f3240a73c28a218194f3d527eb2be5a173d08501060cdee84ade33f/tblib-1.3.2.tar.gz",
],
sha256 = "436e4200e63d92316551179dc540906652878df4ff39b43db30fcf6400444fe7",
strip_prefix = "tblib-1.3.2",
)
filegroup_external(
name = "org_python_license",
licenses = ["notice"], # Python 2.0
sha256_urls = {
"e76cacdf0bdd265ff074ccca03671c33126f597f39d0ed97bc3e5673d9170cf6": [
"https://storage.googleapis.com/mirror.tensorflow.org/docs.python.org/2.7/_sources/license.rst.txt",
"https://docs.python.org/2.7/_sources/license.rst.txt",
],
},
)
tf_http_archive(
name = "com_google_protobuf",
patch_file = clean_dep("//third_party/protobuf:protobuf.patch"),
sha256 = "cfcba2df10feec52a84208693937c17a4b5df7775e1635c1e3baffc487b24c9b",
strip_prefix = "protobuf-3.9.2",
system_build_file = clean_dep("//third_party/systemlibs:protobuf.BUILD"),
system_link_files = {
"//third_party/systemlibs:protobuf.bzl": "protobuf.bzl",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/protocolbuffers/protobuf/archive/v3.9.2.zip",
"https://github.com/protocolbuffers/protobuf/archive/v3.9.2.zip",
],
)
tf_http_archive(
name = "nsync",
sha256 = "caf32e6b3d478b78cff6c2ba009c3400f8251f646804bcb65465666a9cea93c4",
strip_prefix = "nsync-1.22.0",
system_build_file = clean_dep("//third_party/systemlibs:nsync.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/nsync/archive/1.22.0.tar.gz",
"https://github.com/google/nsync/archive/1.22.0.tar.gz",
],
)
tf_http_archive(
name = "com_google_googletest",
sha256 = "ff7a82736e158c077e76188232eac77913a15dac0b22508c390ab3f88e6d6d86",
strip_prefix = "googletest-b6cd405286ed8635ece71c72f118e659f4ade3fb",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
"https://github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
],
)
tf_http_archive(
name = "com_github_gflags_gflags",
sha256 = "ae27cdbcd6a2f935baa78e4f21f675649271634c092b1be01469440495609d0e",
strip_prefix = "gflags-2.2.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/gflags/gflags/archive/v2.2.1.tar.gz",
"https://github.com/gflags/gflags/archive/v2.2.1.tar.gz",
],
)
tf_http_archive(
name = "pcre",
build_file = clean_dep("//third_party:pcre.BUILD"),
sha256 = "69acbc2fbdefb955d42a4c606dfde800c2885711d2979e356c0636efde9ec3b5",
strip_prefix = "pcre-8.42",
system_build_file = clean_dep("//third_party/systemlibs:pcre.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
"https://ftp.exim.org/pub/pcre/pcre-8.42.tar.gz",
],
)
tf_http_archive(
name = "curl",
build_file = clean_dep("//third_party:curl.BUILD"),
sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98",
strip_prefix = "curl-7.69.1",
system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.69.1.tar.gz",
"https://curl.haxx.se/download/curl-7.69.1.tar.gz",
],
)
# WARNING: make sure ncteisen@ and vpai@ are cc-ed on any CL to change the below rule
tf_http_archive(
name = "com_github_grpc_grpc",
sha256 = "b956598d8cbe168b5ee717b5dafa56563eb5201a947856a6688bbeac9cac4e1f",
strip_prefix = "grpc-b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd",
system_build_file = clean_dep("//third_party/systemlibs:grpc.BUILD"),
patch_file = clean_dep("//third_party/grpc:generate_cc_env_fix.patch"),
system_link_files = {
"//third_party/systemlibs:BUILD": "bazel/BUILD",
"//third_party/systemlibs:grpc.BUILD": "src/compiler/BUILD",
"//third_party/systemlibs:grpc.bazel.grpc_deps.bzl": "bazel/grpc_deps.bzl",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz",
"https://github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz",
],
)
tf_http_archive(
name = "linenoise",
build_file = clean_dep("//third_party:linenoise.BUILD"),
sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
"https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
],
)
# Check out LLVM and MLIR from llvm-project.
LLVM_COMMIT = "be7cef789e75a354831d528ecc76b325f0f5da68"
LLVM_SHA256 = "d1ca99c9a64d76e47d9f6068749f8f7f551cd6f1e5a6557a55b00d3d23b12f53"
LLVM_URLS = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
]
tf_http_archive(
name = "llvm-project",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = LLVM_URLS,
additional_build_files = {
clean_dep("//third_party/llvm:llvm.autogenerated.BUILD"): "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
tf_http_archive(
name = "lmdb",
build_file = clean_dep("//third_party:lmdb.BUILD"),
sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28",
strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb",
system_build_file = clean_dep("//third_party/systemlibs:lmdb.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
"https://github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
],
)
tf_http_archive(
name = "jsoncpp_git",
build_file = clean_dep("//third_party:jsoncpp.BUILD"),
sha256 = "77a402fb577b2e0e5d0bdc1cf9c65278915cdb25171e3452c68b6da8a561f8f0",
strip_prefix = "jsoncpp-1.9.2",
system_build_file = clean_dep("//third_party/systemlibs:jsoncpp.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.9.2.tar.gz",
"https://github.com/open-source-parsers/jsoncpp/archive/1.9.2.tar.gz",
],
)
tf_http_archive(
name = "boringssl",
sha256 = "a9c3b03657d507975a32732f04563132b4553c20747cec6dc04de475c8bdf29f",
strip_prefix = "boringssl-80ca9f9f6ece29ab132cce4cf807a9465a18cfac",
system_build_file = clean_dep("//third_party/systemlibs:boringssl.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/boringssl/archive/80ca9f9f6ece29ab132cce4cf807a9465a18cfac.tar.gz",
"https://github.com/google/boringssl/archive/80ca9f9f6ece29ab132cce4cf807a9465a18cfac.tar.gz",
],
)
tf_http_archive(
name = "zlib",
build_file = clean_dep("//third_party:zlib.BUILD"),
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
system_build_file = clean_dep("//third_party/systemlibs:zlib.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/zlib.net/zlib-1.2.11.tar.gz",
"https://zlib.net/zlib-1.2.11.tar.gz",
],
)
tf_http_archive(
name = "fft2d",
build_file = clean_dep("//third_party/fft2d:fft2d.BUILD"),
sha256 = "5f4dabc2ae21e1f537425d58a49cdca1c49ea11db0d6271e2a4b27e9697548eb",
strip_prefix = "OouraFFT-1.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/petewarden/OouraFFT/archive/v1.0.tar.gz",
"https://github.com/petewarden/OouraFFT/archive/v1.0.tar.gz",
],
)
tf_http_archive(
name = "snappy",
build_file = clean_dep("//third_party:snappy.BUILD"),
sha256 = "16b677f07832a612b0836178db7f374e414f94657c138e6993cbfc5dcc58651f",
strip_prefix = "snappy-1.1.8",
system_build_file = clean_dep("//third_party/systemlibs:snappy.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/snappy/archive/1.1.8.tar.gz",
"https://github.com/google/snappy/archive/1.1.8.tar.gz",
],
)
tf_http_archive(
name = "nccl_archive",
build_file = clean_dep("//third_party:nccl/archive.BUILD"),
patch_file = clean_dep("//third_party/nccl:archive.patch"),
sha256 = "b8eaed1fb2d0cc2f951625dc4e17185bab9ff3ab188ba4d34a6e3a01ce9f0d57",
strip_prefix = "nccl-195232556936b39b01cc908296e1650b80d4a3e9",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nvidia/nccl/archive/195232556936b39b01cc908296e1650b80d4a3e9.tar.gz",
"https://github.com/nvidia/nccl/archive/195232556936b39b01cc908296e1650b80d4a3e9.tar.gz",
],
)
java_import_external(
name = "junit",
jar_sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"https://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"https://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar",
],
licenses = ["reciprocal"], # Common Public License Version 1.0
testonly_ = True,
deps = ["@org_hamcrest_core"],
)
java_import_external(
name = "org_hamcrest_core",
jar_sha256 = "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"https://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"https://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
)
java_import_external(
name = "com_google_testing_compile",
jar_sha256 = "edc180fdcd9f740240da1a7a45673f46f59c5578d8cd3fbc912161f74b5aebb8",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
"https://repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
deps = ["@com_google_guava", "@com_google_truth"],
)
java_import_external(
name = "com_google_truth",
jar_sha256 = "032eddc69652b0a1f8d458f999b4a9534965c646b8b5de0eba48ee69407051df",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
"https://repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
],
licenses = ["notice"], # Apache 2.0
testonly_ = True,
deps = ["@com_google_guava"],
)
java_import_external(
name = "org_checkerframework_qual",
jar_sha256 = "d261fde25d590f6b69db7721d469ac1b0a19a17ccaaaa751c31f0d8b8260b894",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
"https://repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
java_import_external(
name = "com_squareup_javapoet",
jar_sha256 = "5bb5abdfe4366c15c0da3332c57d484e238bd48260d6f9d6acf2b08fdde1efea",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
"https://repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
tf_http_archive(
name = "com_google_pprof",
build_file = clean_dep("//third_party:pprof.BUILD"),
sha256 = "e0928ca4aa10ea1e0551e2d7ce4d1d7ea2d84b2abbdef082b0da84268791d0c4",
strip_prefix = "pprof-c0fb62ec88c411cc91194465e54db2632845b650",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
"https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
],
)
# The CUDA 11 toolkit ships with CUB. We should be able to delete this rule
# once TF drops support for CUDA 10.
tf_http_archive(
name = "cub_archive",
build_file = clean_dep("//third_party:cub.BUILD"),
sha256 = "162514b3cc264ac89d91898b58450190b8192e2af1142cf8ccac2d59aa160dda",
strip_prefix = "cub-1.9.9",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NVlabs/cub/archive/1.9.9.zip",
"https://github.com/NVlabs/cub/archive/1.9.9.zip",
],
)
tf_http_archive(
name = "cython",
build_file = clean_dep("//third_party:cython.BUILD"),
delete = ["BUILD.bazel"],
sha256 = "bccc9aa050ea02595b2440188813b936eaf345e85fb9692790cecfe095cf91aa",
strip_prefix = "cython-0.28.4",
system_build_file = clean_dep("//third_party/systemlibs:cython.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/cython/cython/archive/0.28.4.tar.gz",
"https://github.com/cython/cython/archive/0.28.4.tar.gz",
],
)
tf_http_archive(
name = "arm_neon_2_x86_sse",
build_file = clean_dep("//third_party:arm_neon_2_x86_sse.BUILD"),
sha256 = "213733991310b904b11b053ac224fee2d4e0179e46b52fe7f8735b8831e04dcc",
strip_prefix = "ARM_NEON_2_x86_SSE-1200fe90bb174a6224a525ee60148671a786a71f",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
"https://github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
],
)
tf_http_archive(
name = "double_conversion",
build_file = clean_dep("//third_party:double_conversion.BUILD"),
sha256 = "2f7fbffac0d98d201ad0586f686034371a6d152ca67508ab611adc2386ad30de",
strip_prefix = "double-conversion-3992066a95b823efc8ccc1baf82a1cfc73f6e9b8",
system_build_file = clean_dep("//third_party/systemlibs:double_conversion.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
"https://github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_float",
build_file = clean_dep("//third_party:tflite_mobilenet_float.BUILD"),
sha256 = "2fadeabb9968ec6833bee903900dda6e61b3947200535874ce2fe42a8493abc0",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_quant",
build_file = clean_dep("//third_party:tflite_mobilenet_quant.BUILD"),
sha256 = "d32432d28673a936b2d6281ab0600c71cf7226dfe4cdcef3012555f691744166",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "767057f2837a46d97882734b03428e8dd640b93236052b312b2f0e45613c1cf0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant_protobuf",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "09280972c5777f1aa775ef67cb4ac5d5ed21970acd8535aeca62450ef14f0d79",
strip_prefix = "ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
"https://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
],
)
tf_http_archive(
name = "tflite_conv_actions_frozen",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "d947b38cba389b5e2d0bfc3ea6cc49c784e187b41a071387b3742d1acac7691e",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
],
)
tf_http_archive(
name = "tflite_ovic_testdata",
build_file = clean_dep("//third_party:tflite_ovic_testdata.BUILD"),
sha256 = "033c941b7829b05ca55a124a26a6a0581b1ececc154a2153cafcfdb54f80dca2",
strip_prefix = "ovic",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
"https://storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
],
)
tf_http_archive(
name = "rules_cc",
sha256 = "cf3b76a90c86c0554c5b10f4b160f05af71d252026b71362c4674e2fb9936cf9",
strip_prefix = "rules_cc-01d4a48911d5e7591ecb1c06d3b8af47fe872371",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip",
"https://github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip",
],
)
tf_http_archive(
name = "rules_python",
sha256 = "aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz",
"https://github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz",
],
)
tf_http_archive(
name = "build_bazel_rules_android",
sha256 = "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
strip_prefix = "rules_android-0.1.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
"https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
],
)
# Apple and Swift rules.
# https://github.com/bazelbuild/rules_apple/releases
tf_http_archive(
name = "build_bazel_rules_apple",
sha256 = "ee9e6073aeb5a65c100cb9c44b0017c937706a4ae03176e14a7e78620a198079",
strip_prefix = "rules_apple-5131f3d46794bf227d296c82f30c2499c9de3c5b",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_apple/archive/5131f3d46794bf227d296c82f30c2499c9de3c5b.tar.gz",
"https://github.com/bazelbuild/rules_apple/archive/5131f3d46794bf227d296c82f30c2499c9de3c5b.tar.gz",
],
)
# https://github.com/bazelbuild/rules_swift/releases
tf_http_archive(
name = "build_bazel_rules_swift",
sha256 = "d0833bc6dad817a367936a5f902a0c11318160b5e80a20ece35fb85a5675c886",
strip_prefix = "rules_swift-3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_swift/archive/3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8.tar.gz",
"https://github.com/bazelbuild/rules_swift/archive/3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8.tar.gz",
],
)
# https://github.com/bazelbuild/apple_support/releases
tf_http_archive(
name = "build_bazel_apple_support",
sha256 = "ad8ae80e93612b8151019367a3d1604d7a51c14480dae1254e10252007e8260c",
strip_prefix = "apple_support-501b4afb27745c4813a88ffa28acd901408014e4",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/apple_support/archive/501b4afb27745c4813a88ffa28acd901408014e4.tar.gz",
"https://github.com/bazelbuild/apple_support/archive/501b4afb27745c4813a88ffa28acd901408014e4.tar.gz",
],
)
# https://github.com/bazelbuild/bazel-skylib/releases
tf_http_archive(
name = "bazel_skylib",
sha256 = "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf043c7ca0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/bazel-skylib/releases/download/0.9.0/bazel_skylib-0.9.0.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/0.9.0/bazel_skylib-0.9.0.tar.gz",
],
)
# https://github.com/apple/swift-protobuf/releases
tf_http_archive(
name = "com_github_apple_swift_swift_protobuf",
strip_prefix = "swift-protobuf-1.6.0/",
sha256 = "4ccf6e5ea558e8287bf6331f9f6e52b3c321fca5f1d181d03680f415c32a6bba",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/apple/swift-protobuf/archive/1.6.0.zip",
"https://github.com/apple/swift-protobuf/archive/1.6.0.zip",
],
)
# https://github.com/google/xctestrunner/releases
http_file(
name = "xctestrunner",
executable = 1,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/xctestrunner/releases/download/0.2.9/ios_test_runner.par",
"https://github.com/google/xctestrunner/releases/download/0.2.9/ios_test_runner.par",
],
)
tf_http_archive(
name = "tbb",
build_file = clean_dep("//third_party/ngraph:tbb.BUILD"),
sha256 = "c3245012296f09f1418b78a8c2f17df5188b3bd0db620f7fd5fabe363320805a",
strip_prefix = "tbb-2019_U1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/01org/tbb/archive/2019_U1.zip",
"https://github.com/01org/tbb/archive/2019_U1.zip",
],
)
tf_http_archive(
name = "ngraph",
build_file = clean_dep("//third_party/ngraph:ngraph.BUILD"),
sha256 = "a1780f24a1381fc25e323b4b2d08b6ef5129f42e011305b2a34dcf43a48030d5",
strip_prefix = "ngraph-0.11.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NervanaSystems/ngraph/archive/v0.11.0.tar.gz",
"https://github.com/NervanaSystems/ngraph/archive/v0.11.0.tar.gz",
],
)
tf_http_archive(
name = "nlohmann_json_lib",
build_file = clean_dep("//third_party/ngraph:nlohmann_json.BUILD"),
sha256 = "c377963a95989270c943d522bfefe7b889ef5ed0e1e15d535fd6f6f16ed70732",
strip_prefix = "json-3.4.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nlohmann/json/archive/v3.4.0.tar.gz",
"https://github.com/nlohmann/json/archive/v3.4.0.tar.gz",
],
)
tf_http_archive(
name = "ngraph_tf",
build_file = clean_dep("//third_party/ngraph:ngraph_tf.BUILD"),
sha256 = "742a642d2c6622277df4c902b6830d616d0539cc8cd843d6cdb899bb99e66e36",
strip_prefix = "ngraph-tf-0.9.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NervanaSystems/ngraph-tf/archive/v0.9.0.zip",
"https://github.com/NervanaSystems/ngraph-tf/archive/v0.9.0.zip",
],
)
tf_http_archive(
name = "pybind11",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/pybind/pybind11/archive/v2.4.3.tar.gz",
"https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz",
],
sha256 = "1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d",
strip_prefix = "pybind11-2.4.3",
build_file = clean_dep("//third_party:pybind11.BUILD"),
system_build_file = clean_dep("//third_party/systemlibs:pybind11.BUILD"),
)
tf_http_archive(
name = "wrapt",
build_file = clean_dep("//third_party:wrapt.BUILD"),
sha256 = "8a6fb40e8f8b6a66b4ba81a4044c68e6a7b1782f21cfabc06fb765332b4c3e51",
strip_prefix = "wrapt-1.11.1/src/wrapt",
system_build_file = clean_dep("//third_party/systemlibs:wrapt.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
"https://github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
],
)
tf_http_archive(
name = "coremltools",
sha256 = "0d594a714e8a5fd5bd740ad112ef59155c0482e25fdc8f8efa5758f90abdcf1e",
strip_prefix = "coremltools-3.3",
build_file = clean_dep("//third_party:coremltools.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/apple/coremltools/archive/3.3.zip",
"https://github.com/apple/coremltools/archive/3.3.zip",
],
)
def tf_bind():
"""Bind targets for some external repositories"""
##############################################################################
# BIND DEFINITIONS
#
# Please do not add bind() definitions unless we have no other choice.
# If that ends up being the case, please leave a comment explaining
# why we can't depend on the canonical build target.
# Needed by Protobuf
native.bind(
name = "grpc_cpp_plugin",
actual = "@com_github_grpc_grpc//src/compiler:grpc_cpp_plugin",
)
native.bind(
name = "grpc_python_plugin",
actual = "@com_github_grpc_grpc//src/compiler:grpc_python_plugin",
)
native.bind(
name = "grpc_lib",
actual = "@com_github_grpc_grpc//:grpc++",
)
native.bind(
name = "grpc_lib_unsecure",
actual = "@com_github_grpc_grpc//:grpc++_unsecure",
)
# Needed by Protobuf
native.bind(
name = "python_headers",
actual = clean_dep("//third_party/python_runtime:headers"),
)
# Needed by Protobuf
native.bind(
name = "six",
actual = "@six_archive//:six",
)
| 48.809756 | 203 | 0.688937 |
1d44359e1302ad145ce6a95b5c28195425abaaca | 3,241 | py | Python
NeuralNets/backpropagation.py | lucassid/nn | a53213a3670088772212f5108cd36c9e8e5fb7b7 | ["MIT"] | 2 | 2018-05-05T16:45:18.000Z | 2018-06-08T13:40:53.000Z
NeuralNets/backpropagation.py | lucassid/nn | a53213a3670088772212f5108cd36c9e8e5fb7b7 | ["MIT"] | 1 | 2018-05-08T22:47:23.000Z | 2018-05-08T23:17:44.000Z
NeuralNets/backpropagation.py | lucassid/nn | a53213a3670088772212f5108cd36c9e8e5fb7b7 | ["MIT"] | null | null | null |
import numpy as np
import matplotlib as mpl
#mpl.use("agg")
import matplotlib.style as style
import matplotlib.pyplot as plt
sigmoid = lambda z:np.tanh(z)
Dsigmoid = lambda z: (1/ np.cosh(z)) ** 2
gaussian = lambda x: np.exp(-1 * x**(2)) * 2 - 1
test = lambda x: 0.2 + 0.4 * x**2 + 0.3 * np.sin(15 * x) + 0.05 * np.cos(50 * x)
def erro(y, d):
error = 0
for o, t in zip(y, d):
for a, b in zip(o, t):
error = error + (a - b)**2
return error / 2
x = []
d = []
linspace = np.arange(0, 10, 0.1)
#x = [[0,0], [0,1],[1,0],[1,1]]
#d = [[0], [1], [1], [0]]
for i in linspace:
x.append([i])
d.append([np.sin(i)])
P = 12
M = len(x[0])
C = len(d[0])
w1 = np.random.rand(P, M + 1)
w2 = np.random.rand(C, P + 1)
nEpocas = 500
mi = 0.1
Erro = []
for n in range(nEpocas):
response = []
for N in range(len(x)):
h = []
y = []
e = []
delta2 = []
delta1 = []
        #forward pass
        # for each neuron in the hidden layer
        for i in range(len(w1)):
            s = 0
            # for each weight of the neuron
            for k in range(len(w1[0]) - 1):
                s = s + w1[i][k] * x[N][k]
            s = s + w1[i][k+1]
            h.append(sigmoid(s))
        # for each neuron in the output layer
        for c in range(len(w2)):
            r = 0
            # for each weight of the neuron
            for l in range(len(w2[0]) - 1):
                r = r + w2[c][l] * h[l]
            r = r + w2[c][l+1]
            y.append(sigmoid(r))
            e.append(y[c] - d[N][c])
        response.append(y)
        # backward pass: compute the deltas
        for c in range(len(w2)):
            # tanh'(r) evaluated at the output: 1 - tanh(r)**2 = 1 - y**2
            delta2.append(e[c] * (1 - y[c] ** 2))
        for i in range(len(w1)):
            soma = 0
            for c in range(len(w2)):
                soma = soma + delta2[c] * w2[c][i]
            # tanh'(s) evaluated at the hidden activation: 1 - h**2
            delta1.append((1 - h[i] ** 2) * soma)
        # weight updates (the bias is the last column of each weight matrix)
        for c in range(len(w2)):
            for i in range(len(w1)):
                w2[c][i] = w2[c][i] - mi * h[i] * delta2[c]
            w2[c][len(w1)] = w2[c][len(w1)] - mi * delta2[c]
        for c in range(len(w1)):
            for i in range(len(x[0])):
                w1[c][i] = w1[c][i] - mi * x[N][i] * delta1[c]
            w1[c][len(x[0])] = w1[c][len(x[0])] - mi * delta1[c]
Erro.append(erro(response,d))
print (Erro[-1])
mpl.style.use("ggplot")
plt.figure()
plt.xlabel("Epoch")
plt.ylabel("MSE")
plt.grid(True, color="#9467bd")
plt.plot( np.arange(0, nEpocas, nEpocas / len(Erro)), Erro)
plt.savefig("mse.png")
plt.figure()
plt.xlabel("Entrada")
plt.ylabel("Saída")
plt.grid(True, color="#9467bd")
plt.plot(linspace,[i[0] for i in response], "--", label="Valor Previsto", linewidth=2)
plt.plot(linspace, np.sin(linspace), "-.", label="Valor Alvo", linewidth=2)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='center',
ncol=2, borderaxespad=0)
plt.show()
'''
plt.figure()
plt.xlabel("Entrada")
plt.ylabel("Saída")
plt.grid(True, color="#9467bd")
plt.plot(linspace,[i[1] for i in response[::2]], "--", label="Valor Previsto", linewidth=2)
plt.plot(linspace, sigmoid(linspace), "-.", label="Valor Alvo", linewidth=2)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='center',
ncol=2, borderaxespad=0)
'''
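# A minimal vectorized sketch of the same forward pass (illustrative only; the
# names below mirror the script's variables, but the loop-based training above
# is what actually runs):
#
#   X = np.hstack([np.array(x), np.ones((len(x), 1))])   # inputs plus bias column
#   H = sigmoid(X @ w1.T)                                 # hidden activations, shape (N, P)
#   H1 = np.hstack([H, np.ones((len(x), 1))])             # hidden activations plus bias
#   Y = sigmoid(H1 @ w2.T)                                # network outputs, shape (N, C)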
| 26.349593 | 91 | 0.511262 |
9339567dbee502bab6ef8a06457daa04ba8fc8fd | 1,195 | py | Python
scripts/buffer_core_demo.py | lucasw/tf_demo | a9f333f14ec3e866c9a23aee46593b87efaa90ab | ["BSD-3-Clause"] | 2 | 2018-02-27T18:16:39.000Z | 2020-07-19T10:41:09.000Z
scripts/buffer_core_demo.py | lucasw/tf_demo | a9f333f14ec3e866c9a23aee46593b87efaa90ab | ["BSD-3-Clause"] | 2 | 2018-02-18T05:02:08.000Z | 2018-02-18T21:16:17.000Z
scripts/buffer_core_demo.py | lucasw/tf_demo | a9f333f14ec3e866c9a23aee46593b87efaa90ab | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
import rospy
import tf2_ros
from geometry_msgs.msg import TransformStamped
buffer_core = tf2_ros.BufferCore(rospy.Duration(10.0))
ts1 = TransformStamped()
ts1.header.stamp = rospy.Time(0)
ts1.header.frame_id = 'map'
ts1.child_frame_id = 'frame1'
ts1.transform.translation.x = 2.71828183
ts1.transform.rotation.w = 1.0
# TODO(lucasw) does the authority matter at all? Could it be set to anything?
buffer_core.set_transform(ts1, "default_authority")
# print(dir(buffer_core))
# why no lookup_transform(
a = buffer_core.lookup_transform_core('map', 'frame1', rospy.Time(0))
print(a)
# ((2.71828183, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0))
b = buffer_core.lookup_transform_core('frame1', 'map', rospy.Time(0))
print(b)
# ((-2.71828183, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0))
ts2 = TransformStamped()
ts2.header.stamp = rospy.Time(0)
ts2.header.frame_id = 'frame1'
ts2.child_frame_id = 'frame2'
ts2.transform.translation.x = 0
ts2.transform.translation.y = 0.5
# TODO(lucasw) example rotation using transform3d/transformations.py
ts2.transform.rotation.w = 1.0
buffer_core.set_transform(ts2, "default_authority")
print(buffer_core.lookup_transform_core('map', 'frame2', rospy.Time(0)))
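# A possible follow-up to the rotation TODO above (a sketch, not part of the
# original demo): it assumes the ROS1 `tf` package is importable alongside
# tf2_ros, which is not guaranteed in every setup.
#
#   from tf.transformations import quaternion_from_euler
#   q = quaternion_from_euler(0.0, 0.0, 1.57)   # roll, pitch, yaw in radians -> (x, y, z, w)
#   ts2.transform.rotation.x = q[0]
#   ts2.transform.rotation.y = q[1]
#   ts2.transform.rotation.z = q[2]
#   ts2.transform.rotation.w = q[3]
#   buffer_core.set_transform(ts2, "default_authority")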
| 33.194444 | 78 | 0.743933 |
0b1ff24c56bf398caf8b5a1d05922600ecdacd1b | 634 | py | Python
julie/mathemathics/equations.py | MarcelloBB/julieutils | cffba53a1561d05660c2274ce0a9485bf9e0ddcf | ["MIT"] | 2 | 2021-08-23T15:16:43.000Z | 2021-11-01T15:29:02.000Z
julie/mathemathics/equations.py | MarcelloBB/julieutils | cffba53a1561d05660c2274ce0a9485bf9e0ddcf | ["MIT"] | null | null | null
julie/mathemathics/equations.py | MarcelloBB/julieutils | cffba53a1561d05660c2274ce0a9485bf9e0ddcf | ["MIT"] | null | null | null |
import math
class Equation:
    def __init__(self, a: float, b: float, c: float) -> None:
        self.a, self.b, self.c, self.delta = a, b, c, (b**2 - 4 * a * c)
    def solve(self):
        # Case 1: [Delta > 0] -> two distinct real roots
        if self.delta > 0:
            x1 = (-self.b + math.sqrt(self.delta)) / (2 * self.a)
            x2 = (-self.b - math.sqrt(self.delta)) / (2 * self.a)
            return x1, x2
        # Case 2: [Delta = 0] -> one repeated real root
        elif self.delta == 0:
            x = (-self.b) / (2 * self.a)
            return x
        # Case 3: [Delta < 0] -> no real roots
        elif self.delta < 0:
            return None
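# Example usage (illustrative, not part of the module):
#   Equation(1, -3, 2).solve()   # x**2 - 3x + 2 = 0 -> (2.0, 1.0)
#   Equation(1, -2, 1).solve()   # x**2 - 2x + 1 = 0 -> 1.0
#   Equation(1, 0, 1).solve()    # x**2 + 1 = 0      -> None (no real roots)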
| 24.384615 | 73 | 0.443218 |
a87d59679d946d87fecbf7a5e7ff91ed8398d509 | 795 | py | Python
sensors/pi_config.py | TerryHowe/coopernetes | 32b4c3926bde9deb2fa98c1595cf54afbcfa0387 | ["MIT"] | null | null | null
sensors/pi_config.py | TerryHowe/coopernetes | 32b4c3926bde9deb2fa98c1595cf54afbcfa0387 | ["MIT"] | null | null | null
sensors/pi_config.py | TerryHowe/coopernetes | 32b4c3926bde9deb2fa98c1595cf54afbcfa0387 | ["MIT"] | null | null | null |
import os
import yaml
class PiConfig(object):
def __init__(self):
try:
config_file = './config.yaml'
sample_config_file = './sample_config.yaml'
if (not os.path.exists(config_file) and
os.path.exists(sample_config_file)):
config_file = sample_config_file
print('Warning: Using {}'.format(config_file))
with open(config_file, 'r') as stream:
try:
self.config = yaml.safe_load(stream)
return
except yaml.YAMLError as exc:
print(exc)
except FileNotFoundError as exc:
print(exc)
self.config = {}
def get_sensors(self):
return self.config.get('sensors', [])
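# A minimal usage sketch (the config.yaml layout shown here is an assumption
# based on get_sensors(), not taken from the repository):
#
#   # config.yaml
#   # sensors:
#   #   - name: coop_temperature
#   #     pin: 4
#
#   config = PiConfig()
#   for sensor in config.get_sensors():
#       print(sensor)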
| 31.8 | 62 | 0.532075 |
1539730e23bf40a2622e342ea929013b0fd25c80 | 1,858 | py | Python
h2o-py/tests/testdir_munging/binop/pyunit_binop2_gt.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | ["Apache-2.0"] | 6,098 | 2015-05-22T02:46:12.000Z | 2022-03-31T16:54:51.000Z
h2o-py/tests/testdir_munging/binop/pyunit_binop2_gt.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | ["Apache-2.0"] | 2,517 | 2015-05-23T02:10:54.000Z | 2022-03-30T17:03:39.000Z
h2o-py/tests/testdir_munging/binop/pyunit_binop2_gt.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | ["Apache-2.0"] | 2,199 | 2015-05-22T04:09:55.000Z | 2022-03-28T22:20:45.000Z |
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def binop_gt():
iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris_wheader.csv"))
rows, cols = iris.dim
iris.show()
    #frame/scalar
res = iris > 5
res_rows, res_cols = res.dim
assert res_rows == rows and res_cols == cols, "dimension mismatch"
new_rows = iris[res[0]].nrow
assert new_rows == 118, "wrong number of rows returned"
res = 5 < iris
res_rows, res_cols = res.dim
assert res_rows == rows and res_cols == cols, "dimension mismatch"
new_rows = iris[res[0]].nrow
assert new_rows == 118, "wrong number of rows returned"
#frame/vec
#try:
# res = iris > iris[0]
# res.show()
# assert False, "expected error. objects of different dimensions not supported."
#except EnvironmentError:
# pass
#try:
# res = iris[2] > iris
# res.show()
# assert False, "expected error. objects of different dimensions not supported."
#except EnvironmentError:
# pass
#vec/vec
res = iris[0] > iris[1]
res_rows = res.nrow
assert res_rows == rows, "dimension mismatch"
new_rows = iris[res].nrow
assert new_rows == 150, "wrong number of rows returned"
# frame/frame
res = iris > iris
res_rows, res_cols = res.dim
assert res_rows == rows and res_cols == cols, "dimension mismatch"
res = iris[0:2] > iris[1:3]
res_rows, res_cols = res.dim
assert res_rows == rows and res_cols == 2, "dimension mismatch"
#try:
# res = iris > iris[0:3]
# res.show()
# assert False, "expected error. frames are different dimensions."
#except EnvironmentError:
# pass
if __name__ == "__main__":
pyunit_utils.standalone_test(binop_gt)
else:
binop_gt()
| 25.108108 | 87 | 0.624865 |
a80352d71a6be4fd2a790fa10569c8a689d6ee20 | 37,884 | py | Python
aiogram/types/inline_query_result.py | Nebulino/aiogram | 30a1fdc940338adb12fea4494758750174e79223 | ["MIT"] | 3 | 2020-12-06T16:55:53.000Z | 2021-11-19T19:25:57.000Z
aiogram/types/inline_query_result.py | Nebulino/aiogram | 30a1fdc940338adb12fea4494758750174e79223 | ["MIT"] | 1 | 2019-10-18T19:33:20.000Z | 2019-10-18T19:33:20.000Z
aiogram/types/inline_query_result.py | Nebulino/aiogram | 30a1fdc940338adb12fea4494758750174e79223 | ["MIT"] | 2 | 2020-12-30T09:51:30.000Z | 2021-11-10T16:50:28.000Z |
import typing
from . import base
from . import fields
from .inline_keyboard import InlineKeyboardMarkup
from .input_message_content import InputMessageContent
class InlineQueryResult(base.TelegramObject):
"""
This object represents one result of an inline query.
Telegram clients currently support results of the following 20 types
https://core.telegram.org/bots/api#inlinequeryresult
"""
id: base.String = fields.Field()
reply_markup: InlineKeyboardMarkup = fields.Field(base=InlineKeyboardMarkup)
def safe_get_parse_mode(self):
try:
return self.bot.parse_mode
except RuntimeError:
pass
def __init__(self, **kwargs):
if 'parse_mode' in kwargs and kwargs['parse_mode'] is None:
kwargs['parse_mode'] = self.safe_get_parse_mode()
super(InlineQueryResult, self).__init__(**kwargs)
class InlineQueryResultArticle(InlineQueryResult):
"""
Represents a link to an article or web page.
https://core.telegram.org/bots/api#inlinequeryresultarticle
"""
type: base.String = fields.Field(alias='type', default='article')
title: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
url: base.String = fields.Field()
hide_url: base.Boolean = fields.Field()
description: base.String = fields.Field()
thumb_url: base.String = fields.Field()
thumb_width: base.Integer = fields.Field()
thumb_height: base.Integer = fields.Field()
def __init__(self, *,
id: base.String,
title: base.String,
input_message_content: InputMessageContent,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
url: typing.Optional[base.String] = None,
hide_url: typing.Optional[base.Boolean] = None,
description: typing.Optional[base.String] = None,
thumb_url: typing.Optional[base.String] = None,
thumb_width: typing.Optional[base.Integer] = None,
thumb_height: typing.Optional[base.Integer] = None):
super(InlineQueryResultArticle, self).__init__(id=id, title=title,
input_message_content=input_message_content,
reply_markup=reply_markup, url=url, hide_url=hide_url,
description=description, thumb_url=thumb_url,
thumb_width=thumb_width, thumb_height=thumb_height)
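# A minimal usage sketch (illustrative only; `inline_query` is assumed to be an
# InlineQuery update received by a handler and `bot` a configured aiogram Bot):
#
#   from aiogram.types import InputTextMessageContent
#
#   result = InlineQueryResultArticle(
#       id='1',
#       title='Hello',
#       input_message_content=InputTextMessageContent(message_text='Hello from inline mode'),
#   )
#   await bot.answer_inline_query(inline_query.id, results=[result])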
class InlineQueryResultPhoto(InlineQueryResult):
"""
Represents a link to a photo.
By default, this photo will be sent by the user with optional caption.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the photo.
https://core.telegram.org/bots/api#inlinequeryresultphoto
"""
type: base.String = fields.Field(alias='type', default='photo')
photo_url: base.String = fields.Field()
thumb_url: base.String = fields.Field()
photo_width: base.Integer = fields.Field()
photo_height: base.Integer = fields.Field()
title: base.String = fields.Field()
description: base.String = fields.Field()
caption: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
def __init__(self, *,
id: base.String,
photo_url: base.String,
thumb_url: base.String,
photo_width: typing.Optional[base.Integer] = None,
photo_height: typing.Optional[base.Integer] = None,
title: typing.Optional[base.String] = None,
description: typing.Optional[base.String] = None,
caption: typing.Optional[base.String] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None):
super(InlineQueryResultPhoto, self).__init__(id=id, photo_url=photo_url, thumb_url=thumb_url,
photo_width=photo_width, photo_height=photo_height, title=title,
description=description, caption=caption,
reply_markup=reply_markup,
input_message_content=input_message_content)
class InlineQueryResultGif(InlineQueryResult):
"""
Represents a link to an animated GIF file.
By default, this animated GIF file will be sent by the user with optional caption.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the animation.
https://core.telegram.org/bots/api#inlinequeryresultgif
"""
type: base.String = fields.Field(alias='type', default='gif')
gif_url: base.String = fields.Field()
gif_width: base.Integer = fields.Field()
gif_height: base.Integer = fields.Field()
gif_duration: base.Integer = fields.Field()
thumb_url: base.String = fields.Field()
title: base.String = fields.Field()
caption: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
def __init__(self, *,
id: base.String,
gif_url: base.String,
gif_width: typing.Optional[base.Integer] = None,
gif_height: typing.Optional[base.Integer] = None,
gif_duration: typing.Optional[base.Integer] = None,
thumb_url: typing.Optional[base.String] = None,
title: typing.Optional[base.String] = None,
caption: typing.Optional[base.String] = None,
parse_mode: typing.Optional[base.String] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None):
super(InlineQueryResultGif, self).__init__(id=id, gif_url=gif_url, gif_width=gif_width,
gif_height=gif_height, gif_duration=gif_duration,
thumb_url=thumb_url, title=title, caption=caption,
parse_mode=parse_mode, reply_markup=reply_markup,
input_message_content=input_message_content)
class InlineQueryResultMpeg4Gif(InlineQueryResult):
"""
Represents a link to a video animation (H.264/MPEG-4 AVC video without sound).
By default, this animated MPEG-4 file will be sent by the user with optional caption.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the animation.
https://core.telegram.org/bots/api#inlinequeryresultmpeg4gif
"""
type: base.String = fields.Field(alias='type', default='mpeg4_gif')
mpeg4_url: base.String = fields.Field()
mpeg4_width: base.Integer = fields.Field()
mpeg4_height: base.Integer = fields.Field()
mpeg4_duration: base.Integer = fields.Field()
thumb_url: base.String = fields.Field()
title: base.String = fields.Field()
caption: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
def __init__(self, *,
id: base.String,
mpeg4_url: base.String,
thumb_url: base.String,
mpeg4_width: typing.Optional[base.Integer] = None,
mpeg4_height: typing.Optional[base.Integer] = None,
mpeg4_duration: typing.Optional[base.Integer] = None,
title: typing.Optional[base.String] = None,
caption: typing.Optional[base.String] = None,
parse_mode: typing.Optional[base.String] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None):
super(InlineQueryResultMpeg4Gif, self).__init__(id=id, mpeg4_url=mpeg4_url, mpeg4_width=mpeg4_width,
mpeg4_height=mpeg4_height, mpeg4_duration=mpeg4_duration,
thumb_url=thumb_url, title=title, caption=caption,
parse_mode=parse_mode, reply_markup=reply_markup,
input_message_content=input_message_content)
class InlineQueryResultVideo(InlineQueryResult):
"""
Represents a link to a page containing an embedded video player or a video file.
By default, this video file will be sent by the user with an optional caption.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the video.
If an InlineQueryResultVideo message contains an embedded video (e.g., YouTube),
you must replace its content using input_message_content.
https://core.telegram.org/bots/api#inlinequeryresultvideo
"""
type: base.String = fields.Field(alias='type', default='video')
video_url: base.String = fields.Field()
mime_type: base.String = fields.Field()
thumb_url: base.String = fields.Field()
title: base.String = fields.Field()
caption: base.String = fields.Field()
video_width: base.Integer = fields.Field()
video_height: base.Integer = fields.Field()
video_duration: base.Integer = fields.Field()
description: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
def __init__(self, *,
id: base.String,
video_url: base.String,
mime_type: base.String,
thumb_url: base.String,
title: base.String,
caption: typing.Optional[base.String] = None,
parse_mode: typing.Optional[base.String] = None,
video_width: typing.Optional[base.Integer] = None,
video_height: typing.Optional[base.Integer] = None,
video_duration: typing.Optional[base.Integer] = None,
description: typing.Optional[base.String] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None):
super(InlineQueryResultVideo, self).__init__(id=id, video_url=video_url, mime_type=mime_type,
thumb_url=thumb_url, title=title, caption=caption,
video_width=video_width, video_height=video_height,
video_duration=video_duration, description=description,
parse_mode=parse_mode, reply_markup=reply_markup,
input_message_content=input_message_content)
class InlineQueryResultAudio(InlineQueryResult):
"""
Represents a link to an mp3 audio file. By default, this audio file will be sent by the user.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the audio.
Note: This will only work in Telegram versions released after 9 April, 2016.
Older clients will ignore them.
https://core.telegram.org/bots/api#inlinequeryresultaudio
"""
type: base.String = fields.Field(alias='type', default='audio')
audio_url: base.String = fields.Field()
title: base.String = fields.Field()
caption: base.String = fields.Field()
performer: base.String = fields.Field()
audio_duration: base.Integer = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
def __init__(self, *,
id: base.String,
audio_url: base.String,
title: base.String,
caption: typing.Optional[base.String] = None,
parse_mode: typing.Optional[base.String] = None,
performer: typing.Optional[base.String] = None,
audio_duration: typing.Optional[base.Integer] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None):
super(InlineQueryResultAudio, self).__init__(id=id, audio_url=audio_url, title=title,
caption=caption, parse_mode=parse_mode,
performer=performer, audio_duration=audio_duration,
reply_markup=reply_markup,
input_message_content=input_message_content)
class InlineQueryResultVoice(InlineQueryResult):
"""
Represents a link to a voice recording in an .ogg container encoded with OPUS.
By default, this voice recording will be sent by the user.
Alternatively, you can use input_message_content to send a message with the specified content
    instead of the voice message.
Note: This will only work in Telegram versions released after 9 April, 2016.
Older clients will ignore them.
https://core.telegram.org/bots/api#inlinequeryresultvoice
"""
type: base.String = fields.Field(alias='type', default='voice')
voice_url: base.String = fields.Field()
title: base.String = fields.Field()
caption: base.String = fields.Field()
voice_duration: base.Integer = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
def __init__(self, *,
id: base.String,
voice_url: base.String,
title: base.String,
caption: typing.Optional[base.String] = None,
parse_mode: typing.Optional[base.String] = None,
voice_duration: typing.Optional[base.Integer] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None):
super(InlineQueryResultVoice, self).__init__(id=id, voice_url=voice_url, title=title,
caption=caption, voice_duration=voice_duration,
parse_mode=parse_mode, reply_markup=reply_markup,
input_message_content=input_message_content)
class InlineQueryResultDocument(InlineQueryResult):
"""
Represents a link to a file.
By default, this file will be sent by the user with an optional caption.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the file. Currently, only .PDF and .ZIP files can be sent using this method.
Note: This will only work in Telegram versions released after 9 April, 2016. Older clients will ignore them.
https://core.telegram.org/bots/api#inlinequeryresultdocument
"""
type: base.String = fields.Field(alias='type', default='document')
title: base.String = fields.Field()
caption: base.String = fields.Field()
document_url: base.String = fields.Field()
mime_type: base.String = fields.Field()
description: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
thumb_url: base.String = fields.Field()
thumb_width: base.Integer = fields.Field()
thumb_height: base.Integer = fields.Field()
def __init__(self, *,
id: base.String,
title: base.String,
caption: typing.Optional[base.String] = None,
parse_mode: typing.Optional[base.String] = None,
document_url: typing.Optional[base.String] = None,
mime_type: typing.Optional[base.String] = None,
description: typing.Optional[base.String] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None,
thumb_url: typing.Optional[base.String] = None,
thumb_width: typing.Optional[base.Integer] = None,
thumb_height: typing.Optional[base.Integer] = None):
super(InlineQueryResultDocument, self).__init__(id=id, title=title, caption=caption,
document_url=document_url, mime_type=mime_type,
description=description, reply_markup=reply_markup,
input_message_content=input_message_content,
thumb_url=thumb_url, thumb_width=thumb_width,
thumb_height=thumb_height, parse_mode=parse_mode)
class InlineQueryResultLocation(InlineQueryResult):
"""
Represents a location on a map.
By default, the location will be sent by the user.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the location.
Note: This will only work in Telegram versions released after 9 April, 2016.
Older clients will ignore them.
https://core.telegram.org/bots/api#inlinequeryresultlocation
"""
type: base.String = fields.Field(alias='type', default='location')
latitude: base.Float = fields.Field()
longitude: base.Float = fields.Field()
title: base.String = fields.Field()
live_period: base.Integer = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
thumb_url: base.String = fields.Field()
thumb_width: base.Integer = fields.Field()
thumb_height: base.Integer = fields.Field()
def __init__(self, *,
id: base.String,
latitude: base.Float,
longitude: base.Float,
title: base.String,
live_period: typing.Optional[base.Integer] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None,
thumb_url: typing.Optional[base.String] = None,
thumb_width: typing.Optional[base.Integer] = None,
thumb_height: typing.Optional[base.Integer] = None):
super(InlineQueryResultLocation, self).__init__(id=id, latitude=latitude, longitude=longitude,
title=title, live_period=live_period,
reply_markup=reply_markup,
input_message_content=input_message_content,
thumb_url=thumb_url, thumb_width=thumb_width,
thumb_height=thumb_height)
class InlineQueryResultVenue(InlineQueryResult):
"""
Represents a venue. By default, the venue will be sent by the user.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the venue.
Note: This will only work in Telegram versions released after 9 April, 2016.
Older clients will ignore them.
https://core.telegram.org/bots/api#inlinequeryresultvenue
"""
type: base.String = fields.Field(alias='type', default='venue')
latitude: base.Float = fields.Field()
longitude: base.Float = fields.Field()
title: base.String = fields.Field()
address: base.String = fields.Field()
foursquare_id: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
thumb_url: base.String = fields.Field()
thumb_width: base.Integer = fields.Field()
thumb_height: base.Integer = fields.Field()
foursquare_type: base.String = fields.Field()
def __init__(self, *,
id: base.String,
latitude: base.Float,
longitude: base.Float,
title: base.String,
address: base.String,
foursquare_id: typing.Optional[base.String] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None,
thumb_url: typing.Optional[base.String] = None,
thumb_width: typing.Optional[base.Integer] = None,
thumb_height: typing.Optional[base.Integer] = None,
foursquare_type: typing.Optional[base.String] = None):
super(InlineQueryResultVenue, self).__init__(id=id, latitude=latitude, longitude=longitude,
title=title, address=address, foursquare_id=foursquare_id,
reply_markup=reply_markup,
input_message_content=input_message_content, thumb_url=thumb_url,
thumb_width=thumb_width, thumb_height=thumb_height,
foursquare_type=foursquare_type)
class InlineQueryResultContact(InlineQueryResult):
"""
Represents a contact with a phone number.
By default, this contact will be sent by the user.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the contact.
Note: This will only work in Telegram versions released after 9 April, 2016. Older clients will ignore them.
https://core.telegram.org/bots/api#inlinequeryresultcontact
"""
type: base.String = fields.Field(alias='type', default='contact')
phone_number: base.String = fields.Field()
first_name: base.String = fields.Field()
last_name: base.String = fields.Field()
vcard: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
thumb_url: base.String = fields.Field()
thumb_width: base.Integer = fields.Field()
thumb_height: base.Integer = fields.Field()
foursquare_type: base.String = fields.Field()
def __init__(self, *,
id: base.String,
phone_number: base.String,
first_name: base.String,
last_name: typing.Optional[base.String] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None,
thumb_url: typing.Optional[base.String] = None,
thumb_width: typing.Optional[base.Integer] = None,
thumb_height: typing.Optional[base.Integer] = None,
foursquare_type: typing.Optional[base.String] = None):
super(InlineQueryResultContact, self).__init__(id=id, phone_number=phone_number,
first_name=first_name, last_name=last_name,
reply_markup=reply_markup,
input_message_content=input_message_content, thumb_url=thumb_url,
thumb_width=thumb_width, thumb_height=thumb_height,
foursquare_type=foursquare_type)
class InlineQueryResultGame(InlineQueryResult):
"""
Represents a Game.
Note: This will only work in Telegram versions released after October 1, 2016.
Older clients will not display any inline results if a game result is among them.
https://core.telegram.org/bots/api#inlinequeryresultgame
"""
type: base.String = fields.Field(alias='type', default='game')
game_short_name: base.String = fields.Field()
def __init__(self, *,
id: base.String,
game_short_name: base.String,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None):
super(InlineQueryResultGame, self).__init__(id=id, game_short_name=game_short_name,
reply_markup=reply_markup)
class InlineQueryResultCachedPhoto(InlineQueryResult):
"""
Represents a link to a photo stored on the Telegram servers.
By default, this photo will be sent by the user with an optional caption.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the photo.
https://core.telegram.org/bots/api#inlinequeryresultcachedphoto
"""
type: base.String = fields.Field(alias='type', default='photo')
photo_file_id: base.String = fields.Field()
title: base.String = fields.Field()
description: base.String = fields.Field()
caption: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
def __init__(self, *,
id: base.String,
photo_file_id: base.String,
title: typing.Optional[base.String] = None,
description: typing.Optional[base.String] = None,
caption: typing.Optional[base.String] = None,
parse_mode: typing.Optional[base.String] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None):
super(InlineQueryResultCachedPhoto, self).__init__(id=id, photo_file_id=photo_file_id, title=title,
description=description, caption=caption,
parse_mode=parse_mode, reply_markup=reply_markup,
input_message_content=input_message_content)
class InlineQueryResultCachedGif(InlineQueryResult):
"""
Represents a link to an animated GIF file stored on the Telegram servers.
By default, this animated GIF file will be sent by the user with an optional caption.
Alternatively, you can use input_message_content to send a message with specified content
instead of the animation.
https://core.telegram.org/bots/api#inlinequeryresultcachedgif
"""
type: base.String = fields.Field(alias='type', default='gif')
gif_file_id: base.String = fields.Field()
title: base.String = fields.Field()
caption: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
def __init__(self, *,
id: base.String,
gif_file_id: base.String,
title: typing.Optional[base.String] = None,
caption: typing.Optional[base.String] = None,
parse_mode: typing.Optional[base.String] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None):
super(InlineQueryResultCachedGif, self).__init__(id=id, gif_file_id=gif_file_id,
title=title, caption=caption,
parse_mode=parse_mode, reply_markup=reply_markup,
input_message_content=input_message_content)
class InlineQueryResultCachedMpeg4Gif(InlineQueryResult):
"""
Represents a link to a video animation (H.264/MPEG-4 AVC video without sound) stored on the Telegram servers.
By default, this animated MPEG-4 file will be sent by the user with an optional caption.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the animation.
https://core.telegram.org/bots/api#inlinequeryresultcachedmpeg4gif
"""
type: base.String = fields.Field(alias='type', default='mpeg4_gif')
mpeg4_file_id: base.String = fields.Field()
title: base.String = fields.Field()
caption: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
def __init__(self, *,
id: base.String,
mpeg4_file_id: base.String,
title: typing.Optional[base.String] = None,
caption: typing.Optional[base.String] = None,
parse_mode: typing.Optional[base.String] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None):
super(InlineQueryResultCachedMpeg4Gif, self).__init__(id=id, mpeg4_file_id=mpeg4_file_id,
title=title, caption=caption,
parse_mode=parse_mode, reply_markup=reply_markup,
input_message_content=input_message_content)
class InlineQueryResultCachedSticker(InlineQueryResult):
"""
Represents a link to a sticker stored on the Telegram servers.
By default, this sticker will be sent by the user.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the sticker.
Note: This will only work in Telegram versions released after 9 April, 2016.
Older clients will ignore them.
https://core.telegram.org/bots/api#inlinequeryresultcachedsticker
"""
type: base.String = fields.Field(alias='type', default='sticker')
sticker_file_id: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
def __init__(self, *,
id: base.String,
sticker_file_id: base.String,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None):
super(InlineQueryResultCachedSticker, self).__init__(id=id, sticker_file_id=sticker_file_id,
reply_markup=reply_markup,
input_message_content=input_message_content)
class InlineQueryResultCachedDocument(InlineQueryResult):
"""
Represents a link to a file stored on the Telegram servers.
By default, this file will be sent by the user with an optional caption.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the file.
Note: This will only work in Telegram versions released after 9 April, 2016.
Older clients will ignore them.
https://core.telegram.org/bots/api#inlinequeryresultcacheddocument
"""
type: base.String = fields.Field(alias='type', default='document')
title: base.String = fields.Field()
document_file_id: base.String = fields.Field()
description: base.String = fields.Field()
caption: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
def __init__(self, *,
id: base.String,
title: base.String,
document_file_id: base.String,
description: typing.Optional[base.String] = None,
caption: typing.Optional[base.String] = None,
parse_mode: typing.Optional[base.String] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None):
super(InlineQueryResultCachedDocument, self).__init__(id=id, title=title,
document_file_id=document_file_id,
description=description, caption=caption,
parse_mode=parse_mode, reply_markup=reply_markup,
input_message_content=input_message_content)
class InlineQueryResultCachedVideo(InlineQueryResult):
"""
Represents a link to a video file stored on the Telegram servers.
By default, this video file will be sent by the user with an optional caption.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the video.
https://core.telegram.org/bots/api#inlinequeryresultcachedvideo
"""
type: base.String = fields.Field(alias='type', default='video')
video_file_id: base.String = fields.Field()
title: base.String = fields.Field()
description: base.String = fields.Field()
caption: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
def __init__(self, *,
id: base.String,
video_file_id: base.String,
title: base.String,
description: typing.Optional[base.String] = None,
caption: typing.Optional[base.String] = None,
parse_mode: typing.Optional[base.String] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None):
super(InlineQueryResultCachedVideo, self).__init__(id=id, video_file_id=video_file_id, title=title,
description=description, caption=caption,
parse_mode=parse_mode, reply_markup=reply_markup,
input_message_content=input_message_content)
class InlineQueryResultCachedVoice(InlineQueryResult):
"""
Represents a link to a voice message stored on the Telegram servers.
By default, this voice message will be sent by the user.
Alternatively, you can use input_message_content to send a message with the specified content
instead of the voice message.
Note: This will only work in Telegram versions released after 9 April, 2016. Older clients will ignore them.
https://core.telegram.org/bots/api#inlinequeryresultcachedvoice
"""
type: base.String = fields.Field(alias='type', default='voice')
voice_file_id: base.String = fields.Field()
title: base.String = fields.Field()
caption: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
def __init__(self, *,
id: base.String,
voice_file_id: base.String,
title: base.String,
caption: typing.Optional[base.String] = None,
parse_mode: typing.Optional[base.String] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None):
super(InlineQueryResultCachedVoice, self).__init__(id=id, voice_file_id=voice_file_id,
title=title, caption=caption,
parse_mode=parse_mode, reply_markup=reply_markup,
input_message_content=input_message_content)
class InlineQueryResultCachedAudio(InlineQueryResult):
"""
Represents a link to an mp3 audio file stored on the Telegram servers.
By default, this audio file will be sent by the user.
Alternatively, you can use input_message_content to send a message with
the specified content instead of the audio.
Note: This will only work in Telegram versions released after 9 April, 2016.
Older clients will ignore them.
https://core.telegram.org/bots/api#inlinequeryresultcachedaudio
"""
type: base.String = fields.Field(alias='type', default='audio')
audio_file_id: base.String = fields.Field()
caption: base.String = fields.Field()
input_message_content: InputMessageContent = fields.Field(base=InputMessageContent)
def __init__(self, *,
id: base.String,
audio_file_id: base.String,
caption: typing.Optional[base.String] = None,
parse_mode: typing.Optional[base.String] = None,
reply_markup: typing.Optional[InlineKeyboardMarkup] = None,
input_message_content: typing.Optional[InputMessageContent] = None):
super(InlineQueryResultCachedAudio, self).__init__(id=id, audio_file_id=audio_file_id,
caption=caption, parse_mode=parse_mode,
reply_markup=reply_markup,
input_message_content=input_message_content)
| 51.194595
| 120
| 0.622189
|
82599c4d39d4d6a4e020b37707b946c6e6c0afab
| 101
|
py
|
Python
|
Backend/DungeonDirector/Direction.py
|
LukasKlein00/SWEProjekt2021
|
70144e1436a86c476302754c0233a4e4c8180457
|
[
"MIT"
] | 6
|
2021-03-13T09:07:16.000Z
|
2021-04-24T11:59:39.000Z
|
Backend/DungeonDirector/Direction.py
|
LukasKlein00/SWEProjekt2021
|
70144e1436a86c476302754c0233a4e4c8180457
|
[
"MIT"
] | 50
|
2021-05-01T15:59:02.000Z
|
2021-05-20T18:57:28.000Z
|
Backend/DungeonDirector/Direction.py
|
LukasKlein00/SWEProjekt2021
|
70144e1436a86c476302754c0233a4e4c8180457
|
[
"MIT"
] | 2
|
2021-04-21T23:04:07.000Z
|
2021-05-01T09:56:12.000Z
|
from enum import Enum
class Direction(Enum):
north = 0
south = 1
west = 2
east = 3
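# Editor's note: a tiny hedged usage sketch (not in the original file), showing how a
# direction given as a name or a value maps onto this enum.
def _example_direction_lookup():
    assert Direction['north'] is Direction.north   # lookup by name
    assert Direction(2) is Direction.west          # lookup by value
    return [d.name for d in Direction]             # ['north', 'south', 'west', 'east']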
| 11.222222
| 22
| 0.574257
|
1c16feceb4475df0acfaa5e232507e59611abe2d
| 2,046
|
py
|
Python
|
evogtk/gui/accessclasslib/checkbutton.py
|
R3v1L/evogtk
|
9f951a08770e99ffd701a1994ba948aa8014f2af
|
[
"MIT"
] | null | null | null |
evogtk/gui/accessclasslib/checkbutton.py
|
R3v1L/evogtk
|
9f951a08770e99ffd701a1994ba948aa8014f2af
|
[
"MIT"
] | null | null | null |
evogtk/gui/accessclasslib/checkbutton.py
|
R3v1L/evogtk
|
9f951a08770e99ffd701a1994ba948aa8014f2af
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2008 EVO Sistemas Libres <central@evosistemas.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
# checkbutton
# EVOGTK Access class for gtk.CheckButton like widgets
###############################################################################
# GTK Imports
import gtk
# DBWidgets imports
from evogtk.widgets import DBCheckButton
class AccessClass:
"""
Class for gtk.CheckButton like widgets
"""
def supported_widgets(self):
"""
Supported widgets for this access class
"""
        return [gtk.CheckButton, gtk.ToggleButton, gtk.ToggleAction, gtk.ToggleToolButton, gtk.CheckMenuItem,
                gtk.RadioButton, gtk.RadioAction, gtk.RadioMenuItem, gtk.RadioToolButton,
                DBCheckButton]
def supported_types(self):
"""
Supported types for this access class
"""
return [bool]
def set_content(self,widget,content):
"""
Method for setting the widget content
"""
widget.set_active(content)
def get_content(self,widget):
"""
        Method for getting the widget content
"""
return widget.get_active()
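# --- Editor's note: hedged usage sketch, not part of the original evogtk module ---
# set_content/get_content simply wrap the widget's native set_active/get_active, so
# driving any of the supported toggle-like widgets looks the same. Requires a working
# PyGTK environment; the function is only defined here, never called.
def _example_checkbutton_access():
    widget = gtk.CheckButton('Enable feature')
    access = AccessClass()
    access.set_content(widget, True)
    return access.get_content(widget)  # -> True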
| 35.275862
| 122
| 0.607038
|
d7874b074f4cbadc856e44338e9aa029ce17dfc2
| 15,864
|
py
|
Python
|
pysal/explore/pointpats/process.py
|
ocefpaf/pysal
|
7e397bdb4c22d4e2442b4ee88bcd691d2421651d
|
[
"BSD-3-Clause"
] | 1
|
2021-08-16T02:47:35.000Z
|
2021-08-16T02:47:35.000Z
|
pysal/explore/pointpats/process.py
|
ocefpaf/pysal
|
7e397bdb4c22d4e2442b4ee88bcd691d2421651d
|
[
"BSD-3-Clause"
] | null | null | null |
pysal/explore/pointpats/process.py
|
ocefpaf/pysal
|
7e397bdb4c22d4e2442b4ee88bcd691d2421651d
|
[
"BSD-3-Clause"
] | 1
|
2016-11-11T19:20:51.000Z
|
2016-11-11T19:20:51.000Z
|
"""
Simulation of planar point processes
TODO
- inhibition process(es)
- optimize draws for complex windows
- documentation
"""
__author__ = "Serge Rey sjsrey@gmail.com"
__all__ = ['PointProcess', 'PoissonPointProcess', 'PoissonClusterPointProcess']
import numpy as np
import pysal.lib as ps
from numpy.random import poisson
from .pointpattern import PointPattern as PP
def runif_in_circle(n, radius=1.0, center=(0., 0.), burn=2, verbose=False):
"""
Generate n points within a circle of given radius.
Parameters
----------
    n : int
        Number of points.
    radius : float
        Radius of the circle.
    center : tuple
        Coordinates of the center.
    burn : int
        Oversampling factor used in each rejection-sampling pass.
    verbose : bool
        If True, print the number of rejection-sampling iterations.
    Returns
    -------
    : array
      (n, 2), coordinates of the generated points, offset by the center.
"""
good = np.zeros((n, 2), float)
c = 0
r = radius
r2 = r * r
it = 0
while c < n:
x = np.random.uniform(-r, r, (burn*n, 1))
y = np.random.uniform(-r, r, (burn*n, 1))
ids = np.where(x*x + y*y <= r2)
candidates = np.hstack((x, y))[ids[0]]
nc = candidates.shape[0]
need = n - c
if nc > need: # more than we need
good[c:] = candidates[:need]
else: # use them all and keep going
good[c:c+nc] = candidates
c += nc
it += 1
if verbose:
print('Iterations: {}'.format(it))
return good + np.asarray(center)
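# Editor's note: hedged sketch (not part of the original module) of the helper above:
# rejection-sample 100 points uniformly inside a unit circle centred at (5, 5).
def _example_runif_in_circle():
    pts = runif_in_circle(100, radius=1.0, center=(5., 5.))
    return pts.shape  # (100, 2)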
class PointProcess(object):
"""
Point Process base class.
Parameters
----------
window : :py:class:`~.window.Window`
Bounding geometric object to contain point process
realizations.
n : int
Size of each realization.
    samples : int
              Number of realizations.
    asPP : bool
           Control the data type of value in the "realizations"
           dictionary. If True, the data type is point
           pattern as defined in pointpattern.py; if False,
           the data type is a two-dimensional array.
Attributes
----------
realizations : dictionary
The key is the index of each realization, and the
value is simulated event points for each
realization. The data type of the value is
controlled by the parameter "asPP".
parameters : dictionary
Dictionary of a dictionary.
The key is the index of each realization, and the
value is a dictionary with the key 'n' and the
value size of each realization.
"""
def __init__(self, window, n, samples, asPP=False, **args):
super(PointProcess, self).__init__()
self.window = window
self.n = n
self.samples = samples
self.args = args
self.realizations = {}
self.setup()
for sample in range(samples):
self.realizations[sample] = self.draw(self.parameters[sample])
if asPP:
for sample in self.realizations:
points = self.realizations[sample]
self.realizations[sample] = PP(points, window=self.window)
def draw(self, parameter):
"""
Generate a series of point coordinates within the given window.
Parameters
----------
parameter : dictionary
Key: 'n'.
Value: size of the realization.
Returns
-------
: array
A series of point coordinates.
"""
c = 0
sample = []
n = parameter['n']
while c < n:
pnts = self.realize(n)
pnts = [ps.cg.shapes.Point((x, y)) for x, y in pnts]
pins = self.window.filter_contained(pnts)
sample.extend(pins)
c = len(sample)
return np.array([np.asarray(p) for p in sample[:n]])
    def realize(self, n):
pass
def setup(self):
pass
class PoissonPointProcess(PointProcess):
"""
Poisson point process including :math:`N`-conditioned CSR process and
:math:`\lambda`-conditioned CSR process.
Parameters
----------
window : :py:class:`~.window.Window`
Bounding geometric object to contain point process
realizations.
n : int
Size of each realization.
    samples : int
              Number of realizations.
    conditioning : bool
                   If True, use the :math:`\lambda`-conditioned CSR process,
                   so the number of events varies across realizations;
                   if False, use the :math:`N`-conditioned CSR process.
    asPP : bool
           Control the data type of value in the "realizations"
           dictionary. If True, the data type is point
           pattern as defined in pointpattern.py; if False,
           the data type is a two-dimensional array.
Attributes
----------
realizations : dictionary
The key is the index of each realization, and the
value is simulated event points for each
realization. The data type of the value is
controlled by the parameter "asPP".
parameters : dictionary
Dictionary of a dictionary.
The key is the index of each realization, and the
value is a dictionary with the key 'n' and the
value:
1. always equal to the parameter n in the case of
N-conditioned process.
For example, {0:{'n':100},1:{'n':100},2:{'n':100}}
                  2. randomly generated from a Poisson process in
the case of lambda-conditioned process.
For example, {0:{'n':97},1:{'n':100},2:{'n':98}}
Examples
--------
>>> import pysal.lib as ps
>>> import numpy as np
>>> from pointpats import Window
>>> from pysal.lib.cg import shapely_ext
Open the virginia polygon shapefile
>>> va = ps.io.open(ps.examples.get_path("virginia.shp"))
Create the exterior polygons for VA from the union of the county shapes
>>> polys = [shp for shp in va]
>>> state = shapely_ext.cascaded_union(polys)
Create window from virginia state boundary
>>> window = Window(state.parts)
1. Simulate a :math:`N`-conditioned csr process in the same window (10
points, 2 realizations)
>>> np.random.seed(5)
>>> samples1 = PoissonPointProcess(window, 10, 2, conditioning=False, asPP=False)
>>> samples1.realizations[0] # the first realized event points
array([[-81.80326547, 36.77687577],
[-78.5166233 , 37.34055832],
[-77.21660795, 37.7491503 ],
[-79.30361037, 37.40467853],
[-78.61625258, 36.61234487],
[-81.43369537, 37.13784646],
[-80.91302108, 36.60834063],
[-76.90806444, 37.95525903],
[-76.33475868, 36.62635347],
[-79.71621808, 37.27396618]])
2. Simulate a :math:`\lambda`-conditioned csr process in the same window (10
points, 2 realizations)
>>> np.random.seed(5)
>>> samples2 = PoissonPointProcess(window, 10, 2, conditioning=True, asPP=True)
>>> samples2.realizations[0].n # the size of first realized point pattern
10
>>> samples2.realizations[1].n # the size of second realized point pattern
13
"""
def __init__(self, window, n, samples, conditioning=False, asPP=False):
self.conditioning = conditioning
super(PoissonPointProcess, self).__init__(window, n, samples, asPP)
def setup(self):
"""
Generate the number of events for each realization. If
"conditioning" is False, all the event numbers are the same;
if it is True, the event number is a random variable
following a Poisson distribution.
"""
self.parameters = {}
if self.conditioning:
lambdas = poisson(self.n, self.samples)
for i, l in enumerate(lambdas):
self.parameters[i] = {'n': l}
else:
for i in range(self.samples):
self.parameters[i] = {'n': self.n}
def realize(self, n):
"""
Generate n points which are randomly and independently
distributed in the minimum bounding box of "window".
Parameters
----------
n : int
Number of point events.
Returns
-------
: array
(n,2), n point coordinates.
"""
l, b, r, t = self.window.bbox
xs = np.random.uniform(l, r, (n, 1))
ys = np.random.uniform(b, t, (n, 1))
return zip(xs, ys)
class PoissonClusterPointProcess(PointProcess):
"""
Poisson cluster point process (Neyman Scott).
Two stages:
1. parent CSR process: :math:`N`-conditioned or
:math:`\lambda`-conditioned. If parent events follow a
:math:`\lambda`-conditioned CSR process,
the number of parent events varies across realizations.
2. child process: fixed number of points in circle centered
on each parent.
Parameters
----------
window : :py:class:`~.window.Window`
Bounding geometric object to contain point process
realizations.
n : int
Size of each realization.
parents : int
Number of parents.
radius : float
Radius of the circle centered on each parent.
    samples : int
              Number of realizations.
    asPP : bool
           Control the data type of value in the "realizations"
           dictionary. If True, the data type is point
           pattern as defined in pointpattern.py; if False,
           the data type is a two-dimensional array.
conditioning : bool
                   If True, use the :math:`\lambda`-conditioned CSR process
                   for parent events, leading to a varying number of
                   parent events across realizations;
if False, use the :math:`N`-conditioned CSR process.
Attributes
----------
children : int
                 Number of children centered on each parent. Can
be considered as local intensity.
num_parents : dictionary
The key is the index of each realization. The
value is the number of parent events for each
realization.
realizations : dictionary
The key is the index of each realization, and the
value is simulated event points for each
realization. The data type of the value is
controlled by the parameter "asPP".
    parameters : dictionary
                  Dictionary of a dictionary.
                  The key is the index of each realization, and the
                  value is a dictionary with the key 'n' and the
                  value:
                  1. always equal to the parameter n in the case of
                  N-conditioned process.
                  For example, {0:{'n':100},1:{'n':100},2:{'n':100}}
                  2. randomly generated from a Poisson process in
                  the case of lambda-conditioned process.
                  For example, {0:{'n':97},1:{'n':100},2:{'n':98}}
Examples
--------
>>> import pysal.lib as ps
>>> import numpy as np
>>> from pointpats import Window
>>> from pysal.lib.cg import shapely_ext
Open the virginia polygon shapefile
>>> va = ps.io.open(ps.examples.get_path("virginia.shp"))
Create the exterior polygons for VA from the union of the county shapes
>>> polys = [shp for shp in va]
>>> state = shapely_ext.cascaded_union(polys)
Create window from virginia state boundary
>>> window = Window(state.parts)
1. Simulate a Poisson cluster process of size 200 with 10 parents
and 20 children within 0.5 units of each parent
(parent events: :math:`N`-conditioned CSR)
>>> np.random.seed(10)
>>> samples1 = PoissonClusterPointProcess(window, 200, 10, 0.5, 1, asPP=True, conditioning=False)
>>> samples1.parameters # number of events for the realization
{0: {'n': 200}}
>>> samples1.num_parents #number of parent events for each realization
{0: 10}
>>> samples1.children # number of children events centered on each parent event
20
2. Simulate a Poisson cluster process of size 200 with 10 parents
and 20 children within 0.5 units of each parent
(parent events: :math:`\lambda`-conditioned CSR)
>>> np.random.seed(10)
>>> samples2 = PoissonClusterPointProcess(window, 200, 10, 0.5, 1, asPP=True, conditioning=True)
>>> samples2.parameters # number of events for the realization might not be equal to 200
{0: {'n': 260}}
>>> samples2.num_parents #number of parent events for each realization
{0: 13}
>>> samples2.children # number of children events centered on each parent event
20
"""
def __init__(self, window, n, parents, radius, samples, keep=False,
asPP=False, conditioning=False):
self.conditioning = conditioning
self.parents = parents
self.children = int(np.ceil(n * 1. / parents))
self.radius = radius
self.keep = keep
super(PoissonClusterPointProcess, self).__init__(window, n, samples,
asPP)
def setup(self):
"""
Generate the number of events for each realization. If
"conditioning" is False, all the event numbers are the same;
if it is True, the number of parents is a random variable
following a Poisson distribution, resulting in varied number
of events.
"""
self.parameters = {}
self.num_parents = {}
if self.conditioning:
lambdas = poisson(self.parents, self.samples)
for i, l in enumerate(lambdas):
num = l * self.children
self.parameters[i] = {'n': num}
self.num_parents[i] = l
else:
for i in range(self.samples):
self.parameters[i] = {'n': self.n}
self.num_parents[i] = self.parents
def realize(self, n):
"""
Generate n points which are distributed in a clustered
fashion in the minimum bounding box of "window".
Parameters
----------
n : int
Number of point events.
Returns
-------
res : array
(n,2), n point coordinates.
"""
l, b, r, t = self.window.bbox
d = self.radius
# get parent points
pxs = np.random.uniform(l, r, (int(n/self.children), 1))
pys = np.random.uniform(b, t, (int(n/self.children), 1))
cents = np.hstack((pxs, pys))
# generate children points
pnts = [runif_in_circle(self.children, d, center) for center in cents]
res = np.vstack(np.asarray(pnts))
if self.keep:
res = np.vstack((np.asarray(cents), res))
np.random.shuffle(res) # so we don't truncate in a biased fashion
return res
| 34.713348
| 101
| 0.551437
|
c973eb37b57816bd162f4c4018e34745e78863d6
| 4,305
|
py
|
Python
|
userbot/plugins/markdown_IQ.py
|
TeleOniOn/TeleOniOn
|
9d6c676267e3dd991952e2d7166fac646fe7f2fc
|
[
"Apache-2.0"
] | null | null | null |
userbot/plugins/markdown_IQ.py
|
TeleOniOn/TeleOniOn
|
9d6c676267e3dd991952e2d7166fac646fe7f2fc
|
[
"Apache-2.0"
] | null | null | null |
userbot/plugins/markdown_IQ.py
|
TeleOniOn/TeleOniOn
|
9d6c676267e3dd991952e2d7166fac646fe7f2fc
|
[
"Apache-2.0"
] | null | null | null |
#@TeleOniOn
import re
from functools import partial
from telethon import events
from telethon.tl.functions.messages import EditMessageRequest
from telethon.extensions.markdown import DEFAULT_URL_RE
from telethon.utils import add_surrogate, del_surrogate
from telethon.tl.types import (
MessageEntityBold, MessageEntityItalic, MessageEntityCode,
MessageEntityPre, MessageEntityTextUrl
)
def parse_url_match(m):
entity = MessageEntityTextUrl(
offset=m.start(),
length=len(m.group(1)),
url=del_surrogate(m.group(2))
)
return m.group(1), entity
def get_tag_parser(tag, entity):
# TODO unescape escaped tags?
def tag_parser(m):
return m.group(1), entity(offset=m.start(), length=len(m.group(1)))
tag = re.escape(tag)
return re.compile(tag + r'(.+?)' + tag, re.DOTALL), tag_parser
PRINTABLE_ASCII = range(0x21, 0x7f)
def parse_aesthetics(m):
def aesthetify(string):
for c in string:
c = ord(c)
if c in PRINTABLE_ASCII:
c += 0xFF00 - 0x20
elif c == ord(" "):
c = 0x3000
yield chr(c)
return "".join(aesthetify(m[1])), None
def parse_subreddit(m):
text = '/' + m.group(3)
entity = MessageEntityTextUrl(
offset=m.start(2),
length=len(text),
url=f'reddit.com{text}'
)
return m.group(1) + text, entity
def parse_strikethrough(m):
text = m.group(2)
text = "\u0336".join(text) + "\u0336 "
return text, None
PARSED_ENTITIES = (
MessageEntityBold, MessageEntityItalic, MessageEntityCode,
MessageEntityPre, MessageEntityTextUrl
)
# A matcher is a tuple of (regex pattern, parse function)
# where the parse function takes the match and returns (text, entity)
MATCHERS = [
(DEFAULT_URL_RE, parse_url_match),
    get_tag_parser('**', MessageEntityBold),
    get_tag_parser('__', MessageEntityItalic),
    get_tag_parser('```', partial(MessageEntityPre, language='')),
    get_tag_parser('`', MessageEntityCode),
(re.compile(r'\+\+(.+?)\+\+'), parse_aesthetics),
(re.compile(r'([^/\w]|^)(/?(r/\w+))'), parse_subreddit),
(re.compile(r"(?<!\w)(~{2})(?!~~)(.+?)(?<!~)\1(?!\w)"), parse_strikethrough)
]
def parse(message, old_entities=None):
entities = []
old_entities = sorted(old_entities or [], key=lambda e: e.offset)
i = 0
after = 0
message = add_surrogate(message)
while i < len(message):
for after, e in enumerate(old_entities[after:], start=after):
# If the next entity is strictly to our right, we're done here
if i < e.offset:
break
# Skip already existing entities if we're at one
if i == e.offset:
i += e.length
# Find the first pattern that matches
for pattern, parser in MATCHERS:
match = pattern.match(message, pos=i)
if match:
break
else:
i += 1
continue
text, entity = parser(match)
# Shift old entities after our current position (so they stay in place)
shift = len(text) - len(match[0])
if shift:
for e in old_entities[after:]:
e.offset += shift
# Replace whole match with text from parser
message = ''.join((
message[:match.start()],
text,
message[match.end():]
))
# Append entity if we got one
if entity:
entities.append(entity)
# Skip past the match
i += len(text)
return del_surrogate(message), entities + old_entities
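# --- Editor's note: hedged sketch, not part of the original plugin ---
# What parse() produces for plain markdown input: the markers are stripped and the
# formatting is returned as Telegram entities. Defined only, never called here.
def _example_parse_markdown():
    text, entities = parse('**bold** and `code`')
    # text == 'bold and code'
    # entities ~ [MessageEntityBold(offset=0, length=4),
    #             MessageEntityCode(offset=9, length=4)]
    return text, entities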
@borg.on(events.MessageEdited(outgoing=True))
@borg.on(events.NewMessage(outgoing=True))
async def reparse(event):
old_entities = event.message.entities or []
parser = partial(parse, old_entities=old_entities)
message, msg_entities = await borg._parse_message_text(event.raw_text, parser)
if len(old_entities) >= len(msg_entities) and event.raw_text == message:
return
await borg(EditMessageRequest(
peer=await event.get_input_chat(),
id=event.message.id,
message=message,
no_webpage=not bool(event.message.media),
entities=msg_entities
))
raise events.StopPropagation
| 29.486301
| 82
| 0.61626
|
3447f9e459485c55d24cca265df52ade0a2ea701
| 4,896
|
py
|
Python
|
demo/uncertainty_demo.py
|
bottydim/dginn
|
c6b638b2df1e1fe57a76961b18c68dceee55ac38
|
[
"BSD-3-Clause"
] | null | null | null |
demo/uncertainty_demo.py
|
bottydim/dginn
|
c6b638b2df1e1fe57a76961b18c68dceee55ac38
|
[
"BSD-3-Clause"
] | null | null | null |
demo/uncertainty_demo.py
|
bottydim/dginn
|
c6b638b2df1e1fe57a76961b18c68dceee55ac38
|
[
"BSD-3-Clause"
] | null | null | null |
if __name__ == '__main__':
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import os
from collections import defaultdict
import dill as pickle
from aggregator_utils import compute_dg_per_datapoint
from aggregator_utils import get_count_aggregators, extract_dgs_by_ids
from core import *
from core import Activations_Computer
from data_visualizers import visualize_samples
from dataset_utils import filter_dataset
from mnist_loader import load_mnist, get_mnist_model
import matplotlib.pyplot as plt
def compare_points(x, aggregators):
raise NotImplementedError()
def sort_uncertain_points(query_x, model, aggregators, show=False):
'''
    :param query_x: samples to compare
    :param model: trained model used to predict labels for query_x
    :param aggregators: per-class aggregators used to score dependency-graph similarity
    :param show: if True, display the generated figures
    :return: (fig_most, fig_least) matplotlib figures
'''
# Run samples through model to get predicted labels
predictions = np.argmax(model.predict(query_x), axis=1)
similarities = defaultdict(int)
dg_collection_query = compute_dg_per_datapoint(query_x, model, Activations_Computer)
for i, x_sample in enumerate(query_x):
# print("Iteration ", i)
# Compute dep. graph of new sample
x_sample = np.expand_dims(x_sample, axis=0)
indices = [i]
dg_query = extract_dgs_by_ids(dg_collection_query, indices)
# Obtain the sample predicted label
y_pred = predictions[i]
# Compute similarity of the test point to the sampled points
similarities[i] = aggregators[y_pred].similarity(dg_query)
# Sort points by their similarity
sorted_keys = sorted(similarities, key=similarities.get, reverse=True)
sorted_vals = [query_x[i] for i in sorted_keys]
similarity_list = [similarities.get(key) for key in sorted_keys]
# Visualise samples
# Extract least similar 40 points
fig_most = visualize_samples(sorted_vals[:40], similarity_list[:40], title="Most Similar to Original Class")
# Idea: samples with lower similarity will seem stranger
fig_least = visualize_samples(sorted_vals[::-1][:40], similarity_list[::-1][:40],
title="Least Similar to Original Class")
if show:
plt.show(block=False)
return fig_most, fig_least
def random_points():
"""
Script demonstrating uncertainty functionality offered by dep. graphs.
Sorts MNIST points by their uncertainty and prints them out.
Idea: high uncertainty points are "strange" and seem atypical, compared to training data
"""
# Load dataset
train_x, train_y, test_x, test_y = load_mnist()
# Filter out subset of classes
selected_classes = [0, 1, 2, 3]
train_x, train_y = filter_dataset((train_x, train_y), selected_classes)
test_x, test_y = filter_dataset((test_x, test_y), selected_classes)
    # Create model
model = get_mnist_model(train_x, train_y)
# Select points to inspect
n_samples = 100
selected_points = test_x[:n_samples]
# Create aggregators from the training samples
aggregators = get_count_aggregators(train_x, train_y, model, n_samples)
# Visualise points, sorted by their uncertainty
sort_uncertain_points(selected_points, model, aggregators)
def same_class_points(cls_list, n_samples=1000):
"""
Script demonstrating uncertainty functionality offered by dep. graphs.
Sorts MNIST points by their uncertainty and prints them out.
Idea: high uncertainty points are "strange" and seem atypical, compared to training data
"""
# Load dataset
train_x, train_y, test_x, test_y = load_mnist()
# Create model
model = get_mnist_model(train_x, train_y)
# Create aggregators from the training samples
aggregators = get_count_aggregators(train_x, train_y, model, n_samples)
for cls in cls_list:
# Filter out subset of classes
selected_classes = [cls]
# sub_train_x, train_y = filter_dataset((train_x, train_y), selected_classes)
sub_test_x, sub_test_y = filter_dataset((test_x, test_y), selected_classes)
# Select points to inspect
idx = np.where(sub_test_y == cls)
selected_points = sub_test_x[idx][:n_samples]
# Visualise points, sorted by their uncertainty
fig_most, fig_least = sort_uncertain_points(selected_points, model, aggregators)
save_fig(cls, fig_most, "most", "mnist")
save_fig(cls, fig_least, "least", "mnist")
def save_fig(cls, fig, identifier, dataset):
plt.figure(fig.number)
plt.savefig(os.path.join(FIG_FOLDER, "{}/{}_{}.png".format(dataset, cls, identifier)))
with open(os.path.join(FIG_FOLDER, "{}/{}_{}.fig".format(dataset, cls, identifier)), "wb+") as f:
pickle.dump(fig, f)
def main():
same_class_points(list(range(10)), n_samples=1000)
# informetis(n_samples=10)
if __name__ == '__main__':
main()
# print(os.listdir(os.path.abspath("../")))
| 33.081081
| 112
| 0.705882
|
62fdc94af2d52ce5ddd6690a881e26bef034856d
| 21,290
|
py
|
Python
|
trac/ticket/notification.py
|
arielnetworks/trac
|
1a96a3c39961a8952358cb8c64ccf9e94079c26b
|
[
"BSD-3-Clause"
] | 1
|
2017-08-03T07:04:28.000Z
|
2017-08-03T07:04:28.000Z
|
trac/ticket/notification.py
|
arielnetworks/trac
|
1a96a3c39961a8952358cb8c64ccf9e94079c26b
|
[
"BSD-3-Clause"
] | null | null | null |
trac/ticket/notification.py
|
arielnetworks/trac
|
1a96a3c39961a8952358cb8c64ccf9e94079c26b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Daniel Lundin <daniel@edgewall.com>
# Copyright (C) 2005-2006 Emmanuel Blot <emmanuel.blot@free.fr>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
#
from __future__ import with_statement
from hashlib import md5
from genshi.template.text import NewTextTemplate
from trac.core import *
from trac.config import *
from trac.notification import NotifyEmail
from trac.ticket.api import TicketSystem
from trac.ticket.model import Ticket
from trac.util.datefmt import to_utimestamp
from trac.util.text import obfuscate_email_address, shorten_line, \
text_width, wrap
from trac.util.translation import deactivate, reactivate
class TicketNotificationSystem(Component):
always_notify_owner = BoolOption('notification', 'always_notify_owner',
'false',
"""Always send notifications to the ticket owner (''since 0.9'').""")
always_notify_reporter = BoolOption('notification',
'always_notify_reporter',
'false',
"""Always send notifications to any address in the ''reporter''
field.""")
always_notify_updater = BoolOption('notification', 'always_notify_updater',
'true',
"""Always send notifications to the person who causes the ticket
property change and to any previous updater of that ticket.""")
ticket_subject_template = Option('notification', 'ticket_subject_template',
'$prefix #$ticket.id: $summary',
"""A Genshi text template snippet used to get the notification subject.
By default, the subject template is `$prefix #$ticket.id: $summary`.
`$prefix` being the value of the `smtp_subject_prefix` option.
''(since 0.11)''""")
batch_subject_template = Option('notification', 'batch_subject_template',
'$prefix Batch modify: $tickets_descr',
"""Like ticket_subject_template but for batch modifications.
By default, the template is `$prefix Batch modify: $tickets_descr`.
''(since 1.0)''""")
ambiguous_char_width = Option('notification', 'ambiguous_char_width',
'single',
"""Which width of ambiguous characters (e.g. 'single' or
'double') should be used in the table of notification mail.
If 'single', the same width as characters in US-ASCII. This is
expected by most users. If 'double', twice the width of
US-ASCII characters. This is expected by CJK users. ''(since
0.12.2)''""")
def get_ticket_notification_recipients(env, config, tktid, prev_cc=None,
modtime=None):
"""Returns notifications recipients.
:since 1.0.2: the `config` parameter is no longer used.
:since 1.0.2: the `prev_cc` parameter is deprecated.
"""
section = env.config['notification']
always_notify_reporter = section.getbool('always_notify_reporter')
always_notify_owner = section.getbool('always_notify_owner')
always_notify_updater = section.getbool('always_notify_updater')
cc_recipients = set(prev_cc or [])
to_recipients = set()
tkt = Ticket(env, tktid)
# CC field is stored as comma-separated string. Parse to list.
to_list = lambda cc: cc.replace(',', ' ').split()
# Backward compatibility
if not modtime:
modtime = tkt['changetime']
# Harvest email addresses from the cc, reporter, and owner fields
if tkt['cc']:
cc_recipients.update(to_list(tkt['cc']))
if always_notify_reporter:
to_recipients.add(tkt['reporter'])
if always_notify_owner:
to_recipients.add(tkt['owner'])
# Harvest email addresses from the author field of ticket_change(s)
if always_notify_updater:
for author, ticket in env.db_query("""
SELECT DISTINCT author, ticket FROM ticket_change
WHERE ticket=%s
""", (tktid, )):
to_recipients.add(author)
# Harvest previous owner and cc list
author = None
for changelog in tkt.get_changelog(modtime):
author, field, old = changelog[1:4]
if field == 'owner' and always_notify_owner:
to_recipients.add(old)
elif field == 'cc':
cc_recipients.update(to_list(old))
# Suppress the updater from the recipients if necessary
updater = author or tkt['reporter']
if not always_notify_updater:
filter_out = True
if always_notify_reporter and updater == tkt['reporter']:
filter_out = False
if always_notify_owner and updater == tkt['owner']:
filter_out = False
if filter_out:
to_recipients.discard(updater)
elif updater:
to_recipients.add(updater)
return list(to_recipients), list(cc_recipients), \
tkt['reporter'], tkt['owner']
class TicketNotifyEmail(NotifyEmail):
"""Notification of ticket changes."""
template_name = "ticket_notify_email.txt"
ticket = None
newticket = None
modtime = 0
from_email = 'trac+ticket@localhost'
COLS = 75
def __init__(self, env):
NotifyEmail.__init__(self, env)
ambiguous_char_width = env.config.get('notification',
'ambiguous_char_width',
'single')
self.ambiwidth = 2 if ambiguous_char_width == 'double' else 1
def notify(self, ticket, newticket=True, modtime=None):
"""Send ticket change notification e-mail (untranslated)"""
t = deactivate()
translated_fields = ticket.fields
try:
ticket.fields = TicketSystem(self.env).get_ticket_fields()
self._notify(ticket, newticket, modtime)
finally:
ticket.fields = translated_fields
reactivate(t)
def _notify(self, ticket, newticket=True, modtime=None):
self.ticket = ticket
self.modtime = modtime
self.newticket = newticket
changes_body = ''
self.reporter = ''
self.owner = ''
changes_descr = ''
change_data = {}
link = self.env.abs_href.ticket(ticket.id)
summary = self.ticket['summary']
author = None
if not self.newticket and modtime: # Ticket change
from trac.ticket.web_ui import TicketModule
for change in TicketModule(self.env).grouped_changelog_entries(
ticket, when=modtime):
if not change['permanent']: # attachment with same time...
continue
author = change['author']
change_data.update({
'author': self.obfuscate_email(author),
'comment': wrap(change['comment'], self.COLS, ' ', ' ',
'\n', self.ambiwidth)
})
link += '#comment:%s' % str(change.get('cnum', ''))
for field, values in change['fields'].iteritems():
old = values['old']
new = values['new']
newv = ''
if field == 'description':
new_descr = wrap(new, self.COLS, ' ', ' ', '\n',
self.ambiwidth)
old_descr = wrap(old, self.COLS, '> ', '> ', '\n',
self.ambiwidth)
old_descr = old_descr.replace(2 * '\n', '\n' + '>' + \
'\n')
cdescr = '\n'
cdescr += 'Old description:' + 2 * '\n' + old_descr + \
2 * '\n'
cdescr += 'New description:' + 2 * '\n' + new_descr + \
'\n'
changes_descr = cdescr
elif field == 'summary':
summary = "%s (was: %s)" % (new, old)
elif field == 'cc':
(addcc, delcc) = self.diff_cc(old, new)
chgcc = ''
if delcc:
chgcc += wrap(" * cc: %s (removed)" %
', '.join(delcc),
self.COLS, ' ', ' ', '\n',
self.ambiwidth) + '\n'
if addcc:
chgcc += wrap(" * cc: %s (added)" %
', '.join(addcc),
self.COLS, ' ', ' ', '\n',
self.ambiwidth) + '\n'
if chgcc:
changes_body += chgcc
else:
if field in ['owner', 'reporter']:
old = self.obfuscate_email(old)
new = self.obfuscate_email(new)
newv = new
length = 7 + len(field)
spacer_old, spacer_new = ' ', ' '
if len(old + new) + length > self.COLS:
length = 5
if len(old) + length > self.COLS:
spacer_old = '\n'
if len(new) + length > self.COLS:
spacer_new = '\n'
chg = '* %s: %s%s%s=>%s%s' % (field, spacer_old, old,
spacer_old, spacer_new,
new)
chg = chg.replace('\n', '\n' + length * ' ')
chg = wrap(chg, self.COLS, '', length * ' ', '\n',
self.ambiwidth)
changes_body += ' %s%s' % (chg, '\n')
if newv:
change_data[field] = {'oldvalue': old, 'newvalue': new}
if newticket:
author = ticket['reporter']
ticket_values = ticket.values.copy()
ticket_values['id'] = ticket.id
ticket_values['description'] = wrap(
ticket_values.get('description', ''), self.COLS,
initial_indent=' ', subsequent_indent=' ', linesep='\n',
ambiwidth=self.ambiwidth)
ticket_values['new'] = self.newticket
ticket_values['link'] = link
subject = self.format_subj(summary)
if not self.newticket:
subject = 'Re: ' + subject
self.data.update({
'ticket_props': self.format_props(),
'ticket_body_hdr': self.format_hdr(),
'subject': subject,
'ticket': ticket_values,
'changes_body': changes_body,
'changes_descr': changes_descr,
'change': change_data
})
NotifyEmail.notify(self, ticket.id, subject, author)
def format_props(self):
tkt = self.ticket
fields = [f for f in tkt.fields
if f['name'] not in ('summary', 'cc', 'time', 'changetime')]
width = [0, 0, 0, 0]
i = 0
for f in fields:
if f['type'] == 'textarea':
continue
fname = f['name']
if not fname in tkt.values:
continue
fval = tkt[fname] or ''
if fval.find('\n') != -1:
continue
if fname in ['owner', 'reporter']:
fval = self.obfuscate_email(fval)
idx = 2 * (i % 2)
width[idx] = max(self.get_text_width(f['label']), width[idx])
width[idx + 1] = max(self.get_text_width(fval), width[idx + 1])
i += 1
width_l = width[0] + width[1] + 5
width_r = width[2] + width[3] + 5
half_cols = (self.COLS - 1) / 2
if width_l + width_r + 1 > self.COLS:
if ((width_l > half_cols and width_r > half_cols) or
(width[0] > half_cols / 2 or width[2] > half_cols / 2)):
width_l = half_cols
width_r = half_cols
elif width_l > width_r:
width_l = min((self.COLS - 1) * 2 / 3, width_l)
width_r = self.COLS - width_l - 1
else:
width_r = min((self.COLS - 1) * 2 / 3, width_r)
width_l = self.COLS - width_r - 1
sep = width_l * '-' + '+' + width_r * '-'
txt = sep + '\n'
vals_lr = ([], [])
big = []
i = 0
width_lr = [width_l, width_r]
for f in [f for f in fields if f['name'] != 'description']:
fname = f['name']
if fname not in tkt.values:
continue
fval = tkt[fname] or ''
if fname in ['owner', 'reporter']:
fval = self.obfuscate_email(fval)
if f['type'] == 'textarea' or '\n' in unicode(fval):
big.append((f['label'], '\n'.join(fval.splitlines())))
else:
# Note: f['label'] is a Babel's LazyObject, make sure its
# __str__ method won't be called.
str_tmp = u'%s: %s' % (f['label'], unicode(fval))
idx = i % 2
initial_indent = ' ' * (width[2 * idx] -
self.get_text_width(f['label']) +
2 * idx)
wrapped = wrap(str_tmp, width_lr[idx] - 2 + 2 * idx,
initial_indent, ' ', '\n', self.ambiwidth)
vals_lr[idx].append(wrapped.splitlines())
i += 1
if len(vals_lr[0]) > len(vals_lr[1]):
vals_lr[1].append([])
cell_l = []
cell_r = []
for i in xrange(len(vals_lr[0])):
vals_l = vals_lr[0][i]
vals_r = vals_lr[1][i]
vals_diff = len(vals_l) - len(vals_r)
diff = len(cell_l) - len(cell_r)
if diff > 0:
# add padding to right side if needed
if vals_diff < 0:
diff += vals_diff
cell_r.extend([''] * max(diff, 0))
elif diff < 0:
# add padding to left side if needed
if vals_diff > 0:
diff += vals_diff
cell_l.extend([''] * max(-diff, 0))
cell_l.extend(vals_l)
cell_r.extend(vals_r)
for i in range(max(len(cell_l), len(cell_r))):
if i >= len(cell_l):
cell_l.append(width_l * ' ')
elif i >= len(cell_r):
cell_r.append('')
fmt_width = width_l - self.get_text_width(cell_l[i]) \
+ len(cell_l[i])
txt += u'%-*s|%s%s' % (fmt_width, cell_l[i], cell_r[i], '\n')
if big:
txt += sep
for name, value in big:
txt += '\n'.join(['', name + ':', value, '', ''])
txt += sep
return txt
def parse_cc(self, txt):
return filter(lambda x: '@' in x, txt.replace(',', ' ').split())
def diff_cc(self, old, new):
oldcc = NotifyEmail.addrsep_re.split(old)
newcc = NotifyEmail.addrsep_re.split(new)
added = [self.obfuscate_email(x) \
for x in newcc if x and x not in oldcc]
removed = [self.obfuscate_email(x) \
for x in oldcc if x and x not in newcc]
return (added, removed)
def format_hdr(self):
return '#%s: %s' % (self.ticket.id, wrap(self.ticket['summary'],
self.COLS, linesep='\n',
ambiwidth=self.ambiwidth))
def format_subj(self, summary):
template = self.config.get('notification','ticket_subject_template')
template = NewTextTemplate(template.encode('utf8'))
prefix = self.config.get('notification', 'smtp_subject_prefix')
if prefix == '__default__':
prefix = '[%s]' % self.env.project_name
data = {
'prefix': prefix,
'summary': summary,
'ticket': self.ticket,
'env': self.env,
}
return template.generate(**data).render('text', encoding=None).strip()
def get_recipients(self, tktid):
torecipients, ccrecipients, reporter, owner = \
get_ticket_notification_recipients(self.env, self.config, tktid,
modtime=self.modtime)
self.reporter = reporter
self.owner = owner
return (torecipients, ccrecipients)
def get_message_id(self, rcpt, modtime=None):
"""Generate a predictable, but sufficiently unique message ID."""
s = '%s.%08d.%d.%s' % (self.env.project_url.encode('utf-8'),
int(self.ticket.id), to_utimestamp(modtime),
rcpt.encode('ascii', 'ignore'))
dig = md5(s).hexdigest()
host = self.from_email[self.from_email.find('@') + 1:]
msgid = '<%03d.%s@%s>' % (len(s), dig, host)
return msgid
def send(self, torcpts, ccrcpts):
dest = self.reporter or 'anonymous'
hdrs = {}
hdrs['Message-ID'] = self.get_message_id(dest, self.modtime)
hdrs['X-Trac-Ticket-ID'] = str(self.ticket.id)
hdrs['X-Trac-Ticket-URL'] = self.data['ticket']['link']
if not self.newticket:
msgid = self.get_message_id(dest)
hdrs['In-Reply-To'] = msgid
hdrs['References'] = msgid
NotifyEmail.send(self, torcpts, ccrcpts, hdrs)
def get_text_width(self, text):
return text_width(text, ambiwidth=self.ambiwidth)
def obfuscate_email(self, text):
""" Obfuscate text when `show_email_addresses` is disabled in config.
Obfuscation happens once per email, regardless of recipients, so
cannot use permission-based obfuscation.
"""
if self.env.config.getbool('trac', 'show_email_addresses'):
return text
else:
return obfuscate_email_address(text)
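# --- Editor's note: hedged sketch, not part of Trac ---
# How the *_subject_template options are expanded (see format_subj above): a Genshi
# NewTextTemplate is rendered with the notification data in scope. The template text
# and values below are illustrative placeholders only.
def _example_subject_template():
    template = NewTextTemplate(u'$prefix #$id: $summary')
    return template.generate(prefix=u'[MyProject]', id=42,
                             summary=u'Fix the widget').render('text', encoding=None)
    # -> u'[MyProject] #42: Fix the widget'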
class BatchTicketNotifyEmail(NotifyEmail):
"""Notification of ticket batch modifications."""
template_name = "batch_ticket_notify_email.txt"
def __init__(self, env):
NotifyEmail.__init__(self, env)
def notify(self, tickets, new_values, comment, action, author):
"""Send batch ticket change notification e-mail (untranslated)"""
t = deactivate()
try:
self._notify(tickets, new_values, comment, action, author)
finally:
reactivate(t)
def _notify(self, tickets, new_values, comment, action, author):
self.tickets = tickets
self.reporter = ''
self.owner = ''
changes_descr = '\n'.join(['%s to %s' % (prop, val)
for (prop, val) in new_values.iteritems()])
tickets_descr = ', '.join(['#%s' % t for t in tickets])
subject = self.format_subj(tickets_descr)
link = self.env.abs_href.query(id=','.join([str(t) for t in tickets]))
self.data.update({
'tickets_descr': tickets_descr,
'changes_descr': changes_descr,
'comment': comment,
'action': action,
'author': author,
'subject': subject,
'ticket_query_link': link,
})
NotifyEmail.notify(self, tickets, subject, author)
def format_subj(self, tickets_descr):
template = self.config.get('notification','batch_subject_template')
template = NewTextTemplate(template.encode('utf8'))
prefix = self.config.get('notification', 'smtp_subject_prefix')
if prefix == '__default__':
prefix = '[%s]' % self.env.project_name
data = {
'prefix': prefix,
'tickets_descr': tickets_descr,
'env': self.env,
}
subj = template.generate(**data).render('text', encoding=None).strip()
return shorten_line(subj)
def get_recipients(self, tktids):
alltorecipients = set()
allccrecipients = set()
for t in tktids:
torecipients, ccrecipients, reporter, owner = \
get_ticket_notification_recipients(self.env, self.config, t)
alltorecipients.update(torecipients)
allccrecipients.update(ccrecipients)
return list(alltorecipients), list(allccrecipients)
| 40.863724
| 79
| 0.519822
|
afe61f47a159dfc0fe69c69ca9542e8ee7bfc5af
| 9,355
|
py
|
Python
|
modules/api/functional_test/live_tests/recordsets/get_recordset_test.py
|
slandry90/vinyldns
|
bf3122bdd5058af53561224adb1984e3b9f1f5bc
|
[
"Apache-2.0"
] | 333
|
2018-07-27T12:58:40.000Z
|
2022-03-16T23:05:37.000Z
|
modules/api/functional_test/live_tests/recordsets/get_recordset_test.py
|
slandry90/vinyldns
|
bf3122bdd5058af53561224adb1984e3b9f1f5bc
|
[
"Apache-2.0"
] | 1,010
|
2018-07-27T14:43:14.000Z
|
2022-03-25T09:29:43.000Z
|
modules/api/functional_test/live_tests/recordsets/get_recordset_test.py
|
slandry90/vinyldns
|
bf3122bdd5058af53561224adb1984e3b9f1f5bc
|
[
"Apache-2.0"
] | 123
|
2018-07-26T20:06:04.000Z
|
2022-02-16T17:12:17.000Z
|
import pytest
import uuid
from utils import *
from hamcrest import *
from vinyldns_python import VinylDNSClient
def test_get_recordset_no_authorization(shared_zone_test_context):
"""
Test getting a recordset without authorization
"""
client = shared_zone_test_context.ok_vinyldns_client
client.get_recordset(shared_zone_test_context.ok_zone['id'], '12345', sign_request=False, status=401)
def test_get_recordset(shared_zone_test_context):
"""
Test getting a recordset
"""
client = shared_zone_test_context.ok_vinyldns_client
result_rs = None
try:
new_rs = {
'zoneId': shared_zone_test_context.ok_zone['id'],
'name': 'test_get_recordset',
'type': 'A',
'ttl': 100,
'records': [
{
'address': '10.1.1.1'
},
{
'address': '10.2.2.2'
}
]
}
result = client.create_recordset(new_rs, status=202)
result_rs = client.wait_until_recordset_change_status(result, 'Complete')['recordSet']
# Get the recordset we just made and verify
result = client.get_recordset(result_rs['zoneId'], result_rs['id'])
result_rs = result['recordSet']
verify_recordset(result_rs, new_rs)
records = [x['address'] for x in result_rs['records']]
assert_that(records, has_length(2))
assert_that('10.1.1.1', is_in(records))
assert_that('10.2.2.2', is_in(records))
finally:
if result_rs:
delete_result = client.delete_recordset(result_rs['zoneId'], result_rs['id'], status=202)
client.wait_until_recordset_change_status(delete_result, 'Complete')
def test_get_recordset_zone_doesnt_exist(shared_zone_test_context):
"""
Test getting a recordset in a zone that doesn't exist should return a 404
"""
client = shared_zone_test_context.ok_vinyldns_client
new_rs = {
'zoneId': shared_zone_test_context.ok_zone['id'],
'name': 'test_get_recordset_zone_doesnt_exist',
'type': 'A',
'ttl': 100,
'records': [
{
'address': '10.1.1.1'
},
{
'address': '10.2.2.2'
}
]
}
result_rs = None
try:
result = client.create_recordset(new_rs, status=202)
result_rs = client.wait_until_recordset_change_status(result, 'Complete')['recordSet']
client.get_recordset('5678', result_rs['id'], status=404)
finally:
if result_rs:
delete_result = client.delete_recordset(result_rs['zoneId'], result_rs['id'], status=202)
client.wait_until_recordset_change_status(delete_result, 'Complete')
def test_get_recordset_doesnt_exist(shared_zone_test_context):
"""
Test getting a new recordset that doesn't exist should return a 404
"""
client = shared_zone_test_context.ok_vinyldns_client
client.get_recordset(shared_zone_test_context.ok_zone['id'], '123', status=404)
@pytest.mark.serial
def test_at_get_recordset(shared_zone_test_context):
"""
Test getting a recordset with name @
"""
client = shared_zone_test_context.ok_vinyldns_client
ok_zone = shared_zone_test_context.ok_zone
result_rs = None
try:
new_rs = {
'zoneId': ok_zone['id'],
'name': '@',
'type': 'TXT',
'ttl': 100,
'records': [
{
'text': 'someText'
}
]
}
result = client.create_recordset(new_rs, status=202)
result_rs = client.wait_until_recordset_change_status(result, 'Complete')['recordSet']
# Get the recordset we just made and verify
result = client.get_recordset(result_rs['zoneId'], result_rs['id'])
result_rs = result['recordSet']
expected_rs = new_rs
expected_rs['name'] = ok_zone['name']
verify_recordset(result_rs, expected_rs)
records = result_rs['records']
assert_that(records, has_length(1))
assert_that(records[0]['text'], is_('someText'))
finally:
if result_rs:
delete_result = client.delete_recordset(result_rs['zoneId'], result_rs['id'], status=202)
client.wait_until_recordset_change_status(delete_result, 'Complete')
def test_get_recordset_from_shared_zone(shared_zone_test_context):
"""
Test getting a recordset as the record group owner
"""
client = shared_zone_test_context.shared_zone_vinyldns_client
retrieved_rs = None
try:
new_rs = get_recordset_json(shared_zone_test_context.shared_zone,
"test_get_recordset", "TXT", [{'text':'should-work'}],
100,
shared_zone_test_context.shared_record_group['id'])
result = client.create_recordset(new_rs, status=202)
result_rs = client.wait_until_recordset_change_status(result, 'Complete')['recordSet']
# Get the recordset we just made and verify
ok_client = shared_zone_test_context.ok_vinyldns_client
retrieved = ok_client.get_recordset(result_rs['zoneId'], result_rs['id'])
retrieved_rs = retrieved['recordSet']
verify_recordset(retrieved_rs, new_rs)
assert_that(retrieved_rs['ownerGroupId'], is_(shared_zone_test_context.shared_record_group['id']))
assert_that(retrieved_rs['ownerGroupName'], is_('record-ownergroup'))
finally:
if retrieved_rs:
delete_result = client.delete_recordset(retrieved_rs['zoneId'], retrieved_rs['id'], status=202)
client.wait_until_recordset_change_status(delete_result, 'Complete')
def test_get_unowned_recordset_from_shared_zone_succeeds_if_record_type_approved(shared_zone_test_context):
"""
Test getting an unowned recordset with no admin rights succeeds if the record type is approved
"""
client = shared_zone_test_context.shared_zone_vinyldns_client
ok_client = shared_zone_test_context.ok_vinyldns_client
result_rs = None
try:
new_rs = get_recordset_json(shared_zone_test_context.shared_zone,
"test_get_unowned_recordset_approved_type", "A", [{"address": "1.2.3.4"}])
result = client.create_recordset(new_rs, status=202)
result_rs = client.wait_until_recordset_change_status(result, 'Complete')['recordSet']
# Get the recordset we just made and verify
retrieved = ok_client.get_recordset(result_rs['zoneId'], result_rs['id'], status=200)
retrieved_rs = retrieved['recordSet']
verify_recordset(retrieved_rs, new_rs)
finally:
if result_rs:
delete_result = ok_client.delete_recordset(result_rs['zoneId'], result_rs['id'], status=202)
ok_client.wait_until_recordset_change_status(delete_result, 'Complete')
def test_get_unowned_recordset_from_shared_zone_fails_if_record_type_not_approved(shared_zone_test_context):
"""
Test getting an unowned recordset with no admin rights fails if the record type is not approved
"""
client = shared_zone_test_context.shared_zone_vinyldns_client
result_rs = None
try:
new_rs = get_recordset_json(shared_zone_test_context.shared_zone,
"test_get_unowned_recordset", "MX", [{'preference': 3, 'exchange': 'mx'}])
result = client.create_recordset(new_rs, status=202)
result_rs = client.wait_until_recordset_change_status(result, 'Complete')['recordSet']
# Get the recordset we just made and verify
ok_client = shared_zone_test_context.ok_vinyldns_client
error = ok_client.get_recordset(result_rs['zoneId'], result_rs['id'], status=403)
assert_that(error, is_("User ok does not have access to view test-get-unowned-recordset.shared."))
finally:
if result_rs:
delete_result = client.delete_recordset(result_rs['zoneId'], result_rs['id'], status=202)
client.wait_until_recordset_change_status(delete_result, 'Complete')
def test_get_owned_recordset_from_not_shared_zone(shared_zone_test_context):
"""
Test getting a recordset as the record group owner not in a shared zone fails
"""
client = shared_zone_test_context.ok_vinyldns_client
result_rs = None
try:
new_rs = get_recordset_json(shared_zone_test_context.ok_zone,
"test_cant_get_owned_recordset", "TXT", [{'text':'should-work'}],
100,
shared_zone_test_context.shared_record_group['id'])
result = client.create_recordset(new_rs, status=202)
result_rs = client.wait_until_recordset_change_status(result, 'Complete')['recordSet']
# Get the recordset we just made and verify
shared_client = shared_zone_test_context.shared_zone_vinyldns_client
shared_client.get_recordset(result_rs['zoneId'], result_rs['id'], status=403)
finally:
if result_rs:
delete_result = client.delete_recordset(result_rs['zoneId'], result_rs['id'], status=202)
client.wait_until_recordset_change_status(delete_result, 'Complete')
| 40.497835
| 110
| 0.656547
|
f65d6e2d53d9176145030b1c5ac550c129f5a5ae
| 263
|
py
|
Python
|
src/python/374.guessNumber.py
|
witimlfl/leetcode-exercise
|
9449c41fa03b996a37923f1dede0933753691282
|
[
"MIT"
] | null | null | null |
src/python/374.guessNumber.py
|
witimlfl/leetcode-exercise
|
9449c41fa03b996a37923f1dede0933753691282
|
[
"MIT"
] | null | null | null |
src/python/374.guessNumber.py
|
witimlfl/leetcode-exercise
|
9449c41fa03b996a37923f1dede0933753691282
|
[
"MIT"
] | null | null | null |
##
# We are playing a guessing game. The rules are as follows:
# I pick a number from 1 to n. You have to guess which number I picked.
# Every time you guess wrong, I will tell you whether my number is higher or lower.
# You call a predefined API guess(int num), which returns 3 possible results (-1, 1 or 0):
# -1 : my number is lower
#  1 : my number is higher
#  0 : congratulations, you guessed it!
# #
# The guess(num) API is predefined by the judge; binary search on [1, n] finds the
# picked number in O(log n) calls.
class Solution:
    def guessNumber(self, n: int) -> int:
        lo, hi = 1, n
        while lo < hi:
            mid = (lo + hi) // 2
            if guess(mid) <= 0:   # picked number is mid or lower
                hi = mid
            else:                 # picked number is higher than mid
                lo = mid + 1
        return lo
| 18.785714
| 54
| 0.646388
|
3fa1b93cc6f9df37c4ed9c0133fbacbac5784c6c
| 1,082
|
py
|
Python
|
py3/tflib/ops/mlp.py
|
fr42k/gap-wgan-gp
|
4e373c43d606a1b83f76893d93f9cf8be8cd460d
|
[
"MIT"
] | null | null | null |
py3/tflib/ops/mlp.py
|
fr42k/gap-wgan-gp
|
4e373c43d606a1b83f76893d93f9cf8be8cd460d
|
[
"MIT"
] | null | null | null |
py3/tflib/ops/mlp.py
|
fr42k/gap-wgan-gp
|
4e373c43d606a1b83f76893d93f9cf8be8cd460d
|
[
"MIT"
] | null | null | null |
import tflib as lib
import tflib.ops.linear
import tensorflow as tf
def _ReLULayer(name, input_dim, output_dim, inputs):
output = lib.ops.linear.Linear(
name+'.Linear',
input_dim=input_dim,
output_dim=output_dim,
inputs=inputs,
initialization='glorot_he'
)
# output = tf.nn.relu(output)
# output = tf.tanh(output)
return output
def MLP(name, input_dim, hidden_dim, output_dim, n_layers, inputs):
if n_layers < 3:
raise Exception("An MLP with <3 layers isn't an MLP!")
output = _ReLULayer(
name+'.Input',
input_dim=input_dim,
output_dim=hidden_dim,
inputs=inputs
)
    for i in range(1, n_layers - 2):
output = _ReLULayer(
name+'.Hidden'+str(i),
input_dim=hidden_dim,
output_dim=hidden_dim,
inputs=output
)
# output = tf.stop_gradient(output)
return lib.ops.linear.Linear(
name+'.Output',
hidden_dim,
output_dim,
output,
initialization='glorot'
)
| 23.021277
| 67
| 0.59427
|
6480c6fd234378be80d778c442f12fe46365d118
| 812
|
py
|
Python
|
ChinaDistrict/ChinaDistrict/urls.py
|
singleye/Grocery
|
d009ea4a4447c9cbb913e16a66fa9e8f3ec99ffc
|
[
"MIT"
] | null | null | null |
ChinaDistrict/ChinaDistrict/urls.py
|
singleye/Grocery
|
d009ea4a4447c9cbb913e16a66fa9e8f3ec99ffc
|
[
"MIT"
] | null | null | null |
ChinaDistrict/ChinaDistrict/urls.py
|
singleye/Grocery
|
d009ea4a4447c9cbb913e16a66fa9e8f3ec99ffc
|
[
"MIT"
] | null | null | null |
"""ChinaDistrict URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('district', include('district.urls')),
]
| 35.304348
| 77
| 0.708128
|
35a5d21bd6b2213ec7af85882d4067297695df17
| 2,891
|
py
|
Python
|
users/webservice/sensor/bought_history.py
|
RaphaelPrevost/Back2Shops
|
5f2d369e82fe2a7b9b3a6c55782319b23d142dfd
|
[
"CECILL-B"
] | null | null | null |
users/webservice/sensor/bought_history.py
|
RaphaelPrevost/Back2Shops
|
5f2d369e82fe2a7b9b3a6c55782319b23d142dfd
|
[
"CECILL-B"
] | 6
|
2021-03-31T19:21:50.000Z
|
2022-01-13T01:46:09.000Z
|
users/webservice/sensor/bought_history.py
|
RaphaelPrevost/Back2Shops
|
5f2d369e82fe2a7b9b3a6c55782319b23d142dfd
|
[
"CECILL-B"
] | null | null | null |
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright © Dragon Dollar Limited
# contact: contact@dragondollar.com
#
# This software is a collection of webservices designed to provide a secure
# and scalable framework to build e-commerce websites.
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-B
# license as circulated by CEA, CNRS and INRIA at the following URL
# " http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
#
#############################################################################
import logging
from models.stats_log import get_bought_history
from webservice.sensor.base import SensorBaseResource
from B2SProtocol.constants import RESP_RESULT
from B2SUtils.db_utils import delete
class SensorBoughtHistoryResource(SensorBaseResource):
template = "sensor_bought_history.xml"
def _on_get(self, req, resp, conn, **kwargs):
histories = get_bought_history(conn)
return {'GET_R': {'objects': histories}}
def _on_post(self, req, resp, conn, **kwargs):
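        # Deletes the bought_history rows whose ids were acknowledged by the caller
        # (via the 'id_list' parameter) after the base handler has processed the request.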
try:
super(SensorBoughtHistoryResource, self)._on_post(req, resp, conn, **kwargs)
id_list = req.get_param('id_list')
if id_list:
where = {'id__in': tuple(id_list)}
delete(self.conn, 'bought_history', where=where)
logging.info('bought_history_del: %s', id_list)
return {'POST_R': {'res': RESP_RESULT.S}}
except Exception, e:
logging.error('bought_history_del_err: %s', e, exc_info=True)
return {'POST_R': {'res': RESP_RESULT.F}}
| 43.149254
| 88
| 0.690419
|
9ba48cc0103c66e24e2340ab53fb72c6c47e68d4
| 9,937
|
py
|
Python
|
pyswarms/single/global_best.py
|
msat59/pyswarms
|
0196301cee4a6c46e98186e3dc451393f0854ce8
|
[
"MIT"
] | null | null | null |
pyswarms/single/global_best.py
|
msat59/pyswarms
|
0196301cee4a6c46e98186e3dc451393f0854ce8
|
[
"MIT"
] | null | null | null |
pyswarms/single/global_best.py
|
msat59/pyswarms
|
0196301cee4a6c46e98186e3dc451393f0854ce8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
r"""
A Global-best Particle Swarm Optimization (gbest PSO) algorithm.
It takes a set of candidate solutions, and tries to find the best
solution using a position-velocity update method. Uses a
star-topology where each particle is attracted to the best
performing particle.
The position update can be defined as:
.. math::
x_{i}(t+1) = x_{i}(t) + v_{i}(t+1)
Where the position at the current timestep :math:`t` is updated using
the computed velocity at :math:`t+1`. Furthermore, the velocity update
is defined as:
.. math::
v_{ij}(t + 1) = w * v_{ij}(t) + c_{1}r_{1j}(t)[y_{ij}(t) − x_{ij}(t)]
+ c_{2}r_{2j}(t)[\hat{y}_{j}(t) − x_{ij}(t)]
Here, :math:`c1` and :math:`c2` are the cognitive and social parameters
respectively. They control the particle's behavior given two choices: (1) to
follow its *personal best* or (2) follow the swarm's *global best* position.
Overall, this dictates if the swarm is explorative or exploitative in nature.
In addition, a parameter :math:`w` controls the inertia of the swarm's
movement.
An example usage is as follows:
.. code-block:: python
import pyswarms as ps
from pyswarms.utils.functions import single_obj as fx
# Set-up hyperparameters
options = {'c1': 0.5, 'c2': 0.3, 'w':0.9}
# Call instance of GlobalBestPSO
optimizer = ps.single.GlobalBestPSO(n_particles=10, dimensions=2,
options=options)
# Perform optimization
stats = optimizer.optimize(fx.sphere, iters=100)
This algorithm was adapted from the earlier works of J. Kennedy and
R.C. Eberhart in Particle Swarm Optimization [IJCNN1995]_.
.. [IJCNN1995] J. Kennedy and R.C. Eberhart, "Particle Swarm Optimization,"
Proceedings of the IEEE International Joint Conference on Neural
Networks, 1995, pp. 1942-1948.
"""
# Import standard library
import logging
# Import modules
import numpy as np
import multiprocessing as mp
from collections import deque
from ..backend.operators import compute_pbest, compute_objective_function
from ..backend.topology import Star
from ..backend.handlers import BoundaryHandler, VelocityHandler, OptionsHandler
from ..base import SwarmOptimizer
from ..utils.reporter import Reporter
class GlobalBestPSO(SwarmOptimizer):
def __init__(
self,
n_particles,
dimensions,
options,
bounds=None,
oh_strategy=None,
bh_strategy="periodic",
velocity_clamp=None,
vh_strategy="unmodified",
center=1.00,
ftol=-np.inf,
ftol_iter=50,
init_pos=None,
):
"""Initialize the swarm
Attributes
----------
n_particles : int
number of particles in the swarm.
dimensions : int
number of dimensions in the space.
options : dict with keys :code:`{'c1', 'c2', 'w'}`
a dictionary containing the parameters for the specific
optimization technique.
* c1 : float
cognitive parameter
* c2 : float
social parameter
* w : float
inertia parameter
bounds : tuple of numpy.ndarray, optional
a tuple of size 2 where the first entry is the minimum bound while
the second entry is the maximum bound. Each array must be of shape
:code:`(dimensions,)`.
oh_strategy : dict, optional, default=None(constant options)
a dict of update strategies for each option.
bh_strategy : str
a strategy for the handling of out-of-bounds particles.
velocity_clamp : tuple, optional
a tuple of size 2 where the first entry is the minimum velocity and
the second entry is the maximum velocity. It sets the limits for
velocity clamping.
vh_strategy : str
a strategy for the handling of the velocity of out-of-bounds particles.
center : list (default is :code:`None`)
an array of size :code:`dimensions`
ftol : float
relative error in objective_func(best_pos) acceptable for convergence.
To deactivate it in order to process all iterations, use any negative value.
Deactivating ftol also disables ftol_iter.
Default is :code: `-np.inf` to disable ftol
ftol_iter : int
number of consecutive iterations over which the relative change in
objective_func(best_pos) is stalled or less than ftol. It works
when ftol is greater than zero (e.g. 1e-6)
Default is :code:`50`
init_pos : numpy.ndarray, optional
option to explicitly set the particles' initial positions. Set to
:code:`None` if you wish to generate the particles randomly.
"""
super(GlobalBestPSO, self).__init__(
n_particles=n_particles,
dimensions=dimensions,
options=options,
bounds=bounds,
velocity_clamp=velocity_clamp,
center=center,
ftol=ftol,
ftol_iter=ftol_iter,
init_pos=init_pos,
)
if oh_strategy is None:
oh_strategy = {}
# Initialize logger
self.rep = Reporter(logger=logging.getLogger(__name__))
# Initialize the resettable attributes
self.reset()
# Initialize the topology
self.top = Star()
self.bh = BoundaryHandler(strategy=bh_strategy)
self.vh = VelocityHandler(strategy=vh_strategy)
self.oh = OptionsHandler(strategy=oh_strategy)
self.name = __name__
def optimize(
self, objective_func, iters=1000, n_processes=None, verbose=True, **kwargs
):
"""Optimize the swarm for a number of iterations
Performs the optimization to evaluate the objective
function :code:`f` for a number of iterations :code:`iter.`
Parameters
----------
objective_func : callable
objective function to be evaluated
iters : int
number of iterations. Default is :code:`1000`
n_processes : int
number of processes to use for parallel particle evaluation (default: None = no parallelization)
verbose : bool
enable or disable the logs and progress bar (default: True = enable logs)
kwargs : dict
arguments for the objective function
Returns
-------
tuple
the global best cost and the global best position.
"""
# Apply verbosity
if verbose:
log_level = logging.INFO
else:
log_level = logging.NOTSET
self.rep.log("Obj. func. args: {}".format(kwargs), lvl=logging.DEBUG)
self.rep.log(
"Optimize for {} iters with {}".format(iters, self.options),
lvl=log_level,
)
# Populate memory of the handlers
self.bh.memory = self.swarm.position
self.vh.memory = self.swarm.position
# Setup Pool of processes for parallel evaluation
pool = None if n_processes is None else mp.Pool(n_processes)
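        # Personal-best costs start at infinity so the first evaluation always updates them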
self.swarm.pbest_cost = np.full(self.swarm_size[0], np.inf)
ftol_history = deque(maxlen=self.ftol_iter)
for i in self.rep.pbar(iters, self.name) if verbose else range(iters):
# Compute cost for current position and personal best
# fmt: off
self.swarm.current_cost = compute_objective_function(self.swarm, objective_func, pool=pool, **kwargs)
self.swarm.pbest_pos, self.swarm.pbest_cost = compute_pbest(self.swarm)
# Set best_cost_yet_found for ftol
best_cost_yet_found = self.swarm.best_cost
self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest(self.swarm)
# fmt: on
if verbose:
self.rep.hook(best_cost=self.swarm.best_cost)
# Save to history
hist = self.ToHistory(
best_cost=self.swarm.best_cost,
mean_pbest_cost=np.mean(self.swarm.pbest_cost),
mean_neighbor_cost=self.swarm.best_cost,
position=self.swarm.position,
velocity=self.swarm.velocity,
)
self._populate_history(hist)
# Verify stop criteria based on the relative acceptable cost ftol
relative_measure = self.ftol * (1 + np.abs(best_cost_yet_found))
delta = (
np.abs(self.swarm.best_cost - best_cost_yet_found)
< relative_measure
)
ftol_history.append(delta)
if i > self.ftol_iter:
if all(ftol_history):
break
# Perform options update
self.swarm.options = self.oh(
self.options, iternow=i, itermax=iters
)
# Perform velocity and position updates
self.swarm.velocity = self.top.compute_velocity(
self.swarm, self.velocity_clamp, self.vh, self.bounds
)
self.swarm.position = self.top.compute_position(
self.swarm, self.bounds, self.bh
)
# Obtain the final best_cost and the final best_position
final_best_cost = self.swarm.best_cost.copy()
final_best_pos = self.swarm.pbest_pos[
self.swarm.pbest_cost.argmin()
].copy()
# Write report in log and return final cost and position
self.rep.log(
"Optimization finished | best cost: {}, best pos: {}".format(
final_best_cost, final_best_pos
),
lvl=log_level,
)
# Close Pool of Processes
if n_processes is not None:
pool.close()
return (final_best_cost, final_best_pos)
| 37.357143
| 113
| 0.615377
|
800e6bef32823c09dc095cc31b16483b38617647
| 710
|
py
|
Python
|
setup.py
|
vtecftwy/ecutils
|
bb599663df355a35edd525747a04e241a45f6b34
|
[
"MIT"
] | 2
|
2021-06-10T02:06:17.000Z
|
2021-06-14T04:57:17.000Z
|
setup.py
|
vtecftwy/ecutils
|
bb599663df355a35edd525747a04e241a45f6b34
|
[
"MIT"
] | null | null | null |
setup.py
|
vtecftwy/ecutils
|
bb599663df355a35edd525747a04e241a45f6b34
|
[
"MIT"
] | null | null | null |
from setuptools import setup
# https://pythonhosted.org/an_example_pypi_project/setuptools.html
setup(
name='ecutils',
version='1.0.0b',
author='EC',
author_email='bitbucker@procurasia.com',
packages=['ecutils'],
scripts=[],
url='',
license='LICENSE.txt',
description='Set of utility functions used in several contexts',
long_description=open('README.md').read(),
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Operating System :: Microsoft :: Windows :: Windows 10",
"Programming Language :: Python :: 3.6",
"Topic :: Utilities",
],
install_requires=[
"bokeh",
"IPython",
"matplotlib",
"numpy",
"pandas",
"scipy"
],
)
| 22.1875
| 66
| 0.666197
|
80aa4c2b596f70bef15e213b57e573367fb3cc97
| 4,898
|
py
|
Python
|
src/holophrasm/build_payout_data_set.py
|
princeton-vl/MetaGen
|
6963b973f7b5da8e868ff8ad22033e319976d6cb
|
[
"BSD-2-Clause"
] | 16
|
2020-11-13T15:25:49.000Z
|
2022-02-17T02:06:25.000Z
|
src/holophrasm/build_payout_data_set.py
|
princeton-vl/MetaGen
|
6963b973f7b5da8e868ff8ad22033e319976d6cb
|
[
"BSD-2-Clause"
] | 2
|
2021-06-03T20:41:50.000Z
|
2022-01-04T06:45:16.000Z
|
src/holophrasm/build_payout_data_set.py
|
princeton-vl/MetaGen
|
6963b973f7b5da8e868ff8ad22033e319976d6cb
|
[
"BSD-2-Clause"
] | 4
|
2020-12-12T02:36:49.000Z
|
2021-06-03T19:21:52.000Z
|
import pickle as pickle
import interface
import withpool
import data_utils5 as data
import heapq
import numpy as np
import time
'''
Figure out the data set.
For each validation proposition, do the following:
list all the entails proof steps
for each proof step:
find the ten best next steps using pred and gen
        list all of the above trees and save them as 'wrong' trees
find the correct next step, save as 'correct' tree.
remove all of the correct trees from the wrong tree list if they appear
save the context expressions, the correct expressions, and the wrong expressions.
'''
BEAM_SIZE = 1
WRONG_SAMPLES = 2
global_interface = None
global_lm = None
def slow_delete_duplicates(inlist):
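    # Order-preserving de-duplication; relies on equality rather than hashing because
    # the tree objects being compared may not be hashable.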
out = []
for i in inlist:
if i not in out:
out.append(i)
return out
def initialize_interface(lm, directory):
global global_interface
global global_lm
global_interface = interface.ProofInterface(lm, directory=directory)
global_lm = lm
class PropositionsData:
def __init__(self, prop):
'''
This creates the data for a single proposition.
'''
start = time.time()
lm = global_lm
assert global_interface is not None
context = lm.standardize_context(prop)
self.wrong = []
self.correct = []
self.hyps = [h.tree for h in context.hyps if h.type == 'e']
# self.f = context.f
proc_trees = []
for t in prop.entails_proof_steps:
if t.tree in proc_trees:
continue # don't do duplicate trees
else:
proc_trees.append(t.tree)
# add the wrong steps
this_tree = t.tree.copy().replace_values(context.replacement_dict)
new_wrong = get_wrong(context, this_tree)
self.wrong += new_wrong
# # the proof step is just an e-hypothesis
# if t.prop.type == 'e':
# print 'e-hypothesis'
# print t.tree
# print t.prop.tree
# continue
# add the correct steps
correct_pre_sub = get_correct(prop, t)
self.correct += [tree.replace_values(context.replacement_dict) for tree in correct_pre_sub]
# slow, but whatever.
self.wrong = [tree for tree in self.wrong if tree not in self.correct]
self.wrong = slow_delete_duplicates(self.wrong)
self.correct = slow_delete_duplicates(self.correct)
print prop.label, time.time()-start, 'with hyps/correct/wrong', len(self.hyps),len(self.correct),len(self.wrong)
# if any('wps' in tree for tree in self.correct):
# print 'WPSWPSWPSWPSWPSWPSWPSWPSWPSWPSWPSWPS'
# print self.correct, prop.f
# for t in prop.entails_proof_steps:
# print t.tree
# print t.unconstrained
def get_correct(context, t):
'''
gets the correct hypotheses from this context.
context is the un-standardized context
'''
prop = t.prop
fit = data.prop_applies_to_statement(t.tree, prop, context)
#print fit
assert fit is not None # the correct fit needs to work
for var, tree in zip(prop.unconstrained_variables, t.unconstrained):
fit[var] = tree
hyps = [h.tree.copy().replace(fit) for h in prop.hyps if h.type == 'e']
#print 'hyps', hyps
return hyps
def get_wrong(context, tree):
'''
context is the standardized context
'''
# generate the prop list
labels, log_probs = global_interface.props(tree, context)
log_probs -= np.max(log_probs)
heap = []
for l, p in zip(labels, log_probs):
heapq.heappush(heap, (-1.0*p, l, None) )
# and now extract elements from the heap until we're done
out = []
while len(out)<WRONG_SAMPLES and len(heap)>0:
child = process_child(tree, context, heap)
if child is not None:
out+=child
return out
# we ignore the parent tree restriction: too annoying to keep track of
def process_child(selftree, context, heap):
child_params = heapq.heappop(heap)
nlp, label, tree = child_params
lp = -nlp
if tree is None:
lptrees = global_interface.apply_prop(selftree, context, label, n=BEAM_SIZE)
else:
# we've already expanded this one
lptrees = [(lp, tree)]
child = None
while child is None and len(lptrees)>0:
lp_new, trees = lptrees.pop(0)
# print child_params[1], trees
child = trees # we just need the list of trees
# and now (possibly) add things back to the heap
if len(lptrees)>0:
# add the rest of the items back onto the heap
for lptree in lptrees:
this_lp, this_tree = lptree
                this_lp = this_lp + lp - lp_new
heapq.heappush(heap, (-1.0*this_lp, label, this_tree))
return child
| 31.397436
| 120
| 0.626786
|
4b97f80e6db4561fede052fc2630cb1705d6b11e
| 1,855
|
py
|
Python
|
tools/saber_plot/diag.py
|
NOAA-EMC/saber
|
9e9081573757f2c524f4a9ff33838d47961ae4fb
|
[
"Apache-2.0"
] | 3
|
2020-10-29T21:59:35.000Z
|
2021-12-17T18:44:40.000Z
|
tools/saber_plot/diag.py
|
NOAA-EMC/saber
|
9e9081573757f2c524f4a9ff33838d47961ae4fb
|
[
"Apache-2.0"
] | null | null | null |
tools/saber_plot/diag.py
|
NOAA-EMC/saber
|
9e9081573757f2c524f4a9ff33838d47961ae4fb
|
[
"Apache-2.0"
] | 3
|
2020-10-30T17:15:36.000Z
|
2021-06-29T19:16:52.000Z
|
#!/usr/bin/env python3
import argparse
from netCDF4 import Dataset
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import os
def diag(testdata, test, mpi, omp, suffix, testfig):
"""! Plot script for the "diagnostic" files produced by BUMP"""
# Open file
f = Dataset(testdata + "/" + test + "/test_" + mpi + "-" + omp + "_" + suffix + ".nc", "r", format="NETCDF4")
# Get _FillValue
_FillValue = f.__dict__["_FillValue"]
# Diagnostics list
diag_list = ["coef_ens","fit_rh","fit_rv"]
# Plots
for diag in diag_list:
fig, ax = plt.subplots()
fig.subplots_adjust(right=0.8)
cmap = matplotlib.cm.get_cmap('Spectral')
ax.set_title(diag)
valid = False
for group in f.groups:
if group != "cov_full_vertical":
# Get vertical unit
vunit = f.groups[group]["vunit"][:]
vunitmin = np.min(vunit)
vunitmax = np.max(vunit)
if vunitmin < vunitmax:
ax.set_ylim([vunitmin,vunitmax])
# Get number of levels
nl0 = vunit.shape[0]
# Profiles only
if nl0 > 1:
for subgroup in f.groups[group].groups:
if (diag in f.groups[group].groups[subgroup].variables):
ax.plot(f.groups[group].groups[subgroup][diag][:], vunit, label=group + " - " + subgroup)
valid = True
if (valid):
# Single legend
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc='center right')
# Save and close figure
plt.savefig(testfig + "/test_" + mpi + "-" + omp + "_" + suffix + "_" + diag + ".jpg", format="jpg", dpi=300)
plt.close()
else:
# Just close the figure
plt.close()
| 30.409836
| 118
| 0.56442
|
10a8c62b9f619636e5c609b19129769285dbbc40
| 1,536
|
py
|
Python
|
setup.py
|
moreati/doctest-prefix-all-the-strings
|
886d94261afb5586ecbf8b701c766dcb29fdcd6d
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
moreati/doctest-prefix-all-the-strings
|
886d94261afb5586ecbf8b701c766dcb29fdcd6d
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
moreati/doctest-prefix-all-the-strings
|
886d94261afb5586ecbf8b701c766dcb29fdcd6d
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup
import io
import os
def read(fname, encoding='utf-8'):
here = os.path.dirname(__file__)
with io.open(os.path.join(here, fname), encoding=encoding) as f:
return f.read()
setup(
name='pretext',
version='0.0.4',
description='Use doctest with bytes, str & unicode on Python 2.x and 3.x',
long_description=read('README.rst'),
url='https://github.com/moreati/b-prefix-all-the-doctests',
author='Alex Willmer',
author_email='alex@moreati.org.uk',
license='Apache Software License 2.0',
py_modules=['pretext'],
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Documentation',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Testing',
],
keywords='doctest bytes unicode bytestring prefix literal string str',
)
| 32.680851
| 78
| 0.626953
|
01ba907a6f782e12d39359d431ec19d6190beeab
| 2,644
|
py
|
Python
|
src/simulation.py
|
oKermorgant/ros2_2020
|
d6008446db4b14092f48a5ec66f08f8438481632
|
[
"MIT"
] | 3
|
2021-01-07T21:24:29.000Z
|
2021-05-12T04:13:55.000Z
|
src/simulation.py
|
oKermorgant/ros2_2020
|
d6008446db4b14092f48a5ec66f08f8438481632
|
[
"MIT"
] | null | null | null |
src/simulation.py
|
oKermorgant/ros2_2020
|
d6008446db4b14092f48a5ec66f08f8438481632
|
[
"MIT"
] | 1
|
2021-01-09T02:40:29.000Z
|
2021-01-09T02:40:29.000Z
|
#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
import numpy as np
from geometry_msgs.msg import Twist, TransformStamped
from tf2_ros import TransformBroadcaster
a = 2
b = 3
dt = 0.05
class Robot:
def __init__(self, node, name):
self.name = name
self.tf = TransformStamped()
self.tf.child_frame_id = name + '/base_link'
self.tf.header.frame_id = 'map'
if name == 'r2d2':
self.x = 0.
self.y = 0.
self.theta = 0.
self.t = 0.
else:
self.cmd = None
if name == 'bb8':
self.x = 2.
self.y = 0.
self.theta = 0.
else:
self.x = -2.
self.y = 0.
self.theta = 0.
# also create subscriber
self.cmd_sub = node.create_subscription(
Twist,
name + '/cmd_vel',
self.cmd_callback,
10)
def cmd_callback(self, msg):
self.cmd = msg
def update_tf(self, node):
# update position
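        # r2d2 follows a fixed parametric curve and its heading is aligned with the
        # velocity direction; the other robots integrate simple unicycle kinematics
        # from their latest cmd_vel message.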
if self.name == 'r2d2':
self.t += dt
c,s = np.cos(.5*self.t),np.sin(.5*self.t)
self.x = (a + b*c)*c
self.y = (a + b*c)*s
vx = -a*s-2*b*c*s
vy = a*c + b - 2*b*s*s
self.theta = np.arctan2(vy, vx)
elif self.cmd is not None:
v,w = self.cmd.linear.x, self.cmd.angular.z
self.x += v * np.cos(self.theta)*dt
self.y += v * np.sin(self.theta)*dt
self.theta += w*dt
self.tf.header.stamp = node.get_clock().now().to_msg()
self.tf.transform.translation.x = self.x
self.tf.transform.translation.y = self.y
self.tf.transform.rotation.z = np.sin(self.theta/2)
self.tf.transform.rotation.w = np.cos(self.theta/2)
node.br.sendTransform(self.tf)
class SimulationNode(Node):
def __init__(self):
super().__init__('simulation')
self.robots = [Robot(self, name) for name in ('bb8','r2d2','d0')]
self.br = TransformBroadcaster(self)
self.create_timer(dt, self.publish)
def publish(self):
for robot in self.robots:
robot.update_tf(self)
def main(args=None):
rclpy.init(args=args)
simulation = SimulationNode()
rclpy.spin(simulation)
simulation.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
| 26.178218
| 73
| 0.49357
|
e3f51a16815448c7e661ab273f7e3246c0a3696a
| 724
|
py
|
Python
|
Python/B9-Sanduhr_rueckwaerts/B9-Sanduhr_rueckwaerts.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
Python/B9-Sanduhr_rueckwaerts/B9-Sanduhr_rueckwaerts.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
Python/B9-Sanduhr_rueckwaerts/B9-Sanduhr_rueckwaerts.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
Reihe = 0
Spalte = 0
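# Button A: light the 5x5 LED matrix one LED at a time (row by row), then switch the
# LEDs off again in reverse order, giving an hourglass (Sanduhr) animation run backwards.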
def on_button_pressed_a():
global Reihe, Spalte
basic.show_leds("""
. . . . .
. . . . .
. . . . .
. . . . .
. . . . .
""")
Reihe = 0
Spalte = 0
for index in range(5):
for index2 in range(5):
led.toggle(Spalte, Reihe)
basic.pause(100)
Spalte += 1
Reihe += 1
Spalte = 0
Reihe = 4
Spalte = 4
for index3 in range(5):
for index4 in range(5):
led.toggle(Spalte, Reihe)
basic.pause(100)
Spalte += -1
Reihe += -1
Spalte = 4
input.on_button_pressed(Button.A, on_button_pressed_a)
| 22.625
| 55
| 0.43232
|
ece41a236dd85c1d68602fba2604eac373823ee4
| 8,739
|
py
|
Python
|
processor/processor_protonet.py
|
maxstrobel/HCN-PrototypeLoss-PyTorch
|
442d5915b36ecc21dfe98970b85757d2772e6d35
|
[
"MIT"
] | 24
|
2019-03-02T06:09:26.000Z
|
2021-12-13T17:21:30.000Z
|
processor/processor_protonet.py
|
maxstrobel/HCN-PrototypeLoss-PyTorch
|
442d5915b36ecc21dfe98970b85757d2772e6d35
|
[
"MIT"
] | 1
|
2019-03-02T03:17:30.000Z
|
2019-03-19T11:49:34.000Z
|
processor/processor_protonet.py
|
maxstrobel/HCN-PrototypeLoss-PyTorch
|
442d5915b36ecc21dfe98970b85757d2772e6d35
|
[
"MIT"
] | 5
|
2019-03-21T15:31:42.000Z
|
2020-05-30T17:41:28.000Z
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from processor.processor import Processor
from processor.utils.utils import DictAction, euclidean_dist
class ProcessorProtoNet(Processor):
"""
Processor for training with prototype loss; adapted:
* data sampling
* training / loss function
* testing
* added some parser options
"""
def __init__(self, argv):
super().__init__(argv)
def load_data(self):
"""
Load data with special sampler for training of prototype loss
"""
if 'debug' not in self.arg.train_feeder_args:
self.arg.train_feeder_args['debug'] = self.arg.debug
if 'debug' not in self.arg.test_feeder_args:
self.arg.test_feeder_args['debug'] = self.arg.debug
data_loader = dict()
if self.arg.phase == 'train':
feeder_train = self.fileio.load_feeder(self.arg.feeder, **self.arg.train_feeder_args)
sampler_train = self.fileio.load_sampler(self.arg.sampler, labels=feeder_train.get_label(),
**self.arg.train_sampler_args)
data_loader['train'] = DataLoader(
dataset=feeder_train,
batch_sampler=sampler_train,
num_workers=self.arg.num_worker)
self.logger.print_log(f'DataLoader: {len(data_loader["train"].dataset)} training samples loaded')
if self.arg.test_feeder_args:
feeder_test = self.fileio.load_feeder(self.arg.feeder, **self.arg.test_feeder_args)
sampler_test = self.fileio.load_sampler(self.arg.sampler, labels=feeder_test.get_label(),
**self.arg.test_sampler_args)
data_loader['test'] = DataLoader(
dataset=feeder_test,
batch_sampler=sampler_test,
num_workers=self.arg.num_worker)
self.logger.print_log(f'DataLoader: {len(data_loader["test"].dataset)} test samples loaded')
return data_loader
def train(self):
"""
        Train the model for one epoch using the prototype loss procedure
"""
n_class = self.arg.train_sampler_args['num_classes']
n_support = self.arg.train_sampler_args['num_support']
n_query = self.arg.train_sampler_args['num_query']
self.model.train()
loader = self.data_loader['train']
loss_value = []
with tqdm(total=len(loader)) as t:
for data, label in loader:
# get data
data = data.float().to(self.dev)
# forward
z = self.model(data)
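                # The batch sampler is expected to yield the support samples first and the
                # query samples last, which is what the slicing below relies on.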
z_support = z[:n_class * n_support]
z_query = z[-n_class * n_query:]
loss, distances = self.loss(z_query, z_support, n_class, n_support, n_query)
# backward
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# statistics
self.iter_info['loss'] = loss.data.item()
self.iter_info['learning rate'] = self.optimizer.param_groups[0]["lr"]
loss_value.append(self.iter_info['loss'])
self.show_iter_info(t)
self.meta_info['iter'] += 1
self.epoch_info['mean_loss_train'] = np.mean(loss_value)
self.show_epoch_info()
def test(self):
"""
        Test the model using the prototype loss procedure
"""
n_class = self.arg.test_sampler_args['num_classes']
n_support = self.arg.test_sampler_args['num_support']
n_query = self.arg.test_sampler_args['num_query']
self.model.eval()
loader = self.data_loader['test']
loss_value = []
prediction_frag = []
result_frag = []
label_frag = []
with tqdm(total=len(loader)) as t:
for data, label in loader:
# get data
data = data.float().to(self.dev)
label = label.float().to(self.dev)
# inference
with torch.no_grad():
z = self.model(data)
z_support = z[:n_class * n_support]
z_query = z[-n_class * n_query:]
loss, distances = self.loss(z_query, z_support, n_class, n_support, n_query)
# statistics
class_labels = label[:n_class * n_support:n_support] # Determine labels of run
_, distance_order = distances.sort() # get indexes for downwardly sorted probabilities
if isinstance(self.loss, KNNLoss):
distance_order = distance_order // n_support
prediction = class_labels[distance_order] # get corresponding class labels
result_frag.append(z_query.data.cpu().numpy())
loss_value.append(loss)
label_frag.append(label[-n_class * n_query:].data.cpu().numpy().reshape(-1))
prediction_frag.append(prediction.data.cpu().numpy().reshape(n_class * n_query, -1))
self.iter_info['loss'] = loss.data.item()
self.update_progress_bar(t)
# evaluation
self.result['output'] = np.concatenate(result_frag)
self.result['prediction'] = np.concatenate(prediction_frag)
self.result['label'] = np.concatenate(label_frag)
self.result['classes'] = np.unique(self.result['label'])
self.eval_info['mean_loss_test'] = np.mean(loss_value)
for k in self.arg.show_topk: # calculate top-k accuracy
self.eval_info[f'top_{k}_accuracy'] = self.calculate_topk(k)
self.show_eval_info()
@staticmethod
def get_parser(add_help=True):
"""
Extended argument parser with options for prototype loss
:param add_help: boolean flag to enable command line help
:return: parser
"""
# parameter priority: command line > config > default
parser = super(ProcessorProtoNet, ProcessorProtoNet).get_parser(add_help=add_help)
parser.description = 'ProtoNet Processor'
# sampler
parser.add_argument('--sampler', default=None, help='type of sampler')
parser.add_argument('--train_sampler_args', action=DictAction, default=dict(),
help='arguments for training sampler')
parser.add_argument('--test_sampler_args', action=DictAction, default=dict(),
help='arguments for test sampler')
return parser
class PrototypeLoss(nn.Module):
def forward(self, z_query, z_support, n_class, n_support, n_query):
"""
Calculate prototype loss
:param z_query: Query points
:param z_support: Support points
:param n_class: Number of classes
:param n_support: Number of support points
:param n_query: Number of query points
:return: prototype loss
"""
device = z_query.device
# Calculate class-wise prototypes and determine distance to query samples
z_proto = z_support.view(n_class, n_support, -1).mean(dim=1)
distances = euclidean_dist(z_query, z_proto)
# Create target: n_class x n_query x 1 with step-wise ascending labels in first dimension
target = torch.arange(n_class).view(n_class, 1, 1).expand(n_class, n_query, 1)
target = target.long().to(device)
# Softmax over distances
log_p_y = F.log_softmax(-distances, dim=1).view(n_class, n_query, n_class)
loss = -log_p_y.gather(2, target).mean()
return loss, distances
class KNNLoss(nn.Module):
def forward(self, z_query, z_support, n_class, n_support, n_query):
"""
Calculate modified version of prototype loss
:param z_query: Query points
:param z_support: Support points
:param n_class: Number of classes
:param n_support: Number of support points
:param n_query: Number of query points
:return: modified form of prototype loss
"""
device = z_query.device
distances = euclidean_dist(z_query, z_support)
# Create target: n_class x n_query x 1 x n_query with block-wise ascending labels in first dimension
target = torch.arange(n_class).view(n_class, 1, 1, 1).expand(n_class, n_query, 1, n_support)
target = target.long().to(device)
log_p_y = F.log_softmax(-distances, dim=1).view(n_class, n_query, n_class, n_support)
loss = -log_p_y.gather(2, target).mean()
return loss, distances
| 40.271889
| 109
| 0.606591
|
7f0e818420902b999d6f027898cebc484c10d4b7
| 6,221
|
py
|
Python
|
omaha_server/omaha/builder.py
|
dentalwings/omaha-server
|
3d8e18c8f4aac4eb16445c0f3160ed1fc2fc8de5
|
[
"Apache-2.0"
] | 2
|
2019-06-13T20:47:18.000Z
|
2022-03-31T03:14:54.000Z
|
omaha_server/omaha/builder.py
|
dentalwings/omaha-server
|
3d8e18c8f4aac4eb16445c0f3160ed1fc2fc8de5
|
[
"Apache-2.0"
] | 1
|
2020-02-26T20:03:27.000Z
|
2020-02-26T20:03:27.000Z
|
omaha_server/omaha/builder.py
|
dentalwings/omaha-server
|
3d8e18c8f4aac4eb16445c0f3160ed1fc2fc8de5
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
This software is licensed under the Apache 2 license, quoted below.
Copyright 2014 Crystalnix Limited
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
from builtins import filter
from functools import partial, reduce
from uuid import UUID
from django.utils.timezone import now
from django.db.models import Q
from lxml import etree
from cacheops import cached_as
from omaha import tasks
from omaha.models import Version
from omaha.parser import parse_request
from omaha import parser
from omaha.statistics import is_user_active
from omaha.core import (Response, App, Updatecheck_negative, Manifest, Updatecheck_positive,
Packages, Package, Actions, Action, Event, Data)
__all__ = ['build_response']
def on_event(event_list, event):
event_list.append(Event())
return event_list
def on_data(data_list, data, version):
name = data.get('name')
if name == 'untrusted':
_data = Data('untrusted')
elif name == 'install':
index = data.get('index')
data_obj_list = filter(lambda d: d.index == index, version.app.data_set.all())
try:
_data = Data('install', index=index, text=next(data_obj_list).value)
except StopIteration:
_data = Data('install', index=index, status='error-nodata')
data_list.append(_data)
return data_list
def on_action(action_list, action):
action = Action(
event=action.get_event_display(),
**action.get_attributes()
)
action_list.append(action)
return action_list
def is_new_user(version):
if version == '':
return True
return False
@cached_as(Version, timeout=60)
def _get_version(partialupdate, app_id, platform, channel, version, date=None):
date = date or now()
qs = Version.objects.select_related('app')
qs = qs.filter_by_enabled(app=app_id,
platform__name=platform,
channel__name=channel)
qs = qs.filter(version__gt=version) if version else qs
qs = qs.prefetch_related("actions", "partialupdate")
if partialupdate:
try:
qs = qs.filter(partialupdate__is_enabled=True,
partialupdate__start_date__lte=date,
partialupdate__end_date__gte=date)
critical_version = qs.filter(is_critical=True).order_by('version').cache().first()
new_version = qs.cache().latest('version')
except Version.DoesNotExist:
return None
else:
qs = qs.filter(Q(partialupdate__isnull=True)
| Q(partialupdate__is_enabled=False))
try:
critical_version = qs.filter(is_critical=True).order_by('version').cache().first()
new_version = qs.cache().latest('version')
except:
raise Version.DoesNotExist
if not is_new_user(version) and critical_version:
return critical_version
return new_version
def get_version(app_id, platform, channel, version, userid, date=None):
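    # First try to serve a version that is under an active partial update; if the user
    # does not qualify (new user excluded, not an active user, or outside the rollout
    # percentage), fall back to the latest fully released version.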
try:
new_version = _get_version(True, app_id, platform, channel, version, date=date)
if not new_version:
raise Version.DoesNotExist
if new_version.partialupdate.exclude_new_users and is_new_user(version):
raise Version.DoesNotExist
if not is_user_active(new_version.partialupdate.active_users, userid):
raise Version.DoesNotExist
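        # Deterministically bucket users by their UUID so that roughly `percent` % of
        # users fall into the partial update.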
userid_int = UUID(userid).int
percent = new_version.partialupdate.percent
if not (userid_int % int(100 / percent)) == 0:
raise Version.DoesNotExist
except Version.DoesNotExist:
new_version = _get_version(False, app_id, platform, channel, version, date=date)
return new_version
def on_app(apps_list, app, os, userid):
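    # Builds the <app> element of the update response: a negative updatecheck when no
    # newer version applies to this user, otherwise a manifest describing the package
    # and any follow-up actions.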
app_id = app.get('appid')
version = app.get('version')
platform = os.get('platform')
channel = parser.get_channel(app)
ping = bool(app.findall('ping'))
events = reduce(on_event, app.findall('event'), [])
build_app = partial(App, app_id, status='ok', ping=ping, events=events)
updatecheck = app.findall('updatecheck')
try:
version = get_version(app_id, platform, channel, version, userid)
except Version.DoesNotExist:
apps_list.append(
build_app(updatecheck=Updatecheck_negative() if updatecheck else None))
return apps_list
data_list = reduce(partial(on_data, version=version), app.findall('data'), [])
build_app = partial(build_app, data_list=data_list)
if updatecheck:
actions = reduce(on_action, version.actions.all(), [])
updatecheck = Updatecheck_positive(
urls=[version.file_url],
manifest=Manifest(
version=str(version.version),
packages=Packages([Package(
name=version.file_package_name,
required='true',
size=str(version.file_size),
hash=version.file_hash,
)]),
actions=Actions(actions) if actions else None,
)
)
apps_list.append(build_app(updatecheck=updatecheck))
else:
apps_list.append(build_app())
return apps_list
def build_response(request, pretty_print=True, ip=None):
obj = parse_request(request)
tasks.collect_statistics.apply_async(args=(request, ip), queue='transient')
userid = obj.get('userid')
apps = obj.findall('app')
apps_list = reduce(partial(on_app, os=obj.os, userid=userid), apps, [])
response = Response(apps_list, date=now())
return etree.tostring(response, pretty_print=pretty_print, xml_declaration=True, encoding='UTF-8')
| 33.627027
| 102
| 0.665327
|
9c17019aa26c2b72d006b13279654c8cba939b82
| 3,139
|
py
|
Python
|
django_mailbox/south_migrations/0006_auto__add_field_message_in_reply_to.py
|
mathandpencil/django-mailbox
|
2932403417f51e51bec7ca2146cd294b3faef4a4
|
[
"MIT"
] | null | null | null |
django_mailbox/south_migrations/0006_auto__add_field_message_in_reply_to.py
|
mathandpencil/django-mailbox
|
2932403417f51e51bec7ca2146cd294b3faef4a4
|
[
"MIT"
] | null | null | null |
django_mailbox/south_migrations/0006_auto__add_field_message_in_reply_to.py
|
mathandpencil/django-mailbox
|
2932403417f51e51bec7ca2146cd294b3faef4a4
|
[
"MIT"
] | null | null | null |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Message.in_reply_to'
db.add_column('django_mailbox_message', 'in_reply_to',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='replies', null=True, to=orm['django_mailbox.Message']),
keep_default=False)
# Adding M2M table for field references on 'Message'
db.create_table('django_mailbox_message_references', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_message', models.ForeignKey(orm['django_mailbox.message'], on_delete=models.CASCADE, null=False)),
('to_message', models.ForeignKey(orm['django_mailbox.message'], on_delete=models.CASCADE, null=False))
))
db.create_unique('django_mailbox_message_references', ['from_message_id', 'to_message_id'])
def backwards(self, orm):
# Deleting field 'Message.in_reply_to'
db.delete_column('django_mailbox_message', 'in_reply_to_id')
# Removing M2M table for field references on 'Message'
db.delete_table('django_mailbox_message_references')
models = {
'django_mailbox.mailbox': {
'Meta': {'object_name': 'Mailbox'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uri': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'django_mailbox.message': {
'Meta': {'object_name': 'Message'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_reply_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'replies'", 'null': 'True', 'to': "orm['django_mailbox.Message']"}),
'mailbox': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['django_mailbox.Mailbox']"}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'outgoing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'processed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'references': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'referenced_by'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['django_mailbox.Message']"}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['django_mailbox']
| 57.072727
| 215
| 0.614845
|
4aaa3f0a0a0b91a6139c800d10e7fdfbb82a7304
| 210
|
py
|
Python
|
app/favorite/renderers.py
|
tonyguesswho/favorite-things
|
cab2ff4a68ce468dbcbe2d6dd42991bcad9fc127
|
[
"MIT"
] | null | null | null |
app/favorite/renderers.py
|
tonyguesswho/favorite-things
|
cab2ff4a68ce468dbcbe2d6dd42991bcad9fc127
|
[
"MIT"
] | 5
|
2021-03-09T13:00:32.000Z
|
2022-02-26T15:45:12.000Z
|
app/favorite/renderers.py
|
tonyguesswho/favorite-things
|
cab2ff4a68ce468dbcbe2d6dd42991bcad9fc127
|
[
"MIT"
] | 2
|
2020-01-16T08:39:49.000Z
|
2020-01-17T16:18:44.000Z
|
from core.renderers import CoreJSONRenderer
class FavoriteJsonRenderer(CoreJSONRenderer):
object_label = 'favorite'
pagination_object_label = 'favorites'
pagination_count_label = 'favoritesCount'
| 26.25
| 45
| 0.8
|
bcfc43dcc756eaaba86e1f1498174a9666d540f0
| 9,328
|
py
|
Python
|
MANN/Model.py
|
hmishra2250/NTM-One-Shot-TF
|
bff66a87e89b65103375691b5512bac4ce4bd55f
|
[
"MIT"
] | 265
|
2017-01-26T17:56:57.000Z
|
2021-08-09T11:44:45.000Z
|
MANN/Model.py
|
hmishra2250/NTM-One-Shot-TF
|
bff66a87e89b65103375691b5512bac4ce4bd55f
|
[
"MIT"
] | 11
|
2017-05-09T21:42:26.000Z
|
2019-05-13T06:50:10.000Z
|
MANN/Model.py
|
hmishra2250/NTM-One-Shot-TF
|
bff66a87e89b65103375691b5512bac4ce4bd55f
|
[
"MIT"
] | 86
|
2017-01-30T17:38:39.000Z
|
2021-07-17T03:31:31.000Z
|
import tensorflow as tf
import numpy as np
from .Utils.init import weight_and_bias_init, shared_glorot_uniform, shared_one_hot
from .Utils.similarities import cosine_similarity
from .Utils.tf_utils import shared_float32
from .Utils.tf_utils import update_tensor
def memory_augmented_neural_network(input_var, target_var, \
batch_size=16, nb_class=5, memory_shape=(128, 40), \
controller_size=200, input_size=20 * 20, nb_reads=4):
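    # High-level wiring (as suggested by the variable names below): an LSTM controller
    # reads from an external memory using cosine-similarity (content-based) addressing
    # and writes to the least recently used slots (wlu/wu), in the style of LRUA
    # memory-augmented networks for one-shot learning.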
## input_var has dimensions (batch_size, time, input_dim)
## target_var has dimensions (batch_size, time) (label indices)
M_0 = shared_float32(1e-6 * np.ones((batch_size,) + memory_shape), name='memory')
c_0 = shared_float32(np.zeros((batch_size, controller_size)), name='memory_cell_state')
h_0 = shared_float32(np.zeros((batch_size, controller_size)), name='hidden_state')
r_0 = shared_float32(np.zeros((batch_size, nb_reads * memory_shape[1])), name='read_vector')
wr_0 = shared_one_hot((batch_size, nb_reads, memory_shape[0]), name='wr')
wu_0 = shared_one_hot((batch_size, memory_shape[0]), name='wu')
def shape_high(shape):
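        # Glorot/Xavier-style uniform initialisation bound, roughly sqrt(6 / (fan_in + fan_out)).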
shape = np.array(shape)
if isinstance(shape, int):
high = np.sqrt(6. / shape)
else:
high = np.sqrt(6. / (np.sum(shape[:2]) * np.prod(shape[2:])))
return (list(shape),high)
with tf.variable_scope("Weights"):
shape, high = shape_high((nb_reads, controller_size, memory_shape[1]))
W_key = tf.get_variable('W_key', shape=shape,initializer=tf.random_uniform_initializer(-1*high, high))
b_key = tf.get_variable('b_key', shape=(nb_reads, memory_shape[1]),initializer=tf.constant_initializer(0))
shape, high = shape_high((nb_reads, controller_size, memory_shape[1]))
W_add = tf.get_variable('W_add', shape=shape,initializer=tf.random_uniform_initializer(-1*high, high))
b_add = tf.get_variable('b_add', shape=(nb_reads, memory_shape[1]),initializer=tf.constant_initializer(0))
shape, high = shape_high((nb_reads, controller_size, 1))
W_sigma = tf.get_variable('W_sigma', shape=shape,initializer=tf.random_uniform_initializer(-1*high, high))
b_sigma = tf.get_variable('b_sigma', shape=(nb_reads, 1),initializer=tf.constant_initializer(0))
shape, high = shape_high((input_size + nb_class, 4*controller_size))
W_xh = tf.get_variable('W_xh', shape=shape,initializer=tf.random_uniform_initializer(-1*high, high))
b_h = tf.get_variable('b_xh', shape=(4*controller_size),initializer=tf.constant_initializer(0))
shape, high = shape_high((controller_size + nb_reads * memory_shape[1], nb_class))
W_o = tf.get_variable('W_o', shape=shape,initializer=tf.random_uniform_initializer(-1*high, high))
b_o = tf.get_variable('b_o', shape=(nb_class),initializer=tf.constant_initializer(0))
shape, high = shape_high((nb_reads * memory_shape[1], 4 * controller_size))
W_rh = tf.get_variable('W_rh', shape=shape,initializer=tf.random_uniform_initializer(-1*high, high))
shape, high = shape_high((controller_size, 4*controller_size))
W_hh = tf.get_variable('W_hh', shape=shape,initializer=tf.random_uniform_initializer(-1*high, high))
gamma = tf.get_variable('gamma', shape=[1], initializer=tf.constant_initializer(0.95))
def slice_equally(x, size, nb_slice):
# type: (object, object, object) -> object
return [x[:,n*size:(n+1)*size] for n in range(nb_slice)]
def step((M_tm1, c_tm1, h_tm1, r_tm1, wr_tm1, wu_tm1),(x_t)):
with tf.variable_scope("Weights", reuse=True):
W_key = tf.get_variable('W_key', shape=(nb_reads, controller_size, memory_shape[1]))
b_key = tf.get_variable('b_key', shape=(nb_reads, memory_shape[1]))
W_add = tf.get_variable('W_add', shape=(nb_reads, controller_size, memory_shape[1]))
b_add = tf.get_variable('b_add', shape=(nb_reads, memory_shape[1]))
W_sigma = tf.get_variable('W_sigma', shape=(nb_reads, controller_size, 1))
b_sigma = tf.get_variable('b_sigma', shape=(nb_reads, 1))
W_xh = tf.get_variable('W_xh', shape=(input_size + nb_class, 4 * controller_size))
b_h = tf.get_variable('b_xh', shape=(4 * controller_size))
W_o = tf.get_variable('W_o', shape=(controller_size + nb_reads * memory_shape[1], nb_class))
b_o = tf.get_variable('b_o', shape=(nb_class))
W_rh = tf.get_variable('W_rh', shape=(nb_reads * memory_shape[1], 4 * controller_size))
W_hh = tf.get_variable('W_hh', shape=(controller_size, 4 * controller_size))
gamma = tf.get_variable('gamma', shape=[1], initializer=tf.constant_initializer(0.95))
#pt = M_tm1[0:2]
#pt = tf.Print(pt, [pt], message='Prinitng W_key: ')
#x_t = tf.transpose(X_t, perm=[1, 0, 2])[ix]
#with tf.control_dependencies([pt]):
preactivations = tf.matmul(x_t,W_xh) + tf.matmul(r_tm1,W_rh) + tf.matmul(h_tm1,W_hh) + b_h
gf_, gi_, go_, u_ = slice_equally(preactivations, controller_size, 4)
gf = tf.sigmoid(gf_)
gi = tf.sigmoid(gi_)
go = tf.sigmoid(go_)
u = tf.sigmoid(u_)
c_t = gf*c_tm1 + gi*u
h_t = go * tf.tanh(c_t) #(batch_size, controller_size)
h_t_W_key = tf.matmul(h_t, tf.reshape(W_key, shape=(controller_size,-1)))
k_t = tf.tanh(tf.reshape(h_t_W_key, shape=(batch_size, nb_reads, memory_shape[1])) + b_key) #(batch_size, nb_reads, memory_shape[1])
h_t_W_add = tf.matmul(h_t, tf.reshape(W_add, shape=(controller_size, -1)))
a_t = tf.tanh(tf.reshape(h_t_W_add, shape=(batch_size, nb_reads, memory_shape[1])) + b_add)
h_t_W_sigma = tf.matmul(h_t, tf.reshape(W_sigma, shape=(controller_size, -1)))
sigma_t = tf.sigmoid(tf.reshape(h_t_W_sigma, shape=(batch_size, nb_reads,1)) + b_sigma) #(batch_size, nb_reads, 1)
_,temp_indices = tf.nn.top_k(wu_tm1, memory_shape[0])
wlu_tm1 = tf.slice(temp_indices, [0,0], [batch_size,nb_reads]) #(batch_size, nb_reads)
sigma_t_wr_tm_1 = tf.tile(sigma_t, tf.stack([1, 1, wr_tm1.get_shape().as_list()[2]]))
ww_t = tf.reshape(sigma_t*wr_tm1, (batch_size*nb_reads, memory_shape[0])) #(batch_size*nb_reads, memory_shape[0])
#with tf.variable_scope("ww_t"):
ww_t = update_tensor(ww_t, tf.reshape(wlu_tm1,[-1]),1.0 - tf.reshape(sigma_t,shape=[-1])) #Update tensor done using index slicing
ww_t = tf.reshape(ww_t,(batch_size, nb_reads, memory_shape[0]))
with tf.variable_scope("M_t"):
print 'wlu_tm1 : ', wlu_tm1.get_shape().as_list()
M_t = update_tensor(M_tm1, wlu_tm1[:,0], tf.constant(0., shape=[batch_size, memory_shape[1]])) #Update tensor done using sparse to dense
M_t = tf.add(M_t, tf.matmul(tf.transpose(ww_t, perm=[0,2,1] ), a_t)) #(batch_size, memory_size[0], memory_size[1])
K_t = cosine_similarity(k_t, M_t)
wr_t = tf.nn.softmax(tf.reshape(K_t, (batch_size*nb_reads, memory_shape[0])))
wr_t = tf.reshape(wr_t, (batch_size, nb_reads, memory_shape[0])) #(batch_size, nb_reads, memory_size[0])
wu_t = gamma * wu_tm1 + tf.reduce_sum(wr_t, axis=1) + tf.reduce_sum(ww_t, axis=1) #(batch_size, memory_size[0])
r_t = tf.reshape(tf.matmul(wr_t, M_t),[batch_size,-1])
return [M_t, c_t, h_t, r_t, wr_t, wu_t]
#Model Part:
sequence_length_var = target_var.get_shape().as_list()[1] #length of the input
output_shape_var = (batch_size*sequence_length_var, nb_class) #(batch_size*sequence_length_vat,nb_class)
# Input concat with time offset
one_hot_target_flattened = tf.one_hot(tf.reshape(target_var,[-1]), depth=nb_class)
one_hot_target = tf.reshape(one_hot_target_flattened, (batch_size, sequence_length_var, nb_class)) #(batch_size, sequence_var_length, nb_class)
offset_target_var = tf.concat([tf.zeros_like(tf.expand_dims(one_hot_target[:,0],1)),one_hot_target[:,:-1]],axis=1) #(batch_size, sequence_var_length, nb_class)
l_input_var = tf.concat([input_var,offset_target_var],axis=2) #(batch_size, sequence_var_length, input_size+nb_class)
#ix = tf.variable(0,dtype=tf.int32)
#cond = lambda M_0, c_0, h_0, r_0, wr_0, wu_0, ix: ix < sequence_length_var
l_ntm_var = tf.scan(step, elems=tf.transpose(l_input_var, perm=[1,0,2]),initializer=[M_0, c_0, h_0, r_0, wr_0, wu_0], name="Scan_MANN_Last") #Set of all above parameters, as list
l_ntm_output_var = tf.transpose(tf.concat(l_ntm_var[2:4], axis=2), perm=[1, 0, 2]) #h_t & r_t, size=(batch_size, sequence_var_length, controller_size+nb_reads*memory_size[1])
l_input_var_W_o = tf.matmul(tf.reshape(l_ntm_output_var, shape=(batch_size*sequence_length_var,-1)), W_o)
output_var_preactivation = tf.add(tf.reshape(l_input_var_W_o, (batch_size, sequence_length_var,nb_class)), b_o)
output_var_flatten = tf.nn.softmax(tf.reshape(output_var_preactivation, output_shape_var))
output_var = tf.reshape(output_var_flatten, output_var_preactivation.get_shape().as_list())
#Parameters
params = [W_key, b_key, W_add, b_add, W_sigma, b_sigma, W_xh, W_rh, W_hh, b_h, W_o, b_o]
return output_var, output_var_flatten, params
| 64.331034
| 184
| 0.680532
|
db229cbcf96c19129bd02c109ae027569dfac030
| 1,163
|
py
|
Python
|
user/migrations/0001_initial.py
|
zhongmei57485/SwiperPro
|
b00dde5af05f158d7cd2c649e8a07a2c19623b69
|
[
"Apache-2.0"
] | null | null | null |
user/migrations/0001_initial.py
|
zhongmei57485/SwiperPro
|
b00dde5af05f158d7cd2c649e8a07a2c19623b69
|
[
"Apache-2.0"
] | 9
|
2019-12-04T23:48:54.000Z
|
2021-06-10T18:31:57.000Z
|
user/migrations/0001_initial.py
|
zhongmei57485/SwiperPro
|
b00dde5af05f158d7cd2c649e8a07a2c19623b69
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-07-17 19:45
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phonenum', models.CharField(max_length=11, unique=True)),
('nickname', models.CharField(max_length=32)),
('sex', models.IntegerField(choices=[(0, '未知'), (1, '男'), (2, '女')], default=0)),
('birth_year', models.IntegerField(default=2000)),
('birth_month', models.IntegerField(default=1)),
('birth_day', models.IntegerField(default=1)),
('avatar', models.CharField(max_length=256)),
('location', models.CharField(choices=[('gz', '广州'), ('sz', '深圳'), ('bj', '北京'), ('sh', '上海'), ('hz', '杭州'), ('cd', '成都')], default='gz', max_length=16)),
],
options={
'db_table': 'users',
},
),
]
| 36.34375
| 170
| 0.522786
|
cd5893d04810839d7027be2fe50cdc2b940b120c
| 346
|
py
|
Python
|
mrp_system/migrations/0038_auto_20190114_1348.py
|
mgeorge8/django_time
|
f75a442941b0ebbb6cc46a6d18e42b91695b7e57
|
[
"MIT"
] | 1
|
2018-11-09T02:09:14.000Z
|
2018-11-09T02:09:14.000Z
|
mrp_system/migrations/0038_auto_20190114_1348.py
|
mgeorge8/django_time
|
f75a442941b0ebbb6cc46a6d18e42b91695b7e57
|
[
"MIT"
] | null | null | null |
mrp_system/migrations/0038_auto_20190114_1348.py
|
mgeorge8/django_time
|
f75a442941b0ebbb6cc46a6d18e42b91695b7e57
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.2 on 2019-01-14 13:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mrp_system', '0037_billofmaterials_amount'),
]
operations = [
migrations.RenameModel(
old_name='BillofMaterials',
new_name='Amount',
),
]
| 19.222222
| 54
| 0.612717
|
d893d9b38ba3cdbb39582156d0fcb9ede9ebe1e1
| 1,603
|
py
|
Python
|
selene/wait.py
|
KalinkinaMaria/selene
|
859e1102c85740b52af8d0f08dd6b6490b4bd2ff
|
[
"MIT"
] | null | null | null |
selene/wait.py
|
KalinkinaMaria/selene
|
859e1102c85740b52af8d0f08dd6b6490b4bd2ff
|
[
"MIT"
] | 1
|
2021-06-02T04:21:17.000Z
|
2021-06-02T04:21:17.000Z
|
selene/wait.py
|
vkarpenko/selene
|
4776357430c940be38f38be9981006dd156f9730
|
[
"MIT"
] | null | null | null |
import six
import time
from selenium.common.exceptions import TimeoutException
from selene.abctypes.conditions import IEntityCondition
def wait_for(entity, condition, timeout=4, polling=0.1):
    # type: (object, IEntityCondition, int, float) -> object
end_time = time.time() + timeout
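    # Keep re-evaluating the condition until it returns without raising; if the deadline
    # passes, the most recent failure reason is wrapped into the TimeoutException.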
while True:
try:
return condition.fn(entity)
except Exception as reason:
reason_message = getattr(reason, 'msg',
getattr(reason, 'message',
getattr(reason, 'args', '')))
if six.PY2:
if isinstance(reason_message, unicode):
reason_message = reason_message.encode('unicode-escape')
reason_string = '{name}: {message}'.format(name=reason.__class__.__name__, message=reason_message)
screen = getattr(reason, 'screen', None)
stacktrace = getattr(reason, 'stacktrace', None)
if time.time() > end_time:
raise TimeoutException('''
failed while waiting {timeout} seconds
to assert {condition}
for {entity}
reason: {reason}'''.format(
timeout=timeout,
condition=condition.description(),
entity=entity,
reason=reason_string), screen, stacktrace)
time.sleep(polling)
def satisfied(entity, condition):
try:
value = condition(entity)
return value if value is not None else False
except Exception as exc:
return False
| 34.847826
| 110
| 0.570805
|
364baa5a110741b4405535ea81ed76f9302348f5
| 985
|
py
|
Python
|
setup.py
|
cmccandless/homectl
|
338b09583b9817a11bf55df7c4db6143b43d74f9
|
[
"MIT"
] | null | null | null |
setup.py
|
cmccandless/homectl
|
338b09583b9817a11bf55df7c4db6143b43d74f9
|
[
"MIT"
] | null | null | null |
setup.py
|
cmccandless/homectl
|
338b09583b9817a11bf55df7c4db6143b43d74f9
|
[
"MIT"
] | null | null | null |
import setuptools
from homectl.__version__ import VERSION
if __name__ == '__main__':
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="homectl",
version=VERSION,
author="Corey McCandless",
author_email="crm1994@gmail.com",
description=(
"Command line smart device control"
),
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/cmccandless/homectl",
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
entry_points={
'console_scripts': [
'homectl = homectl.homectl:main'
],
},
install_requires=['pyfttt'],
include_package_data=True
)
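# A usage note, assuming a standard setuptools install: the console_scripts
# entry point above means that installing the package (for example with
# `pip install .` from the repository root) puts a `homectl` command on PATH
# that dispatches to homectl.homectl:main.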
| 29.848485
| 54
| 0.57868
|
150ec851b06d04877e9140def5f6f13ab4540575
| 292
|
py
|
Python
|
src/utils/install.py
|
project-delphi/object-detector
|
0caf4f8c676f433286e99425baa2ad7c8350f711
|
[
"BSD-3-Clause"
] | 8
|
2019-06-16T22:44:50.000Z
|
2022-01-23T17:11:27.000Z
|
src/utils/install.py
|
project-delphi/object-detector
|
0caf4f8c676f433286e99425baa2ad7c8350f711
|
[
"BSD-3-Clause"
] | null | null | null |
src/utils/install.py
|
project-delphi/object-detector
|
0caf4f8c676f433286e99425baa2ad7c8350f711
|
[
"BSD-3-Clause"
] | 1
|
2022-01-23T17:11:41.000Z
|
2022-01-23T17:11:41.000Z
|
from os import chdir
from subprocess import run
def install_retinanet():
run('git clone https://github.com/fizyr/keras-retinanet.git'.split(' '))
chdir('keras-retinanet')
run('pip install /content/keras-retinanet/'.split(' '))
run('python setup.py build_ext --inplace'.split(' '))
| 26.545455
| 74
| 0.708904
|
4f5b63b9ca4715331d83934eac853e385980a522
| 809
|
py
|
Python
|
setup.py
|
casimp/cpex
|
ef22860c9f1b1fe7faaa093c480a07c4f8ef34a0
|
[
"MIT"
] | 1
|
2021-04-17T00:09:02.000Z
|
2021-04-17T00:09:02.000Z
|
setup.py
|
casimp/cpex
|
ef22860c9f1b1fe7faaa093c480a07c4f8ef34a0
|
[
"MIT"
] | null | null | null |
setup.py
|
casimp/cpex
|
ef22860c9f1b1fe7faaa093c480a07c4f8ef34a0
|
[
"MIT"
] | 2
|
2021-07-18T02:43:11.000Z
|
2022-03-16T03:25:28.000Z
|
from setuptools import setup
setup(
name='cpex',
version='0.1',
author='C. Simpson',
author_email='c.a.simpson01@gmail.com',
packages=['cpex'],
include_package_data=True,
url='https://github.com/casimp/cpex',
download_url = 'https://github.com/casimp/cpex/tarball/v0.1',
license='LICENSE.txt',
description='Extraction and manipulation of crystal plasticity data from Abaqus ODB files.',
keywords = ['CP', 'crystal plasticity', 'diffraction', 'strain'],
# long_description=open('description').read(),
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows"]
)
| 35.173913
| 96
| 0.641533
|
23f1af6bdbae70dc1a4423499e7dc960f84aa8fa
| 936
|
py
|
Python
|
dfp/get_device_capabilities.py
|
surensilva/dfp-prebid-setup
|
394a6b7ba8208aa45a3a9241158fc77e07b77c96
|
[
"MIT"
] | null | null | null |
dfp/get_device_capabilities.py
|
surensilva/dfp-prebid-setup
|
394a6b7ba8208aa45a3a9241158fc77e07b77c96
|
[
"MIT"
] | null | null | null |
dfp/get_device_capabilities.py
|
surensilva/dfp-prebid-setup
|
394a6b7ba8208aa45a3a9241158fc77e07b77c96
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import sys
from googleads import ad_manager
from dfp.client import get_client
logger = logging.getLogger(__name__)
def get_device_capabilities():
"""
Gets Device capabilities.
Args:
order_id(str): the id of the DFP orderrd
Returns:
array of line item objects
"""
dfp_client = get_client()
report_downloader = dfp_client.GetDataDownloader(version='v201911')
device_query = ('SELECT Id, DeviceCapabilityName '
'FROM Device_Capability ')
results = report_downloader.DownloadPqlResultToList(device_query)
capability_map = {}
# Build associative array mapping category to id
for d in results:
# Skips the header row
if isinstance(d[0],int):
capability_map[d[1]] = d[0]
return capability_map
def main():
cm = get_device_capabilities()
print(cm)
if __name__ == '__main__':
main()
| 19.5
| 69
| 0.689103
|
909fee69ec78b5b1519f052bc3f8bd05c9cd8446
| 4,120
|
py
|
Python
|
src/bluetooth/bluetooth_service.py
|
mikaponics/mikapod-soil-rpi
|
5090a2cf7d252b7e53fe25680048732c0c9cecb9
|
[
"BSD-3-Clause"
] | null | null | null |
src/bluetooth/bluetooth_service.py
|
mikaponics/mikapod-soil-rpi
|
5090a2cf7d252b7e53fe25680048732c0c9cecb9
|
[
"BSD-3-Clause"
] | null | null | null |
src/bluetooth/bluetooth_service.py
|
mikaponics/mikapod-soil-rpi
|
5090a2cf7d252b7e53fe25680048732c0c9cecb9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime
import signal
import time
import json
from serial import Serial
import Pyro4
import pytz
from foundation import *
class ServiceExit(Exception):
"""
Custom exception which is used to trigger the clean exit
of all running threads and the main program.
"""
pass
def onServiceShutdownHandler(signum, frame):
"""
Function to be called by our `SIGINT` and `SIGTERM` handlers.
"""
print("-------------------------------------------------------------------")
print(getDT(), '| SERIAL TERMINAL SERVICE | Caught signal %d' % signum)
print("-------------------------------------------------------------------")
raise ServiceExit
class BluetoothSerialTerminalService(object):
"""
Service interacts with the external device (Arduino) and prints the data
on a specific interval to the user's console.
"""
def __init__(self):
'''
        Wait until our computer can connect to the external device (Arduino)
over serial USB communication to begin running our program.
'''
try:
self.__serial = Serial(SERIAL_PORT, SERIAL_BAUD, timeout=SERIAL_TIMEOUT)
time.sleep(2) # Wait for serial terminal to setup.
print(getDT(), "| SERIAL TERMINAL SERVICE | Successfully connected to serial port:", SERIAL_PORT);
except Exception as e:
if "could not open port" in str(e):
print(getDT(), "| SERIAL TERMINAL SERVICE | Could not connect to serial port:", SERIAL_PORT);
exit()
'''
Load up our application variables.
'''
self.__storage = Pyro4.Proxy("PYRONAME:mikapod.storage")
def runOnMainLoop(self):
"""
Function is the main loop of the application.
"""
print(getDT(), "| SERIAL TERMINAL SERVICE | Register the signal handlers.")
signal.signal(signal.SIGTERM, onServiceShutdownHandler)
signal.signal(signal.SIGINT, onServiceShutdownHandler)
print(getDT(), "| SERIAL TERMINAL SERVICE | Starting main program.")
try:
self.runOperationLoop()
except ServiceExit:
print(getDT(), "| SERIAL TERMINAL SERVICE | Gracefully shutting down.")
print(getDT(), "| SERIAL TERMINAL SERVICE | Exiting main program.")
def runOperationLoop(self):
        # Keep running the main runtime loop: read comma-separated lines from
        # the serial port and dispatch any recognized command to its handler.
while True:
byte_data = self.__serial.readline()
string_data = byte_data.decode('UTF-8') # NOTE: https://stackoverflow.com/questions/6269765/what-does-the-b-character-do-in-front-of-a-string-literal#6273618
# Check to see if ANY data was returned from the serial port, if
# there was then we load up the string
if len(string_data) > 0:
array_data = [x.strip() for x in string_data.split(',')]
print(getDT(), "| SERIAL TERMINAL SERVICE | Output - Pre:"+string_data+"\n")
print(getDT(), "| SERIAL TERMINAL SERVICE | Output - Post:"+str(array_data)+"\n")
commandID = int(array_data[0])
if commandID == SET_WIFI_COMMAND_ID:
self.changeWiFiCommand(array_data[1], array_data[2], array_data[3])
def changeWiFiCommand(self, country, ssid, pw):
print(getDT(), "| SERIAL TERMINAL SERVICE | Set Wifi w/ SSID `"+ssid+"` and PW `"+pw+"`.\n")
import subprocess
p = subprocess.Popen(
['python', '../cmd/wifi_config_cmd.py', country, ssid, pw],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
out, err = p.communicate()
print(getDT(), '| SERIAL TERMINAL SERVICE | OUT: ' + str(out))
print(getDT(), '| SERIAL TERMINAL SERVICE | ERR: ' + str(err))
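# A hedged protocol sketch: runOperationLoop above expects comma-separated
# lines on the serial port whose first field is a numeric command id. For the
# Wi-Fi command it handles, the line would look roughly like
#   "<SET_WIFI_COMMAND_ID>,CA,MyHomeNetwork,hunter2"
# (the actual value of SET_WIFI_COMMAND_ID comes from `foundation` and the
# country/ssid/password values are made up). The line is split into
# [id, country, ssid, password] and forwarded to ../cmd/wifi_config_cmd.py.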
if __name__ == "__main__":
"""
Main entry into the main program.
"""
app = BluetoothSerialTerminalService()
app.runOnMainLoop()
| 35.826087
| 169
| 0.6
|
d1bbcbefdc21f7aacec04508ef2ae3a1badfcffc
| 323
|
py
|
Python
|
lp_solver.py
|
DenisAltruist/NEFreeSamplesFinder
|
56e550332bef26251730742897c08dc604113673
|
[
"MIT"
] | 1
|
2019-11-06T17:45:38.000Z
|
2019-11-06T17:45:38.000Z
|
lp_solver.py
|
DenisAltruist/NEFreeSamplesFinder
|
56e550332bef26251730742897c08dc604113673
|
[
"MIT"
] | null | null | null |
lp_solver.py
|
DenisAltruist/NEFreeSamplesFinder
|
56e550332bef26251730742897c08dc604113673
|
[
"MIT"
] | null | null | null |
from scipy.optimize import linprog
import time
import numpy as np
def solve(A, b):
c = np.zeros(len(A[0])).tolist()
res = linprog(c=c, A_ub=A, b_ub=b, bounds=(None, None), method='interior-point')
return res['success'], np.array(res['x']).tolist()
def is_feasible(A, b):
res, sol = solve(A, b)
return res, sol
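# A minimal usage sketch (the constraint values are made up): feasibility of
# {x : A x <= b} for the box -1 <= x1 <= 1, -1 <= x2 <= 1, written as four
# rows of A. Since solve() uses a zero objective, "success" simply means
# linprog found a feasible point.
if __name__ == '__main__':
    A = [[1, 0], [-1, 0], [0, 1], [0, -1]]
    b = [1, 1, 1, 1]
    feasible, point = is_feasible(A, b)
    print(feasible, point)  # expected: True and a point inside the box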
| 26.916667
| 82
| 0.659443
|
3da5cf2abc80c206d7dcbb44a3e5077e417db332
| 1,910
|
py
|
Python
|
check_nodes.py
|
backster82/check_kubernetes
|
e4b82cc2abf0459832a3876e862184d7fe897a9e
|
[
"Apache-2.0"
] | 14
|
2018-07-17T12:48:47.000Z
|
2020-11-25T14:57:00.000Z
|
check_nodes.py
|
backster82/check_kubernetes
|
e4b82cc2abf0459832a3876e862184d7fe897a9e
|
[
"Apache-2.0"
] | 3
|
2019-03-15T08:06:37.000Z
|
2019-09-23T09:57:59.000Z
|
check_nodes.py
|
backster82/check_kubernetes
|
e4b82cc2abf0459832a3876e862184d7fe897a9e
|
[
"Apache-2.0"
] | 5
|
2019-08-28T12:59:08.000Z
|
2021-08-10T06:03:37.000Z
|
#!/usr/bin/env python
"""
Check for Kubernetes Nodes
"""
import argparse
from kubernetes import config, client
import nagiosplugin
from version import __version__ as version
class Nodes(nagiosplugin.Resource):
"""
Check for Kubernetes Nodes
"""
def __init__(self, kube_config):
self.kube_config = kube_config
self.nodes = []
self.nodes_with_problems = []
def probe(self):
config.load_kube_config(self.kube_config)
kube = client.CoreV1Api()
for node in kube.list_node().items:
self.nodes.append(node)
for condition in node.status.conditions:
                # OutOfDisk is not posted in k8s > 1.12, but is still listed in node status conditions,
# see https://github.com/kubernetes/kubernetes/pull/72507
if condition.type == "OutOfDisk":
continue
if (condition.type == 'Ready' and condition.status != 'True') \
or (condition.type != 'Ready' and condition.status != 'False'):
self.nodes_with_problems.append(node)
break
return [
nagiosplugin.Metric('problem_nodes', len(self.nodes_with_problems), min=0),
nagiosplugin.Metric('all_nodes', len(self.nodes), min=0),
]
@nagiosplugin.guarded
def main():
"""
:return:
"""
argp = argparse.ArgumentParser(description='Nagios/Icinga check for Kubernetes Nodes')
argp.add_argument('-v', '--version', action='version', version='%(prog)s ' + version)
argp.add_argument('--kube-config', help='Kubernetes Config File')
args = argp.parse_args()
check = nagiosplugin.Check(
Nodes(args.kube_config),
nagiosplugin.ScalarContext('problem_nodes', 1, 2),
nagiosplugin.ScalarContext('all_nodes')
)
check.main()
if __name__ == '__main__':
main()
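# A hedged usage note: the script is invoked like any other Nagios/Icinga
# plugin. --kube-config is the only option besides --version, and the
# warning/critical thresholds for problem_nodes are fixed at 1 and 2 by the
# ScalarContext above, e.g. (path is a placeholder):
#   ./check_nodes.py --kube-config /path/to/kubeconfig
# Exit status and the problem_nodes/all_nodes perfdata then follow the usual
# nagiosplugin conventions.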
| 28.507463
| 103
| 0.615707
|
b3af738793af3106f39d573803e3d7b8c0120957
| 10,555
|
py
|
Python
|
kivy/core/video/video_ffpyplayer.py
|
CharaD7/kivy
|
85065fe6633f5ac831c193dc84e3f636b789cc3a
|
[
"MIT"
] | null | null | null |
kivy/core/video/video_ffpyplayer.py
|
CharaD7/kivy
|
85065fe6633f5ac831c193dc84e3f636b789cc3a
|
[
"MIT"
] | null | null | null |
kivy/core/video/video_ffpyplayer.py
|
CharaD7/kivy
|
85065fe6633f5ac831c193dc84e3f636b789cc3a
|
[
"MIT"
] | 1
|
2019-04-12T05:43:48.000Z
|
2019-04-12T05:43:48.000Z
|
'''
FFmpeg based video abstraction
==============================
To use, you need to install ffpyplayer and have a compiled ffmpeg shared
library.
https://github.com/matham/ffpyplayer
The docs there describe how to set this up. But briefly, first you need to
compile ffmpeg using the shared flags while disabling the static flags (you'll
probably have to set the fPIC flag, e.g. CFLAGS=-fPIC). Here's some
instructions: https://trac.ffmpeg.org/wiki/CompilationGuide. For Windows, you
can download compiled GPL binaries from http://ffmpeg.zeranoe.com/builds/.
Similarly, you should download SDL.
Now, you should have an ffmpeg and an sdl directory. In each, you should have an include,
bin, and lib directory, where e.g. for Windows, lib contains the .dll.a files,
while bin contains the actual dlls. The include directory holds the headers.
The bin directory is only needed if the shared libraries are not already on
the path. In the environment define FFMPEG_ROOT and SDL_ROOT, each pointing to
the ffmpeg, and SDL directories, respectively. (If you're using SDL2,
the include directory will contain a directory called SDL2, which then holds
the headers).
Once defined, download the ffpyplayer git and run
python setup.py build_ext --inplace
Finally, before running you need to ensure that ffpyplayer is in python's path.
.. note::
When kivy exits by closing the window while the video is playing,
    it appears that the __del__ method of VideoFFPy
is not called. Because of this the VideoFFPy object is not
properly deleted when kivy exits. The consequence is that because
MediaPlayer creates internal threads which do not have their daemon
    flag set, when the main thread exits it'll hang and wait for the other
MediaPlayer threads to exit. But since __del__ is not called to delete the
MediaPlayer object, those threads will remain alive hanging kivy. What this
means is that you have to be sure to delete the MediaPlayer object before
kivy exits by setting it to None.
'''
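# A hedged setup sketch paraphrasing the build notes above; the paths are
# placeholders, not values from this file:
#   export FFMPEG_ROOT=/opt/ffmpeg    # holds include/, lib/, bin/
#   export SDL_ROOT=/opt/sdl2         # holds include/SDL2, lib/, bin/
#   git clone https://github.com/matham/ffpyplayer && cd ffpyplayer
#   python setup.py build_ext --inplace
# And, per the note above, drop the player reference (set it to None) before
# kivy exits so the MediaPlayer threads do not keep the process alive.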
__all__ = ('VideoFFPy', )
try:
import ffpyplayer
from ffpyplayer.player import MediaPlayer
from ffpyplayer.tools import set_log_callback, get_log_callback
except:
raise
from threading import Thread
from kivy.clock import Clock, mainthread
from kivy.logger import Logger
from kivy.core.video import VideoBase
from kivy.graphics import Rectangle, BindTexture
from kivy.graphics.texture import Texture
from kivy.graphics.fbo import Fbo
from kivy.weakmethod import WeakMethod
import time
Logger.info('VideoFFPy: Using ffpyplayer {}'.format(ffpyplayer.version))
logger_func = {'quiet': Logger.critical, 'panic': Logger.critical,
'fatal': Logger.critical, 'error': Logger.error,
'warning': Logger.warning, 'info': Logger.info,
'verbose': Logger.debug, 'debug': Logger.debug}
def _log_callback(message, level):
message = message.strip()
if message:
logger_func[level]('ffpyplayer: {}'.format(message))
if not get_log_callback():
set_log_callback(_log_callback)
class VideoFFPy(VideoBase):
YUV_RGB_FS = """
$HEADER$
uniform sampler2D tex_y;
uniform sampler2D tex_u;
uniform sampler2D tex_v;
void main(void) {
float y = texture2D(tex_y, tex_coord0).r;
float u = texture2D(tex_u, tex_coord0).r - 0.5;
float v = texture2D(tex_v, tex_coord0).r - 0.5;
float r = y + 1.402 * v;
float g = y - 0.344 * u - 0.714 * v;
float b = y + 1.772 * u;
gl_FragColor = vec4(r, g, b, 1.0);
}
"""
def __init__(self, **kwargs):
self._ffplayer = None
self._thread = None
self._next_frame = None
self._ffplayer_need_quit = False
self._callback_ref = WeakMethod(self._player_callback)
self._trigger = Clock.create_trigger(self._redraw)
super(VideoFFPy, self).__init__(**kwargs)
def __del__(self):
self.unload()
def _player_callback(self, selector, value):
if self._ffplayer is None:
return
if selector == 'quit':
def close(*args):
self.unload()
Clock.schedule_once(close, 0)
def _get_position(self):
if self._ffplayer is not None:
return self._ffplayer.get_pts()
return 0
def _set_position(self, pos):
self.seek(pos)
def _set_volume(self, volume):
self._volume = volume
if self._ffplayer:
self._ffplayer.set_volume(self._volume)
def _get_duration(self):
if self._ffplayer is None:
return 0
return self._ffplayer.get_metadata()['duration']
@mainthread
def _do_eos(self):
if self.eos == 'pause':
self.pause()
elif self.eos == 'stop':
self.stop()
elif self.eos == 'loop':
self.position = 0
self.dispatch('on_eos')
@mainthread
def _change_state(self, state):
self._state = state
def _redraw(self, *args):
if not self._ffplayer:
return
next_frame = self._next_frame
if not next_frame:
return
img, pts = next_frame
if img.get_size() != self._size or self._texture is None:
self._size = w, h = img.get_size()
if self._out_fmt == 'yuv420p':
w2 = int(w / 2)
h2 = int(h / 2)
self._tex_y = Texture.create(
size=(w, h), colorfmt='luminance')
self._tex_u = Texture.create(
size=(w2, h2), colorfmt='luminance')
self._tex_v = Texture.create(
size=(w2, h2), colorfmt='luminance')
self._fbo = fbo = Fbo(size=self._size)
with fbo:
BindTexture(texture=self._tex_u, index=1)
BindTexture(texture=self._tex_v, index=2)
Rectangle(size=fbo.size, texture=self._tex_y)
fbo.shader.fs = VideoFFPy.YUV_RGB_FS
fbo['tex_y'] = 0
fbo['tex_u'] = 1
fbo['tex_v'] = 2
self._texture = fbo.texture
else:
self._texture = Texture.create(size=self._size, colorfmt='rgba')
# XXX FIXME
#self.texture.add_reload_observer(self.reload_buffer)
self._texture.flip_vertical()
self.dispatch('on_load')
if self._texture:
if self._out_fmt == 'yuv420p':
dy, du, dv, _ = img.to_memoryview()
self._tex_y.blit_buffer(dy, colorfmt='luminance')
self._tex_u.blit_buffer(du, colorfmt='luminance')
self._tex_v.blit_buffer(dv, colorfmt='luminance')
else:
self._texture.blit_buffer(
img.to_memoryview()[0], colorfmt='rgba')
self._fbo.ask_update()
self._fbo.draw()
self.dispatch('on_frame')
def _next_frame_run(self):
ffplayer = self._ffplayer
sleep = time.sleep
trigger = self._trigger
did_dispatch_eof = False
# fast path, if the source video is yuv420p, we'll use a glsl shader for
# buffer conversion to rgba
while not self._ffplayer_need_quit:
src_pix_fmt = ffplayer.get_metadata().get('src_pix_fmt')
if not src_pix_fmt:
sleep(0.005)
continue
if src_pix_fmt == 'yuv420p':
self._out_fmt = 'yuv420p'
ffplayer.set_output_pix_fmt(self._out_fmt)
self._ffplayer.toggle_pause()
break
if self._ffplayer_need_quit:
return
# wait until loaded or failed, shouldn't take long, but just to make
# sure metadata is available.
        s = time.perf_counter()  # time.clock() was removed in Python 3.8
while not self._ffplayer_need_quit:
if ffplayer.get_metadata()['src_vid_size'] != (0, 0):
break
# XXX if will fail later then?
            if time.perf_counter() - s > 10.:
break
sleep(0.005)
if self._ffplayer_need_quit:
return
        # we got all the information, now, get the frames :)
self._change_state('playing')
while not self._ffplayer_need_quit:
t1 = time.time()
frame, val = ffplayer.get_frame()
t2 = time.time()
if val == 'eof':
sleep(0.2)
if not did_dispatch_eof:
self._do_eos()
did_dispatch_eof = True
elif val == 'paused':
did_dispatch_eof = False
sleep(0.2)
else:
did_dispatch_eof = False
if frame:
self._next_frame = frame
trigger()
else:
val = val if val else (1 / 30.)
sleep(val)
def seek(self, percent):
if self._ffplayer is None:
return
self._ffplayer.seek(percent * self._ffplayer.get_metadata()
['duration'], relative=False)
self._next_frame = None
def stop(self):
self.unload()
def pause(self):
if self._ffplayer and self._state != 'paused':
self._ffplayer.toggle_pause()
self._state = 'paused'
def play(self):
if self._ffplayer and self._state == 'paused':
self._ffplayer.toggle_pause()
self._state = 'playing'
return
self.load()
self._out_fmt = 'rgba'
ff_opts = {
'paused': True,
'out_fmt': self._out_fmt
}
self._ffplayer = MediaPlayer(
self._filename, callback=self._callback_ref,
thread_lib='SDL',
loglevel='info', ff_opts=ff_opts)
self._ffplayer.set_volume(self._volume)
self._thread = Thread(target=self._next_frame_run, name='Next frame')
self._thread.daemon = True
self._thread.start()
def load(self):
self.unload()
def unload(self):
Clock.unschedule(self._redraw)
self._ffplayer_need_quit = True
if self._thread:
self._thread.join()
self._thread = None
if self._ffplayer:
self._ffplayer = None
self._next_frame = None
self._size = (0, 0)
self._state = ''
self._ffplayer_need_quit = False
| 32.779503
| 80
| 0.591473
|
0b8c26bda4f832cc278bcb452a517a8b7c5435b4
| 4,588
|
py
|
Python
|
contrib/testgen/gen_base58_test_vectors.py
|
DemoCoin-Dev/democoin
|
4f3ee2a4484a05140cc1066a299afae7c120b0d2
|
[
"MIT"
] | 1
|
2018-10-15T00:55:42.000Z
|
2018-10-15T00:55:42.000Z
|
contrib/testgen/gen_base58_test_vectors.py
|
DemoCoin-Dev/democoin
|
4f3ee2a4484a05140cc1066a299afae7c120b0d2
|
[
"MIT"
] | null | null | null |
contrib/testgen/gen_base58_test_vectors.py
|
DemoCoin-Dev/democoin
|
4f3ee2a4484a05140cc1066a299afae7c120b0d2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2012-2018 The Democoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
# key types
PUBKEY_ADDRESS = 0
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PRIVKEY = 128
PRIVKEY_TEST = 239
metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)),
((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)),
((PRIVKEY,), 32, (), (True, False, None, False)),
((PRIVKEY,), 32, (1,), (True, False, None, True)),
((PRIVKEY_TEST,), 32, (), (True, True, None, False)),
((PRIVKEY_TEST,), 32, (1,), (True, True, None, True))
]
def is_valid(v):
'''Check vector v for validity'''
result = b58decode_chk(v)
if result is None:
return False
for template in templates:
prefix = bytearray(template[0])
suffix = bytearray(template[2])
if result.startswith(prefix) and result.endswith(suffix):
if (len(result) - len(prefix) - len(suffix)) == template[1]:
return True
return False
def gen_valid_vectors():
'''Generate valid test vectors'''
while True:
for template in templates:
prefix = bytearray(template[0])
payload = bytearray(os.urandom(template[1]))
suffix = bytearray(template[2])
rv = b58encode_chk(prefix + payload + suffix)
assert is_valid(rv)
metadata = {x: y for x, y in zip(metadata_keys,template[3]) if y is not None}
hexrepr = b2a_hex(payload)
if isinstance(hexrepr, bytes):
hexrepr = hexrepr.decode('utf8')
yield (rv, hexrepr, metadata)
def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
'''Generate possibly invalid vector'''
if corrupt_prefix:
prefix = os.urandom(1)
else:
prefix = bytearray(template[0])
if randomize_payload_size:
payload = os.urandom(max(int(random.expovariate(0.5)), 50))
else:
payload = os.urandom(template[1])
if corrupt_suffix:
suffix = os.urandom(len(template[2]))
else:
suffix = bytearray(template[2])
return b58encode_chk(prefix + payload + suffix)
def randbool(p = 0.5):
'''Return True with P(p)'''
return random.random() < p
def gen_invalid_vectors():
'''Generate invalid test vectors'''
# start with some manual edge-cases
yield "",
yield "x",
while True:
# kinds of invalid vectors:
# invalid prefix
# invalid payload length
# invalid (randomized) suffix (add random data)
# corrupt checksum
for template in templates:
val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2))
if random.randint(0,10)<1: # line corruption
if randbool(): # add random character to end
val += random.choice(b58chars)
else: # replace random character in the middle
n = random.randint(0, len(val))
val = val[0:n] + random.choice(b58chars) + val[n+1:]
if not is_valid(val):
yield val,
if __name__ == '__main__':
import sys
import json
iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
try:
uiter = iters[sys.argv[1]]
except IndexError:
uiter = gen_valid_vectors
try:
count = int(sys.argv[2])
except IndexError:
count = 0
data = list(islice(uiter(), count))
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
| 34.496241
| 91
| 0.613775
|
0d6c00e9a179eb1633c8daea1a310e4906878c1f
| 4,469
|
py
|
Python
|
PythonCode/Gravity Simulation/main.py
|
sansy98/sansy_repo
|
bf67dfc9770223b9c87b328c92992095a660d36f
|
[
"MIT"
] | 2
|
2020-06-11T06:51:25.000Z
|
2020-10-06T18:57:02.000Z
|
PythonCode/Gravity Simulation/main.py
|
sansy98/sansy_repo
|
bf67dfc9770223b9c87b328c92992095a660d36f
|
[
"MIT"
] | null | null | null |
PythonCode/Gravity Simulation/main.py
|
sansy98/sansy_repo
|
bf67dfc9770223b9c87b328c92992095a660d36f
|
[
"MIT"
] | null | null | null |
import pygame as pg
from pygame.locals import *
from math import sqrt
SIZE = 750
G = 6.67 * 10**-11
class Particle():
def __init__(self, pos, m):
self.x = pos[0]
self.y = pos[1]
self.v = 0 #Particle's velocity, starts at 0
self.d = SIZE-pos[1]/30 #30px == 1m. Distance from gravitational center
self.h = self.d #Same as d but only from where the object started falling
self.m = m #Object's mass
self.isFalling = True
self.hasStopped = False
self.historic = [0, 0]
self.historicIndex = 0
#Checks if velocity remains constant, which means that the object has stopped.
#Used to prevent the object from going through the floor
def checkHistoric(self):
if self.historicIndex > 1: self.historicIndex = 0
self.historic[self.historicIndex] = self.v
self.historicIndex += 1
flag = True
for i in range(0, 2):
if abs(self.v - self.historic[i]) > 0.0001: #Error margin of 0.0001
flag = False
break
if flag: self.hasStopped = True
def update(self, g):
if not self.hasStopped:
self.d = SIZE-self.y/10
self.v += g/60 #60 fps
self.y += self.v/2 #60 fps but it's 2 because 30px=1m so 60/30 = 2
if self.y+20 >= SIZE and self.isFalling:
self.v = -self.v/(1.0 + self.m/10) #Calculates bounce negative velocity, the more massive the object is, the less it will bounce
self.isFalling = False
if not self.isFalling and self.v < 5 and self.v > -5:
self.h = self.d
self.isFalling = True
self.checkHistoric()
else:
self.y = SIZE-20
self.v = 0
if __name__ == "__main__":
try:
M = float(input("Introduce la masa (M) del planeta en kg (Ejm, Tierra = ->5.97<- * 10**24): "))
except ValueError:
M = 5.97 #Defaults to Earth's value
try:
expM = int(input("Introduce el exponente: (M *10^?): "))
except ValueError:
expM = 24 #Defaults to Earth's value
M *= 10**expM #Applies scientific notation
try:
R = float(input("Introduce el radio del planeta en km (Ejm, Tierra = 6501): "))
except ValueError:
R = 6501 #Defaults to Earth's value
R *= 1000 #Converts radius from km to m
m = 1 #Next particle's mass starts at 1 by default
g = (G*M)/R**2 #Calculates the gravitational acceleration using Newton's law of universal gravitation
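    # Worked example with the defaults above (values rounded): M = 5.97e24 kg
    # and R = 6.501e6 m give g = (6.67e-11 * 5.97e24) / (6.501e6)**2
    # ~= 3.98e14 / 4.23e13 ~= 9.4 m/s^2, close to the familiar 9.8 m/s^2
    # (the default radius here is a bit larger than Earth's mean radius).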
pg.init()
displayFont = pg.font.SysFont('Arial', 20)
clock = pg.time.Clock()
screen = pg.display.set_mode((SIZE, SIZE))
pg.display.set_caption("Gravity Simulator")
running = True
started = False #Indicates if the first particle has been created in order to start the simulation
particles = []
#SIMULATION MAIN LOOP
while running:
for event in pg.event.get():
if event.type == QUIT: running = False
if event.type == MOUSEBUTTONDOWN:
started = True
particles.append(Particle(pg.mouse.get_pos(), m))
if event.type == KEYDOWN:
if event.key == K_UP: m += 1 #Increments next particle's mass
if event.key == K_DOWN and m>1: m -= 1 #Decrements next particle's mass
if event.key == K_LEFT: m = 1 #Resets next particle's mass (Defaults it to 1)
screen.fill((255, 255, 255))
if started:
for particle in particles:
particle.update(g) #Updates particle
pg.draw.circle(screen, (255, 0, 0), (particle.x, particle.y), 20) #Draws particle
#TEXT DISPLAYING
massDisplaySurface = displayFont.render(f"Prox. masa: {m}", False, (0, 0, 0))
screen.blit(massDisplaySurface, (0, 0))
#FOR DEBUGGING PURPOSE:
#print(m)
#print(f"v: {particle.v} | g: {g} | isFalling: {particle.isFalling} | hasStopped: {particle.hasStopped}")
pg.display.update()
| 39.548673
| 148
| 0.531886
|
47c19d1fa99543db277add104d0e8899d5d44b17
| 1,429
|
py
|
Python
|
hgvs/utils/reftranscriptdata.py
|
naomifox/hgvs
|
5007142191cac8dba2272bad5b945a27c0a5cf87
|
[
"Apache-2.0"
] | null | null | null |
hgvs/utils/reftranscriptdata.py
|
naomifox/hgvs
|
5007142191cac8dba2272bad5b945a27c0a5cf87
|
[
"Apache-2.0"
] | null | null | null |
hgvs/utils/reftranscriptdata.py
|
naomifox/hgvs
|
5007142191cac8dba2272bad5b945a27c0a5cf87
|
[
"Apache-2.0"
] | null | null | null |
from Bio.Seq import Seq
from hgvs.exceptions import HGVSDataNotAvailableError
class RefTranscriptData(object):
def __init__(self, hdp, tx_ac, pro_ac):
"""helper for generating RefTranscriptData from for c_to_p"""
tx_info = hdp.get_tx_identity_info(tx_ac)
tx_seq = hdp.get_seq(tx_ac)
if tx_info is None or tx_seq is None:
raise HGVSDataNotAvailableError("Missing transcript data for accession: {}".format(tx_ac))
# use 1-based hgvs coords
cds_start = tx_info["cds_start_i"] + 1
cds_stop = tx_info["cds_end_i"]
        # coding sequences that are not divisible by 3 are not yet supported
tx_seq_to_translate = tx_seq[cds_start - 1:cds_stop]
if len(tx_seq_to_translate) % 3 != 0:
raise NotImplementedError("Transcript {} is not supported because its sequence length of {} is not divisible by 3.".format(tx_ac, len(tx_seq_to_translate)))
tx_seq_cds = Seq(tx_seq_to_translate)
protein_seq = str(tx_seq_cds.translate())
if pro_ac is None:
# get_acs... will always return at least the MD5_ accession
pro_ac = (hdp.get_pro_ac_for_tx_ac(tx_ac) or hdp.get_acs_for_protein_seq(protein_seq)[0])
self.transcript_sequence = tx_seq
self.aa_sequence = protein_seq
self.cds_start = cds_start
self.cds_stop = cds_stop
self.protein_accession = pro_ac
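# A hedged usage sketch: RefTranscriptData needs an hgvs data provider plus a
# transcript accession, and pro_ac=None lets it look the protein accession up.
# The UTA connection and the accession below are illustrative only.
#   import hgvs.dataproviders.uta
#   hdp = hgvs.dataproviders.uta.connect()
#   rtd = RefTranscriptData(hdp, "NM_000551.3", None)
#   print(rtd.protein_accession, rtd.cds_start, rtd.cds_stop)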
| 39.694444
| 168
| 0.682295
|
01da27d92c9929fe7ff7c5cb18521b2140736abc
| 12,731
|
py
|
Python
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/ad_group_criterion_service/transports/grpc.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/ad_group_criterion_service/transports/grpc.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/ad_group_criterion_service/transports/grpc.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v7.resources.types import ad_group_criterion
from google.ads.googleads.v7.services.types import ad_group_criterion_service
from .base import AdGroupCriterionServiceTransport, DEFAULT_CLIENT_INFO
class AdGroupCriterionServiceGrpcTransport(AdGroupCriterionServiceTransport):
"""gRPC backend transport for AdGroupCriterionService.
Service to manage ad group criteria.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
)
@classmethod
def create_channel(cls,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs
)
def close(self):
self.grpc_channel.close()
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_ad_group_criterion(self) -> Callable[
[ad_group_criterion_service.GetAdGroupCriterionRequest],
ad_group_criterion.AdGroupCriterion]:
r"""Return a callable for the get ad group criterion method over gRPC.
Returns the requested criterion in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetAdGroupCriterionRequest],
~.AdGroupCriterion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_ad_group_criterion' not in self._stubs:
self._stubs['get_ad_group_criterion'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v7.services.AdGroupCriterionService/GetAdGroupCriterion',
request_serializer=ad_group_criterion_service.GetAdGroupCriterionRequest.serialize,
response_deserializer=ad_group_criterion.AdGroupCriterion.deserialize,
)
return self._stubs['get_ad_group_criterion']
@property
def mutate_ad_group_criteria(self) -> Callable[
[ad_group_criterion_service.MutateAdGroupCriteriaRequest],
ad_group_criterion_service.MutateAdGroupCriteriaResponse]:
r"""Return a callable for the mutate ad group criteria method over gRPC.
Creates, updates, or removes criteria. Operation statuses are
returned.
List of thrown errors: `AdGroupCriterionError <>`__
`AdxError <>`__ `AuthenticationError <>`__
`AuthorizationError <>`__ `BiddingError <>`__
`BiddingStrategyError <>`__ `CollectionSizeError <>`__
`ContextError <>`__ `CriterionError <>`__ `DatabaseError <>`__
`DateError <>`__ `DistinctError <>`__ `FieldError <>`__
`FieldMaskError <>`__ `HeaderError <>`__ `IdError <>`__
`InternalError <>`__ `MultiplierError <>`__ `MutateError <>`__
`NewResourceCreationError <>`__ `NotEmptyError <>`__
`NullError <>`__ `OperationAccessDeniedError <>`__
`OperatorError <>`__ `PolicyViolationError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
`UrlFieldError <>`__
Returns:
Callable[[~.MutateAdGroupCriteriaRequest],
~.MutateAdGroupCriteriaResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'mutate_ad_group_criteria' not in self._stubs:
self._stubs['mutate_ad_group_criteria'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v7.services.AdGroupCriterionService/MutateAdGroupCriteria',
request_serializer=ad_group_criterion_service.MutateAdGroupCriteriaRequest.serialize,
response_deserializer=ad_group_criterion_service.MutateAdGroupCriteriaResponse.deserialize,
)
return self._stubs['mutate_ad_group_criteria']
__all__ = (
'AdGroupCriterionServiceGrpcTransport',
)
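# A hedged construction sketch (not generated code): the transport is normally
# created for you by the service client, but it can be built directly.
# Application Default Credentials below are just one of the supported options.
#   import google.auth
#   creds, _ = google.auth.default()
#   transport = AdGroupCriterionServiceGrpcTransport(
#       host='googleads.googleapis.com',
#       credentials=creds,
#   )
#   get_rpc = transport.get_ad_group_criterion  # takes a GetAdGroupCriterionRequest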
| 45.467857
| 112
| 0.634593
|
b0b74b8cbec73ac2d2c379e44f4e1a5200253bbd
| 1,477
|
py
|
Python
|
tkinter/pynput-keylogger/main.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 140
|
2017-02-21T22:49:04.000Z
|
2022-03-22T17:51:58.000Z
|
tkinter/pynput-keylogger/main.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 5
|
2017-12-02T19:55:00.000Z
|
2021-09-22T23:18:39.000Z
|
tkinter/pynput-keylogger/main.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 79
|
2017-01-25T10:53:33.000Z
|
2022-03-11T16:13:57.000Z
|
# date: 2019.07.08
# https://stackoverflow.com/questions/56925820/function-to-start-and-stopthread/56926749#56926749
from pynput.keyboard import Listener, Key
import tkinter as tk
from functools import partial
def press(key):
keyd = str(key)
keyd = keyd.replace("Key.space", " ")
keyd = keyd.replace("'", "")
with open("doc.txt", "a") as o:
o.write(keyd)
print("key:", keyd)
# key combination to stop listener (and end thread)
#if key == Key.esc:
# return False
def startListener(arg):
global listener # inform function to use external variable
if arg == btStart:
if listener is None:
print('[+] starting listener')
listener = Listener(on_press=press)
listener.start()
else:
print('[!] already running')
if arg == btStop:
if listener is None:
print('[!] not running')
else:
            print('[+] stopping thread')
listener.stop()
listener.join()
listener = None
# ---------------------------------------------------------
listener = None
app = tk.Tk()
app.geometry("300x100")
btStart = tk.Button(app, text="Start")
btStart.pack(side='top', fill='both', expand=True)
btStart["command"] = partial(startListener, btStart)
btStop = tk.Button(app, text="Stop")
btStop.pack(side='top', fill='both', expand=True)
btStop["command"] = partial(startListener, btStop)
app.mainloop()
| 23.822581
| 97
| 0.586324
|
9b1b7c5c3b522680e787121120a8cbc9c5782822
| 1,102
|
py
|
Python
|
src/cplex/_internal/_pycplex_platform.py
|
tfang94/paql-project
|
0848d13a0f2489349d196a596cc1a1a1f2ee1bfe
|
[
"MIT"
] | 1
|
2021-04-28T21:30:39.000Z
|
2021-04-28T21:30:39.000Z
|
src/cplex/_internal/_pycplex_platform.py
|
tfang94/paql-project
|
0848d13a0f2489349d196a596cc1a1a1f2ee1bfe
|
[
"MIT"
] | null | null | null |
src/cplex/_internal/_pycplex_platform.py
|
tfang94/paql-project
|
0848d13a0f2489349d196a596cc1a1a1f2ee1bfe
|
[
"MIT"
] | 3
|
2021-04-25T16:51:47.000Z
|
2022-02-03T21:04:34.000Z
|
# ------------------------------------------------------------------------------
# Licensed Materials - Property of IBM
# 5725-A06 5725-A29 5724-Y48 5724-Y49 5724-Y54 5724-Y55 5655-Y21
# Copyright IBM Corporation 2008, 2020. All Rights Reserved.
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with
# IBM Corp.
# ------------------------------------------------------------------------------
"""Imports the shared library on supported platforms."""
import platform
from sys import version_info
ERROR_STRING = "CPLEX 20.1.0.0 is not compatible with this version of Python."
if platform.system() in ('Darwin', 'Linux', 'AIX', 'Windows', 'Microsoft'):
if version_info < (3, 7, 0):
raise Exception(ERROR_STRING)
elif version_info < (3, 8, 0):
from cplex._internal.py37_cplex2010 import *
elif version_info < (3, 9, 0):
from cplex._internal.py38_cplex2010 import *
else:
raise Exception(ERROR_STRING)
else:
raise Exception("The CPLEX Python API is not supported on this platform.")
| 38
| 80
| 0.608893
|
7d42de1f6bbdee52afa746f1494b557ea1f026a2
| 24,582
|
py
|
Python
|
website/project/views/contributor.py
|
harrismendell/osf.io
|
e2727b1bb2aaa7de494f941be08cb3e9305ae624
|
[
"Apache-2.0"
] | null | null | null |
website/project/views/contributor.py
|
harrismendell/osf.io
|
e2727b1bb2aaa7de494f941be08cb3e9305ae624
|
[
"Apache-2.0"
] | null | null | null |
website/project/views/contributor.py
|
harrismendell/osf.io
|
e2727b1bb2aaa7de494f941be08cb3e9305ae624
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import time
import itertools
import httplib as http
from collections import Counter
from flask import request
from modularodm.exceptions import ValidationValueError
from framework import forms
from framework import status
from framework.flask import redirect # VOL-aware redirect
from framework.sessions import session
from framework.auth import authenticate
from framework.auth import User, get_user
from framework.exceptions import HTTPError
from framework.auth.signals import user_registered
from framework.auth.decorators import collect_auth, must_be_logged_in
from framework.auth.forms import PasswordForm, SetEmailAndPasswordForm
from framework.transactions.handlers import no_auto_transaction
from website import mails
from website import language
from website import settings
from website.models import Node
from website.profile import utils
from website.project.model import has_anonymous_link
from website.util import web_url_for, is_json_request
from website.project.model import unreg_contributor_added
from website.util.permissions import expand_permissions, ADMIN
from website.project.decorators import (must_have_permission, must_be_valid_project,
must_not_be_registration, must_be_contributor_or_public, must_be_contributor)
@collect_auth
@must_be_valid_project
def get_node_contributors_abbrev(auth, **kwargs):
node = kwargs['node'] or kwargs['project']
anonymous = has_anonymous_link(node, auth)
max_count = kwargs.get('max_count', 3)
if 'user_ids' in kwargs:
users = [
User.load(user_id) for user_id in kwargs['user_ids']
if user_id in node.visible_contributor_ids
]
else:
users = node.visible_contributors
if anonymous or not node.can_view(auth):
raise HTTPError(http.FORBIDDEN)
contributors = []
n_contributors = len(users)
others_count = ''
for index, user in enumerate(users[:max_count]):
if index == max_count - 1 and len(users) > max_count:
separator = ' &'
others_count = str(n_contributors - 3)
elif index == len(users) - 1:
separator = ''
elif index == len(users) - 2:
separator = ' &'
else:
separator = ','
contributors.append({
'user_id': user._primary_key,
'separator': separator,
})
return {
'contributors': contributors,
'others_count': others_count,
}
@collect_auth
@must_be_valid_project
def get_contributors(auth, **kwargs):
# Can set limit to only receive a specified number of contributors in a call to this route
if request.args.get('limit'):
try:
limit = int(request.args['limit'])
except ValueError:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='Invalid value for "limit": {}'.format(request.args['limit'])
))
else:
limit = None
node = kwargs['node'] or kwargs['project']
anonymous = has_anonymous_link(node, auth)
if anonymous or not node.can_view(auth):
raise HTTPError(http.FORBIDDEN)
# Limit is either an int or None:
# if int, contribs list is sliced to specified length
# if None, contribs list is not sliced
contribs = utils.serialize_contributors(
node.visible_contributors[0:limit],
node=node,
)
# Will either return just contributor list or contributor list + 'more' element
if limit:
return {
'contributors': contribs,
'more': max(0, len(node.visible_contributors) - limit)
}
else:
return {'contributors': contribs}
@must_be_logged_in
@must_be_valid_project
def get_contributors_from_parent(auth, **kwargs):
node = kwargs['node'] or kwargs['project']
parent = node.parent_node
if not parent:
raise HTTPError(http.BAD_REQUEST)
if not node.can_view(auth):
raise HTTPError(http.FORBIDDEN)
contribs = [
utils.add_contributor_json(contrib)
for contrib in parent.visible_contributors
if contrib._id not in node.visible_contributor_ids
]
return {'contributors': contribs}
@must_be_contributor_or_public
def get_most_in_common_contributors(auth, **kwargs):
node = kwargs['node'] or kwargs['project']
node_contrib_ids = set(node.contributors._to_primary_keys())
try:
n_contribs = int(request.args.get('max', None))
except (TypeError, ValueError):
n_contribs = settings.MAX_MOST_IN_COMMON_LENGTH
contrib_counts = Counter(contrib_id
for node in auth.user.node__contributed
for contrib_id in node.contributors._to_primary_keys()
if contrib_id not in node_contrib_ids)
active_contribs = itertools.ifilter(
lambda c: User.load(c[0]).is_active,
contrib_counts.most_common()
)
limited = itertools.islice(active_contribs, n_contribs)
contrib_objs = [(User.load(_id), count) for _id, count in limited]
contribs = [
utils.add_contributor_json(most_contrib, auth.user)
for most_contrib, count in sorted(contrib_objs, key=lambda t: (-t[1], t[0].fullname))
]
return {'contributors': contribs}
@must_be_contributor_or_public
def get_recently_added_contributors(auth, **kwargs):
node = kwargs['node'] or kwargs['project']
max_results = request.args.get('max')
if max_results:
try:
max_results = int(max_results)
except (TypeError, ValueError):
raise HTTPError(http.BAD_REQUEST)
if not max_results:
max_results = len(auth.user.recently_added)
# only include active contributors
active_contribs = itertools.ifilter(
lambda c: c.is_active and c._id not in node.contributors,
auth.user.recently_added
)
# Limit to max_results
limited_contribs = itertools.islice(active_contribs, max_results)
contribs = [
utils.add_contributor_json(contrib, auth.user)
for contrib in limited_contribs
]
return {'contributors': contribs}
@must_be_valid_project # returns project
@must_be_contributor
@must_not_be_registration
def project_before_remove_contributor(auth, **kwargs):
node = kwargs['node'] or kwargs['project']
contributor = User.load(request.json.get('id'))
# Forbidden unless user is removing herself
if not node.has_permission(auth.user, 'admin'):
if auth.user != contributor:
raise HTTPError(http.FORBIDDEN)
prompts = node.callback(
'before_remove_contributor', removed=contributor,
)
if auth.user == contributor:
prompts.insert(
0,
'Are you sure you want to remove yourself from this project?'
)
return {'prompts': prompts}
@must_be_valid_project # returns project
@must_be_contributor
@must_not_be_registration
def project_removecontributor(auth, **kwargs):
node = kwargs['node'] or kwargs['project']
contributor = User.load(request.json['id'])
if contributor is None:
raise HTTPError(http.BAD_REQUEST)
# Forbidden unless user is removing herself
if not node.has_permission(auth.user, 'admin'):
if auth.user != contributor:
raise HTTPError(http.FORBIDDEN)
outcome = node.remove_contributor(
contributor=contributor, auth=auth,
)
if outcome:
if auth.user == contributor:
status.push_status_message('Removed self from project', 'info')
return {'redirectUrl': '/dashboard/'}
status.push_status_message('Contributor removed', 'info')
return {}
raise HTTPError(
http.BAD_REQUEST,
data={
'message_long': (
'{0} must have at least one contributor with admin '
'rights'.format(
node.project_or_component.capitalize()
)
)
}
)
def deserialize_contributors(node, user_dicts, auth):
"""View helper that returns a list of User objects from a list of
serialized users (dicts). The users in the list may be registered or
unregistered users.
e.g. ``[{'id': 'abc123', 'registered': True, 'fullname': ..},
{'id': None, 'registered': False, 'fullname'...},
{'id': '123ab', 'registered': False, 'fullname': ...}]
If a dict represents an unregistered user without an ID, creates a new
unregistered User record.
:param Node node: The node to add contributors to
:param list(dict) user_dicts: List of serialized users in the format above.
:param Auth auth:
"""
# Add the registered contributors
contribs = []
for contrib_dict in user_dicts:
fullname = contrib_dict['fullname']
visible = contrib_dict['visible']
email = contrib_dict.get('email')
if contrib_dict['id']:
contributor = User.load(contrib_dict['id'])
else:
try:
contributor = User.create_unregistered(
fullname=fullname,
email=email)
contributor.save()
except ValidationValueError:
contributor = get_user(username=email)
# Add unclaimed record if necessary
if (not contributor.is_registered
and node._primary_key not in contributor.unclaimed_records):
contributor.add_unclaimed_record(node=node, referrer=auth.user,
given_name=fullname,
email=email)
contributor.save()
unreg_contributor_added.send(node, contributor=contributor,
auth=auth)
contribs.append({
'user': contributor,
'visible': visible,
'permissions': expand_permissions(contrib_dict.get('permission'))
})
return contribs
@unreg_contributor_added.connect
def finalize_invitation(node, contributor, auth):
record = contributor.get_unclaimed_record(node._primary_key)
if record['email']:
send_claim_email(record['email'], contributor, node, notify=True)
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_contributors_post(**kwargs):
""" Add contributors to a node. """
node = kwargs['node'] or kwargs['project']
auth = kwargs['auth']
user_dicts = request.json.get('users')
node_ids = request.json.get('node_ids')
if user_dicts is None or node_ids is None:
raise HTTPError(http.BAD_REQUEST)
# Prepare input data for `Node::add_contributors`
contribs = deserialize_contributors(node, user_dicts, auth=auth)
node.add_contributors(contributors=contribs, auth=auth)
node.save()
# Disconnect listener to avoid multiple invite emails
unreg_contributor_added.disconnect(finalize_invitation)
for child_id in node_ids:
child = Node.load(child_id)
# Only email unreg users once
child_contribs = deserialize_contributors(
child, user_dicts, auth=auth
)
child.add_contributors(contributors=child_contribs, auth=auth)
child.save()
# Reconnect listener
unreg_contributor_added.connect(finalize_invitation)
return {'status': 'success'}, 201
@no_auto_transaction
@must_be_valid_project # injects project
@must_have_permission(ADMIN)
@must_not_be_registration
def project_manage_contributors(auth, **kwargs):
"""Reorder and remove contributors.
:param Auth auth: Consolidated authorization
:param-json list contributors: Ordered list of contributors represented as
dictionaries of the form:
{'id': <id>, 'permission': <One of 'read', 'write', 'admin'>}
:raises: HTTPError(400) if contributors to be removed are not in list
or if no admin users would remain after changes were applied
"""
node = kwargs['node'] or kwargs['project']
contributors = request.json.get('contributors')
# Update permissions and order
try:
node.manage_contributors(contributors, auth=auth, save=True)
except ValueError as error:
raise HTTPError(http.BAD_REQUEST, data={'message_long': error.message})
# If user has removed herself from project, alert; redirect to user
# dashboard if node is private, else node dashboard
if not node.is_contributor(auth.user):
status.push_status_message(
'You have removed yourself as a contributor from this project',
'info'
)
if node.is_public:
return {'redirectUrl': node.url}
return {'redirectUrl': web_url_for('dashboard')}
# Else if user has revoked her admin permissions, alert and stay on
# current page
if not node.has_permission(auth.user, ADMIN):
status.push_status_message(
'You have removed your administrative privileges for this project',
'info'
)
# Else stay on current page
return {}
def get_timestamp():
return int(time.time())
def throttle_period_expired(timestamp, throttle):
return timestamp is None or (get_timestamp() - timestamp) > throttle
def send_claim_registered_email(claimer, unreg_user, node, throttle=24 * 3600):
unclaimed_record = unreg_user.get_unclaimed_record(node._primary_key)
referrer = User.load(unclaimed_record['referrer_id'])
claim_url = web_url_for(
'claim_user_registered',
uid=unreg_user._primary_key,
pid=node._primary_key,
token=unclaimed_record['token'],
_external=True,
)
timestamp = unclaimed_record.get('last_sent')
if throttle_period_expired(timestamp, throttle):
# Send mail to referrer, telling them to forward verification link to claimer
mails.send_mail(
referrer.username,
mails.FORWARD_INVITE_REGiSTERED,
user=unreg_user,
referrer=referrer,
node=node,
claim_url=claim_url,
fullname=unclaimed_record['name'],
)
unclaimed_record['last_sent'] = get_timestamp()
unreg_user.save()
# Send mail to claimer, telling them to wait for referrer
mails.send_mail(
claimer.username,
mails.PENDING_VERIFICATION_REGISTERED,
fullname=claimer.fullname,
referrer=referrer,
node=node,
)
def send_claim_email(email, user, node, notify=True, throttle=24 * 3600):
"""Send an email for claiming a user account. Either sends to the given email
or the referrer's email, depending on the email address provided.
:param str email: The address given in the claim user form
:param User user: The User record to claim.
:param Node node: The node where the user claimed their account.
:param bool notify: If True and an email is sent to the referrer, an email
will also be sent to the invited user about their pending verification.
:param int throttle: Time period (in seconds) after the referrer is
emailed during which the referrer will not be emailed again.
"""
invited_email = email.lower().strip()
unclaimed_record = user.get_unclaimed_record(node._primary_key)
referrer = User.load(unclaimed_record['referrer_id'])
claim_url = user.get_claim_url(node._primary_key, external=True)
# If given email is the same provided by user, just send to that email
if unclaimed_record.get('email', None) == invited_email:
mail_tpl = mails.INVITE
to_addr = invited_email
else: # Otherwise have the referrer forward the email to the user
if notify:
pending_mail = mails.PENDING_VERIFICATION
mails.send_mail(
invited_email,
pending_mail,
user=user,
referrer=referrer,
fullname=unclaimed_record['name'],
node=node
)
timestamp = unclaimed_record.get('last_sent')
if throttle_period_expired(timestamp, throttle):
unclaimed_record['last_sent'] = get_timestamp()
user.save()
else: # Don't send the email to the referrer
return
mail_tpl = mails.FORWARD_INVITE
to_addr = referrer.username
mails.send_mail(
to_addr,
mail_tpl,
user=user,
referrer=referrer,
node=node,
claim_url=claim_url,
email=invited_email,
fullname=unclaimed_record['name']
)
return to_addr
def verify_claim_token(user, token, pid):
"""View helper that checks that a claim token for a given user and node ID
is valid. If not valid, throws an error with custom error messages.
"""
# if token is invalid, throw an error
if not user.verify_claim_token(token=token, project_id=pid):
if user.is_registered:
error_data = {
'message_short': 'User has already been claimed.',
'message_long': 'Please <a href="/login/">log in</a> to continue.'}
raise HTTPError(400, data=error_data)
else:
return False
return True
@collect_auth
@must_be_valid_project
def claim_user_registered(auth, **kwargs):
"""View that prompts user to enter their password in order to claim
contributorship on a project.
A user must be logged in.
"""
current_user = auth.user
node = kwargs['node'] or kwargs['project']
sign_out_url = web_url_for('auth_login', logout=True, next=request.path)
if not current_user:
response = redirect(sign_out_url)
return response
    # Logged-in user should not be a contributor to the project
if node.is_contributor(current_user):
data = {'message_short': 'Already a contributor',
'message_long': 'The logged-in user is already a contributor to '
'this project. Would you like to <a href="/logout/">log out</a>?'}
raise HTTPError(http.BAD_REQUEST, data=data)
uid, pid, token = kwargs['uid'], kwargs['pid'], kwargs['token']
unreg_user = User.load(uid)
if not verify_claim_token(unreg_user, token, pid=node._primary_key):
raise HTTPError(http.BAD_REQUEST)
# Store the unreg_user data on the session in case the user registers
# a new account
session.data['unreg_user'] = {
'uid': uid, 'pid': pid, 'token': token
}
form = PasswordForm(request.form)
if request.method == 'POST':
if form.validate():
if current_user.check_password(form.password.data):
node.replace_contributor(old=unreg_user, new=current_user)
node.save()
status.push_status_message(
'You are now a contributor to this project.',
'success')
return redirect(node.url)
else:
status.push_status_message(language.LOGIN_FAILED, 'warning')
else:
forms.push_errors_to_status(form.errors)
if is_json_request():
form_ret = forms.utils.jsonify(form)
user_ret = utils.serialize_user(current_user, full=False)
else:
form_ret = form
user_ret = current_user
return {
'form': form_ret,
'user': user_ret,
'signOutUrl': sign_out_url
}
@user_registered.connect
def replace_unclaimed_user_with_registered(user):
"""Listens for the user_registered signal. If unreg_user is stored in the
session, then the current user is trying to claim themselves as a contributor.
Replaces the old, unregistered contributor with the newly registered
account.
"""
unreg_user_info = session.data.get('unreg_user')
if unreg_user_info:
unreg_user = User.load(unreg_user_info['uid'])
pid = unreg_user_info['pid']
node = Node.load(pid)
node.replace_contributor(old=unreg_user, new=user)
node.save()
status.push_status_message(
'Successfully claimed contributor.', 'success')
@collect_auth
def claim_user_form(auth, **kwargs):
"""View for rendering the set password page for a claimed user.
Must have ``token`` as a querystring argument.
Renders the set password form, validates it, and sets the user's password.
"""
uid, pid = kwargs['uid'], kwargs['pid']
token = request.form.get('token') or request.args.get('token')
# If user is logged in, redirect to 're-enter password' page
if auth.logged_in:
return redirect(web_url_for('claim_user_registered',
uid=uid, pid=pid, token=token))
user = User.load(uid) # The unregistered user
# user ID is invalid. Unregistered user is not in database
if not user:
raise HTTPError(http.BAD_REQUEST)
# If claim token not valid, redirect to registration page
if not verify_claim_token(user, token, pid):
return redirect('/account/')
unclaimed_record = user.unclaimed_records[pid]
user.fullname = unclaimed_record['name']
user.update_guessed_names()
email = unclaimed_record['email']
form = SetEmailAndPasswordForm(request.form, token=token)
if request.method == 'POST':
if form.validate():
username, password = form.username.data, form.password.data
user.register(username=username, password=password)
# Clear unclaimed records
user.unclaimed_records = {}
user.save()
# Authenticate user and redirect to project page
response = redirect('/settings/')
node = Node.load(pid)
status.push_status_message(language.CLAIMED_CONTRIBUTOR.format(node=node),
'success')
return authenticate(user, response)
else:
forms.push_errors_to_status(form.errors)
return {
'firstname': user.given_name,
'email': email if email else '',
'fullname': user.fullname,
'form': forms.utils.jsonify(form) if is_json_request() else form,
}
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def invite_contributor_post(**kwargs):
"""API view for inviting an unregistered user.
Expects JSON arguments with 'fullname' (required) and email (not required).
"""
node = kwargs['node'] or kwargs['project']
    fullname = (request.json.get('fullname') or '').strip()
email = request.json.get('email')
if email:
email = email.lower().strip()
if not fullname:
return {'status': 400, 'message': 'Must provide fullname'}, 400
# Check if email is in the database
user = get_user(username=email)
if user:
if user.is_registered:
msg = 'User is already in database. Please go back and try your search again.'
return {'status': 400, 'message': msg}, 400
elif node.is_contributor(user):
msg = 'User with this email address is already a contributor to this project.'
return {'status': 400, 'message': msg}, 400
else:
serialized = utils.add_contributor_json(user)
# use correct display name
serialized['fullname'] = fullname
serialized['email'] = email
else:
# Create a placeholder
serialized = utils.serialize_unregistered(fullname, email)
return {'status': 'success', 'contributor': serialized}
@must_be_contributor_or_public
def claim_user_post(**kwargs):
"""View for claiming a user from the X-editable form on a project page.
"""
reqdata = request.json
# Unreg user
user = User.load(reqdata['pk'])
node = kwargs['node'] or kwargs['project']
unclaimed_data = user.get_unclaimed_record(node._primary_key)
# Submitted through X-editable
if 'value' in reqdata: # Submitted email address
email = reqdata['value'].lower().strip()
claimer = get_user(username=email)
if claimer:
send_claim_registered_email(claimer=claimer, unreg_user=user,
node=node)
else:
send_claim_email(email, user, node, notify=True)
# TODO(sloria): Too many assumptions about the request data. Just use
elif 'claimerId' in reqdata: # User is logged in and confirmed identity
claimer_id = reqdata['claimerId']
claimer = User.load(claimer_id)
send_claim_registered_email(claimer=claimer, unreg_user=user, node=node)
email = claimer.username
else:
raise HTTPError(http.BAD_REQUEST)
return {
'status': 'success',
'email': email,
'fullname': unclaimed_data['name']
}
| 34.332402
| 94
| 0.659548
|
35bd4d9acea08dc6ab021683cb10db4de588e284
| 1,228
|
py
|
Python
|
Lab_3/busqueda binaria/busqueda_binaria.py
|
Hubert-HD/Lab_ADA
|
5720db350baef7b8b892b563b93f82df7591d301
|
[
"BSD-3-Clause"
] | null | null | null |
Lab_3/busqueda binaria/busqueda_binaria.py
|
Hubert-HD/Lab_ADA
|
5720db350baef7b8b892b563b93f82df7591d301
|
[
"BSD-3-Clause"
] | null | null | null |
Lab_3/busqueda binaria/busqueda_binaria.py
|
Hubert-HD/Lab_ADA
|
5720db350baef7b8b892b563b93f82df7591d301
|
[
"BSD-3-Clause"
] | null | null | null |
# Implementar el algoritmo de búsqueda binaria
import random, time, json, math
def binarySearch(array, target):
# Devuelve "Verdadero" si un número dado se encuentra en un arreglo de enteros, caso contrario devuelve "Falso"
izquierda = 0;
derecha = len(array) - 1
while(izquierda <= derecha):
medio = math.floor((izquierda + derecha) / 2)
if array[medio] == target:
return True
elif array[medio] < target:
izquierda = medio + 1
else:
derecha = medio - 1
return False
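# Illustrative usage (not part of the original lab code): the array must already
# be sorted, as main() ensures below.
#   binarySearch([1, 3, 5, 7, 9], 7)  -> True
#   binarySearch([1, 3, 5, 7, 9], 4)  -> False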
def generateArray(size):
    # Given a size greater than 0, returns an array of integers (with values between -1000000 and 1000000)
array = []
for i in range(size):
array.append(random.randint(-1000000, 1000000))
return array
def main():
    # Saves to a JSON file the execution times of the binary search algorithm for the given input sizes
data = [["#Datos", "Tiempo de ejecucion"],]
for i in [0,10,100,1000,10000,100000,1000000]:
array = generateArray(i)
array.sort()
tic = time.perf_counter()
check = binarySearch(array, 0)
toc = time.perf_counter()
data.append([i, toc - tic])
with open('busqueda_binaria.json', 'w') as file:
json.dump(data, file)
main()
| 32.315789
| 115
| 0.681596
|
9a3588ede9d1a9323aca6728b831c85c5b7a4d56
| 1,048
|
py
|
Python
|
skxray/core/tests/test_image.py
|
celiafish/scikit-xray
|
660a37821d58544b6443c5b8cd9c96daef577ed2
|
[
"BSD-3-Clause"
] | null | null | null |
skxray/core/tests/test_image.py
|
celiafish/scikit-xray
|
660a37821d58544b6443c5b8cd9c96daef577ed2
|
[
"BSD-3-Clause"
] | null | null | null |
skxray/core/tests/test_image.py
|
celiafish/scikit-xray
|
660a37821d58544b6443c5b8cd9c96daef577ed2
|
[
"BSD-3-Clause"
] | 1
|
2019-03-04T07:12:02.000Z
|
2019-03-04T07:12:02.000Z
|
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.random
from nose.tools import assert_equal
import skimage.draw as skd
from scipy.ndimage.morphology import binary_dilation
import skxray.core.image as nimage
def test_find_ring_center_acorr_1D():
for x in [110, 150, 190]:
for y in [110, 150, 190]:
yield (_helper_find_rings,
nimage.find_ring_center_acorr_1D,
(x, y), [10, 25, 50])
def _helper_find_rings(proc_method, center, radii_list):
x, y = center
image_size = (256, 265)
numpy.random.seed(42)
noise = np.random.rand(*image_size)
tt = np.zeros(image_size)
for r in radii_list:
rr, cc = skd.circle_perimeter(x, y, r)
tt[rr, cc] = 1
tt = binary_dilation(tt, structure=np.ones((3, 3))).astype(float) * 100
tt = tt + noise
res = proc_method(tt)
assert_equal(res, center)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| 27.578947
| 75
| 0.657443
|
b4fcac2df9cf55c84e7aa522571fcfc630876c8d
| 705
|
py
|
Python
|
src/environments/env_wrapper.py
|
sarah-keren/MAC
|
884aad0f0b5a6b74fd7a63e464aa8404cdb057c6
|
[
"MIT"
] | null | null | null |
src/environments/env_wrapper.py
|
sarah-keren/MAC
|
884aad0f0b5a6b74fd7a63e464aa8404cdb057c6
|
[
"MIT"
] | null | null | null |
src/environments/env_wrapper.py
|
sarah-keren/MAC
|
884aad0f0b5a6b74fd7a63e464aa8404cdb057c6
|
[
"MIT"
] | null | null | null |
class EnvWrappper:
def __init__(self, env, needs_conv=False):
self.env = env
self.num_obs = self.env.observation_spaces[env.possible_agents[0]].shape
self.num_actions = self.env.action_spaces[env.possible_agents[0]].n
self.needs_conv = needs_conv
self.env_agents = self.env.possible_agents
def get_env(self):
return self.env
def get_num_obs(self):
return self.num_obs
def get_num_actions(self):
return self.num_actions
def get_needs_conv(self):
return self.needs_conv
def get_env_agents(self):
return self.env_agents
def step(self, joint_action):
return self.env.step(joint_action)
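# Illustrative usage (assumption: `env` is a PettingZoo-style parallel environment
# exposing possible_agents, observation_spaces and action_spaces, as the wrapper expects):
#   wrapped = EnvWrappper(env, needs_conv=False)
#   obs_shape, n_actions = wrapped.get_num_obs(), wrapped.get_num_actions()
#   wrapped.step(joint_action)  # forwards a dict of per-agent actions to env.step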
| 26.111111
| 80
| 0.670922
|
04451594405d5047affb0513ed40aad65582d12c
| 1,588
|
py
|
Python
|
examples/textbook/plot_M67Data.py
|
joshuawall/amuse
|
c2034074ee76c08057c4faa96c32044ab40952e9
|
[
"Apache-2.0"
] | 1
|
2019-12-28T22:47:51.000Z
|
2019-12-28T22:47:51.000Z
|
examples/textbook/plot_M67Data.py
|
joshuawall/amuse
|
c2034074ee76c08057c4faa96c32044ab40952e9
|
[
"Apache-2.0"
] | null | null | null |
examples/textbook/plot_M67Data.py
|
joshuawall/amuse
|
c2034074ee76c08057c4faa96c32044ab40952e9
|
[
"Apache-2.0"
] | 2
|
2021-11-19T04:41:37.000Z
|
2021-11-20T02:11:17.000Z
|
#!/usr/bin/python
from __future__ import division
from matplotlib import pyplot
import os
def logLuminosity(V):
VminMv = 9.7
MBolSun = 4.74
Mv = V - VminMv
L = (MBolSun - Mv)/2.5
return L
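# Worked example (illustrative): with the assumed distance modulus VminMv = 9.7,
# a star with V = 9.7 has Mv = 0, so logLuminosity returns (4.74 - 0)/2.5 = 1.896,
# i.e. roughly 10**1.896 ~ 79 solar luminosities.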
def logTeff(BminV):
logT = (14.551 - BminV)/3.684
if logT > 3.961:
a,b,c = [.344,-3.402,8.037]
logT = (-b - (b*b-4*a*c)**.5)/(2.*a)
return logT
def Teff(BminV):
return 10.**logTeff(BminV)
class Cluster():
def __init__(self) :
self.n = 0
self.L = []
self.Teff = []
self.BmV = []
self.V = []
def __iter__(self):
return self
def __repr__(self):
tt = 'Cluster()'
return tt
def read(self):
try:
amusedir = os.environ['AMUSE_DIR']
dir = amusedir+'/examples/textbook/'
except:
            print('Environment variable AMUSE_DIR not set')
dir = './'
isofile = open(dir+'M67Data.dat')
lines = isofile.readlines()
E_BminV = 0.04
for line in lines:
self.V.append(float(line.split()[0]))
self.BmV.append(float(line.split()[1]))
self.L.append(10**logLuminosity(self.V[-1]-E_BminV))
self.Teff.append(10**logTeff(self.BmV[-1]-E_BminV))
self.n = self.n +1
def plot(self):
pyplot.xlim(max(self.Teff), min(self.Teff))
pyplot.scatter(self.Teff, self.L)
pyplot.xlabel("Teff")
pyplot.ylabel("L")
pyplot.show()
if __name__=="__main__":
cls = Cluster()
cls.read()
cls.plot()
| 23.014493
| 64
| 0.528967
|
a8e0d6d3f391c14712924b220fd45245ade932d9
| 3,118
|
py
|
Python
|
Task-5/src/mesh.py
|
rionaldichandraseta/grafika-opengl
|
ab511880d7a36aacfa98c01ebbdecaabb7d6733f
|
[
"MIT"
] | null | null | null |
Task-5/src/mesh.py
|
rionaldichandraseta/grafika-opengl
|
ab511880d7a36aacfa98c01ebbdecaabb7d6733f
|
[
"MIT"
] | null | null | null |
Task-5/src/mesh.py
|
rionaldichandraseta/grafika-opengl
|
ab511880d7a36aacfa98c01ebbdecaabb7d6733f
|
[
"MIT"
] | null | null | null |
import glm
import numpy
import shader
# The GL calls below (glGenVertexArrays, glBindBuffer, ...) are assumed to come
# from PyOpenGL; the original file did not import them.
from OpenGL.GL import *
class Vertex(object):
def __init__(self, position, texture_coordinate, normal):
self._position = position
self._texture_coordinate = texture_coordinate
self._normal = normal
class Texture(object):
def __init__(self, id, texture_type, path):
self._id = id
self._type = texture_type
self._path = path
class Mesh(object):
def __init__(self, buffer, index_buffer):
self._vertices = []
self._indices = []
self._textures = []
self._VAO = glGenVertexArrays(1)
self._VBO = glGenBuffers(1)
self._EBO = glGenBuffers(1)
for idx in range(0, len(buffer), 8):
position = glm.vec3(buffer[idx], buffer[idx + 1], buffer[idx + 2])
# TODO: Check if the mesh has empty texture coordinate
texture = glm.vec2(buffer[idx + 3], buffer[idx + 4])
normal = glm.vec3(buffer[idx + 5], buffer[idx + 6], buffer[idx + 7])
v = Vertex(position, texture, normal)
self._vertices.append(v)
self._indices = index_buffer
# TODO: Find out how to fill self._textures
        self.setupMesh()
    # NOTE: Python does not support overloading, so this second __init__
    # replaces the one above at class-definition time.
    def __init__(self, vertices, indices, textures):
self._vertices = vertices
self._indices = indices
self._textures = textures
self._VAO = glGenVertexArrays(1)
self._VBO = glGenBuffers(1)
self._EBO = glGenBuffers(1)
        self.setupMesh()
    def setupMesh(self):
vertices = numpy.asarray(self._vertices, numpy.float32)
indices = numpy.asarray(self._indices)
textures = numpy.asarray(self._textures, numpy.float32)
glBindVertexArray(self._VAO)
glBindBuffer(GL_ARRAY_BUFFER, self._VBO)
glBufferData(GL_ARRAY_BUFFER, vertices, GL_STATIC_DRAW)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self._EBO)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices, GL_STATIC_DRAW)
glEnableVertexAttribArray(0)
# glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)0);
# // vertex normals
# glEnableVertexAttribArray(1);
# glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, Normal));
# // vertex texture coords
# glEnableVertexAttribArray(2);
# glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, TexCoords));
# // vertex tangent
# glEnableVertexAttribArray(3);
# glVertexAttribPointer(3, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, Tangent));
# // vertex bitangent
# glEnableVertexAttribArray(4);
# glVertexAttribPointer(4, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offsetof(Vertex, Bitangent));
# glBindVertexArray(0);
# TODO: Convert the above lines to python
    def draw(self, shader):
diffuseNr = 1
specularNr = 1
normalNr = 1
heightNr = 1
for idx, texture in enumerate(self._textures):
glActiveTexture(GL_TEXTURE0 + idx)
| 34.263736
| 110
| 0.628608
|
d1dc50e97b6b3b5437f19b1639e8279fd077430c
| 7,556
|
py
|
Python
|
python/fate_test/fate_test/scripts/benchmark_cli.py
|
QuantumA/FATE
|
89a3dd593252128c1bf86fb1014b25a629bdb31a
|
[
"Apache-2.0"
] | 1
|
2022-02-07T06:23:15.000Z
|
2022-02-07T06:23:15.000Z
|
python/fate_test/fate_test/scripts/benchmark_cli.py
|
JavaGreenHands/FATE
|
ea1e94b6be50c70c354d1861093187e523af32f2
|
[
"Apache-2.0"
] | 11
|
2020-10-09T09:53:50.000Z
|
2021-12-06T16:14:51.000Z
|
python/fate_test/fate_test/scripts/benchmark_cli.py
|
JavaGreenHands/FATE
|
ea1e94b6be50c70c354d1861093187e523af32f2
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
import time
import uuid
from datetime import timedelta
from inspect import signature
import click
from fate_test._client import Clients
from fate_test._config import Config
from fate_test._io import LOGGER, echo
from fate_test._parser import BenchmarkSuite
from fate_test.scripts._options import SharedOptions
from fate_test.scripts._utils import _upload_data, _delete_data, _load_testsuites, _load_module_from_script
from fate_test.utils import show_data, match_metrics
DATA_DISPLAY_PATTERN = re.compile("^FATE")
@click.command(name="benchmark-quality")
@click.option('-i', '--include', required=True, type=click.Path(exists=True), multiple=True, metavar="<include>",
help="include *benchmark.json under these paths")
@click.option('-e', '--exclude', type=click.Path(exists=True), multiple=True,
help="exclude *benchmark.json under these paths")
@click.option('-g', '--glob', type=str,
help="glob string to filter sub-directory of path specified by <include>")
@click.option('-t', '--tol', type=float,
help="tolerance (absolute error) for metrics to be considered almost equal. "
"Comparison is done by evaluating abs(a-b) <= max(relative_tol * max(abs(a), abs(b)), absolute_tol)")
@click.option('-s', '--storage-tag', type=str,
help="tag for storing metrics, for future metrics info comparison")
@click.option('-v', '--history-tag', type=str, multiple=True,
help="Extract metrics info from history tags for comparison")
@click.option('-d', '--match-details', type=click.Choice(['all', 'relative', 'absolute', 'none']),
default="all", help="Error value display in algorithm comparison")
@click.option('--skip-data', is_flag=True, default=False,
help="skip uploading data specified in benchmark conf")
@click.option("--disable-clean-data", "clean_data", flag_value=False, default=None)
@click.option("--enable-clean-data", "clean_data", flag_value=True, default=None)
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def run_benchmark(ctx, include, exclude, glob, skip_data, tol, clean_data, storage_tag, history_tag, match_details,
**kwargs):
"""
process benchmark suite, alias: bq
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
config_inst.extend_sid = ctx.obj["extend_sid"]
config_inst.auto_increasing_sid = ctx.obj["auto_increasing_sid"]
if clean_data is None:
clean_data = config_inst.clean_data
data_namespace_mangling = ctx.obj["namespace_mangling"]
yes = ctx.obj["yes"]
echo.welcome("benchmark")
echo.echo(f"testsuite namespace: {namespace}", fg='red')
echo.echo("loading testsuites:")
suites = _load_testsuites(includes=include, excludes=exclude, glob=glob,
suffix="benchmark.json", suite_type="benchmark")
for suite in suites:
echo.echo(f"\tdataset({len(suite.dataset)}) benchmark groups({len(suite.pairs)}) {suite.path}")
if not yes and not click.confirm("running?"):
return
with Clients(config_inst) as client:
fate_version = client["guest_0"].get_version()
for i, suite in enumerate(suites):
# noinspection PyBroadException
try:
start = time.time()
echo.echo(f"[{i + 1}/{len(suites)}]start at {time.strftime('%Y-%m-%d %X')} {suite.path}", fg='red')
if not skip_data:
try:
_upload_data(client, suite, config_inst)
except Exception as e:
raise RuntimeError(f"exception occur while uploading data for {suite.path}") from e
try:
_run_benchmark_pairs(config_inst, suite, tol, namespace, data_namespace_mangling, storage_tag,
history_tag, fate_version, match_details)
except Exception as e:
raise RuntimeError(f"exception occur while running benchmark jobs for {suite.path}") from e
if not skip_data and clean_data:
_delete_data(client, suite)
echo.echo(f"[{i + 1}/{len(suites)}]elapse {timedelta(seconds=int(time.time() - start))}", fg='red')
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception in {suite.path}, exception_id={exception_id}", err=True, fg='red')
LOGGER.exception(f"exception id: {exception_id}")
finally:
echo.stdout_newline()
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
@LOGGER.catch
def _run_benchmark_pairs(config: Config, suite: BenchmarkSuite, tol: float, namespace: str,
data_namespace_mangling: bool, storage_tag, history_tag, fate_version, match_details):
# pipeline demo goes here
pair_n = len(suite.pairs)
fate_base = config.fate_base
    PYTHONPATH = os.environ.get('PYTHONPATH', '') + ":" + os.path.join(fate_base, "python")
os.environ['PYTHONPATH'] = PYTHONPATH
for i, pair in enumerate(suite.pairs):
echo.echo(f"Running [{i + 1}/{pair_n}] group: {pair.pair_name}")
results = {}
# data_summary = None
job_n = len(pair.jobs)
for j, job in enumerate(pair.jobs):
try:
echo.echo(f"Running [{j + 1}/{job_n}] job: {job.job_name}")
job_name, script_path, conf_path = job.job_name, job.script_path, job.conf_path
param = Config.load_from_file(conf_path)
mod = _load_module_from_script(script_path)
input_params = signature(mod.main).parameters
# local script
if len(input_params) == 1:
data, metric = mod.main(param=param)
elif len(input_params) == 2:
data, metric = mod.main(config=config, param=param)
# pipeline script
elif len(input_params) == 3:
if data_namespace_mangling:
data, metric = mod.main(config=config, param=param, namespace=f"_{namespace}")
else:
data, metric = mod.main(config=config, param=param)
else:
data, metric = mod.main()
results[job_name] = metric
echo.echo(f"[{j + 1}/{job_n}] job: {job.job_name} Success!\n")
if data and DATA_DISPLAY_PATTERN.match(job_name):
# data_summary = data
show_data(data)
# if data_summary is None:
# data_summary = data
except Exception as e:
exception_id = uuid.uuid1()
echo.echo(f"exception while running [{j + 1}/{job_n}] job, exception_id={exception_id}", err=True,
fg='red')
LOGGER.exception(f"exception id: {exception_id}, error message: \n{e}")
continue
rel_tol = pair.compare_setting.get("relative_tol")
# show_data(data_summary)
match_metrics(evaluate=True, group_name=pair.pair_name, abs_tol=tol, rel_tol=rel_tol,
storage_tag=storage_tag, history_tag=history_tag, fate_version=fate_version,
cache_directory=config.cache_directory, match_details=match_details, **results)
| 49.710526
| 120
| 0.616464
|
3efca31d583f7ad22f2ce7fa845bd7f4340e9268
| 16,456
|
py
|
Python
|
tests/nodeos_forked_chain_test.py
|
InfraBlockchain/infrablockchain
|
447fa0e836d31be35357a07c22dc55d93079bbcf
|
[
"MIT"
] | 14
|
2021-04-15T06:50:08.000Z
|
2022-02-23T14:43:14.000Z
|
tests/nodeos_forked_chain_test.py
|
InfraBlockchain/infrablockchain
|
447fa0e836d31be35357a07c22dc55d93079bbcf
|
[
"MIT"
] | null | null | null |
tests/nodeos_forked_chain_test.py
|
InfraBlockchain/infrablockchain
|
447fa0e836d31be35357a07c22dc55d93079bbcf
|
[
"MIT"
] | 3
|
2021-07-04T13:08:17.000Z
|
2021-12-14T03:46:50.000Z
|
#!/usr/bin/env python3
from testUtils import Utils
import testUtils
import time
from Cluster import Cluster
from WalletMgr import WalletMgr
from Node import BlockType
from Node import Node
from TestHelper import AppArgs
from TestHelper import TestHelper
import decimal
import math
import re
import signal
###############################################################
# nodeos_forked_chain_test
# --dump-error-details <Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout>
# --keep-logs <Don't delete var/lib/node_* folders upon test completion>
###############################################################
Print=Utils.Print
from core_symbol import CORE_SYMBOL
from native_token_symbol import INFRABLOCKCHAIN_NATIVE_TOKEN_SYMBOL
def analyzeBPs(bps0, bps1, expectDivergence):
start=0
index=None
length=len(bps0)
firstDivergence=None
errorInDivergence=False
analysysPass=0
bpsStr=None
bpsStr0=None
bpsStr1=None
while start < length:
analysysPass+=1
bpsStr=None
for i in range(start,length):
bp0=bps0[i]
bp1=bps1[i]
if bpsStr is None:
bpsStr=""
else:
bpsStr+=", "
blockNum0=bp0["blockNum"]
prod0=bp0["prod"]
blockNum1=bp1["blockNum"]
prod1=bp1["prod"]
numDiff=True if blockNum0!=blockNum1 else False
prodDiff=True if prod0!=prod1 else False
if numDiff or prodDiff:
index=i
if firstDivergence is None:
firstDivergence=min(blockNum0, blockNum1)
if not expectDivergence:
errorInDivergence=True
break
bpsStr+=str(blockNum0)+"->"+prod0
if index is None:
if expectDivergence:
errorInDivergence=True
break
return None
bpsStr0=None
        bpsStr1=None
start=length
for i in range(index,length):
if bpsStr0 is None:
bpsStr0=""
bpsStr1=""
else:
bpsStr0+=", "
bpsStr1+=", "
bp0=bps0[i]
bp1=bps1[i]
blockNum0=bp0["blockNum"]
prod0=bp0["prod"]
blockNum1=bp1["blockNum"]
prod1=bp1["prod"]
numDiff="*" if blockNum0!=blockNum1 else ""
prodDiff="*" if prod0!=prod1 else ""
if not numDiff and not prodDiff:
start=i
index=None
if expectDivergence:
errorInDivergence=True
break
bpsStr0+=str(blockNum0)+numDiff+"->"+prod0+prodDiff
bpsStr1+=str(blockNum1)+numDiff+"->"+prod1+prodDiff
if errorInDivergence:
break
if errorInDivergence:
msg="Failed analyzing block producers - "
if expectDivergence:
msg+="nodes indicate different block producers for the same blocks, but did not expect them to diverge."
else:
msg+="did not expect nodes to indicate different block producers for the same blocks."
msg+="\n Matching Blocks= %s \n Diverging branch node0= %s \n Diverging branch node1= %s" % (bpsStr,bpsStr0,bpsStr1)
Utils.errorExit(msg)
return firstDivergence
def getMinHeadAndLib(prodNodes):
info0=prodNodes[0].getInfo(exitOnError=True)
info1=prodNodes[1].getInfo(exitOnError=True)
headBlockNum=min(int(info0["head_block_num"]),int(info1["head_block_num"]))
libNum=min(int(info0["last_irreversible_block_num"]), int(info1["last_irreversible_block_num"]))
return (headBlockNum, libNum)
args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
"--p2p-plugin","--wallet-port"})
Utils.Debug=args.v
totalProducerNodes=2
totalNonProducerNodes=1
totalNodes=totalProducerNodes+totalNonProducerNodes
maxActiveProducers=21
totalProducers=maxActiveProducers
cluster=Cluster(walletd=True)
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontKill=args.leave_running
prodCount=args.prod_count
killAll=args.clean_run
p2pPlugin=args.p2p_plugin
walletPort=args.wallet_port
walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killEosInstances=not dontKill
killWallet=not dontKill
WalletdName=Utils.EosWalletName
ClientName="infra-cli"
try:
TestHelper.printSystemInfo("BEGIN")
cluster.setWalletMgr(walletMgr)
cluster.killall(allInstances=killAll)
cluster.cleanup()
Print("Stand up cluster")
specificExtraNodeosArgs={}
# producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node
specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin"
    # *** setup topography ***
    # "bridge" shape connects defproducera through defproducerk (in node0) to each other and defproducerl through defproduceru (in node01)
# and the only connection between those 2 groups is through the bridge node
if cluster.launch(prodCount=prodCount, onlyBios=False, topo="bridge", pnodes=totalProducerNodes,
totalNodes=totalNodes, totalProducers=totalProducers, p2pPlugin=p2pPlugin,
useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs) is False:
Utils.cmdError("launcher")
Utils.errorExit("Failed to stand up eos cluster.")
Print("Validating system accounts after bootstrap")
cluster.validateAccounts(None)
# *** create accounts to vote in desired producers ***
accounts=cluster.createAccountKeys(5)
if accounts is None:
Utils.errorExit("FAILURE - create keys")
accounts[0].name="tester111111"
accounts[1].name="tester222222"
accounts[2].name="tester333333"
accounts[3].name="tester444444"
accounts[4].name="tester555555"
testWalletName="test"
Print("Creating wallet \"%s\"." % (testWalletName))
testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4]])
for _, account in cluster.defProducerAccounts.items():
walletMgr.importKey(account, testWallet, ignoreDupKeyWarning=True)
Print("Wallet \"%s\" password=%s." % (testWalletName, testWallet.password.encode("utf-8")))
# *** identify each node (producers and non-producing node) ***
nonProdNode=None
prodNodes=[]
producers=[]
for i in range(0, totalNodes):
node=cluster.getNode(i)
node.producers=Cluster.parseProducers(i)
numProducers=len(node.producers)
Print("node has producers=%s" % (node.producers))
if numProducers==0:
if nonProdNode is None:
nonProdNode=node
nonProdNode.nodeNum=i
else:
Utils.errorExit("More than one non-producing nodes")
else:
for prod in node.producers:
trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, waitForTransBlock=False, exitOnError=True)
trans=node.authproducer(cluster.defProducerAccounts[prod], waitForTransBlock=False, exitOnError=True)
prodNodes.append(node)
producers.extend(node.producers)
# *** delegate bandwidth to accounts ***
node=prodNodes[0]
# create accounts via eosio as otherwise a bid is needed
for account in accounts:
Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name))
trans=node.createAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True)
transferAmount="100000000.00 {0}".format(INFRABLOCKCHAIN_NATIVE_TOKEN_SYMBOL)
Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name))
node.transferNativeToken(cluster.eosioAccount, account, transferAmount, "test transfer")
# *** Identify a block where production is stable ***
#verify nodes are in sync and advancing
cluster.waitOnClusterSync(blockAdvancing=5)
blockNum=node.getNextCleanProductionCycle(trans)
blockProducer=node.getBlockProducerByNum(blockNum)
Print("Validating blockNum=%s, producer=%s" % (blockNum, blockProducer))
cluster.biosNode.kill(signal.SIGTERM)
#advance to the next block of 12
lastBlockProducer=blockProducer
while blockProducer==lastBlockProducer:
blockNum+=1
blockProducer=node.getBlockProducerByNum(blockNum)
    # *** Identify what the production cycle is ***
productionCycle=[]
producerToSlot={}
slot=-1
inRowCountPerProducer=12
while True:
if blockProducer not in producers:
Utils.errorExit("Producer %s was not one of the voted on producers" % blockProducer)
productionCycle.append(blockProducer)
slot+=1
if blockProducer in producerToSlot:
Utils.errorExit("Producer %s was first seen in slot %d, but is repeated in slot %d" % (blockProducer, producerToSlot[blockProducer], slot))
producerToSlot[blockProducer]={"slot":slot, "count":0}
lastBlockProducer=blockProducer
while blockProducer==lastBlockProducer:
producerToSlot[blockProducer]["count"]+=1
blockNum+=1
blockProducer=node.getBlockProducerByNum(blockNum)
if producerToSlot[lastBlockProducer]["count"]!=inRowCountPerProducer:
Utils.errorExit("Producer %s, in slot %d, expected to produce %d blocks but produced %d blocks" % (blockProducer, inRowCountPerProducer, producerToSlot[lastBlockProducer]["count"]))
if blockProducer==productionCycle[0]:
break
output=None
for blockProducer in productionCycle:
if output is None:
output=""
else:
output+=", "
output+=blockProducer+":"+str(producerToSlot[blockProducer]["count"])
Print("ProductionCycle ->> {\n%s\n}" % output)
#retrieve the info for all the nodes to report the status for each
for node in cluster.getNodes():
node.getInfo()
cluster.reportStatus()
# *** Killing the "bridge" node ***
Print("Sending command to kill \"bridge\" node to separate the 2 producer groups.")
# block number to start expecting node killed after
preKillBlockNum=nonProdNode.getBlockNum()
preKillBlockProducer=nonProdNode.getBlockProducerByNum(preKillBlockNum)
# kill at last block before defproducerl, since the block it is killed on will get propagated
killAtProducer="defproducerk"
nonProdNode.killNodeOnProducer(producer=killAtProducer, whereInSequence=(inRowCountPerProducer-1))
# *** Identify a highest block number to check while we are trying to identify where the divergence will occur ***
# will search full cycle after the current block, since we don't know how many blocks were produced since retrieving
# block number and issuing kill command
postKillBlockNum=prodNodes[1].getBlockNum()
blockProducers0=[]
blockProducers1=[]
libs0=[]
libs1=[]
lastBlockNum=max([preKillBlockNum,postKillBlockNum])+maxActiveProducers*inRowCountPerProducer
actualLastBlockNum=None
prodChanged=False
nextProdChange=False
    #identify the earliest LIB, to find the earliest block to check whether divergent branches eventually reach consensus
(headBlockNum, libNumAroundDivergence)=getMinHeadAndLib(prodNodes)
for blockNum in range(preKillBlockNum,lastBlockNum):
#avoiding getting LIB until my current block passes the head from the last time I checked
if blockNum>headBlockNum:
(headBlockNum, libNumAroundDivergence)=getMinHeadAndLib(prodNodes)
# track the block number and producer from each producing node
blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum)
blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum)
blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0})
blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1})
#in the case that the preKillBlockNum was also produced by killAtProducer, ensure that we have
#at least one producer transition before checking for killAtProducer
if not prodChanged:
if preKillBlockProducer!=blockProducer0:
prodChanged=True
#since it is killing for the last block of killAtProducer, we look for the next producer change
if not nextProdChange and prodChanged and blockProducer1==killAtProducer:
nextProdChange=True
elif nextProdChange and blockProducer1!=killAtProducer:
actualLastBlockNum=blockNum
break
#if we diverge before identifying the actualLastBlockNum, then there is an ERROR
if blockProducer0!=blockProducer1:
Utils.errorExit("Groups reported different block producers for block number %d. %s != %s." % (blockNum,blockProducer0,blockProducer1))
# *** Analyze the producers leading up to the block after killing the non-producing node ***
firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True)
# Nodes should not have diverged till the last block
if firstDivergence!=blockNum:
Utils.errorExit("Expected to diverge at %s, but diverged at %s." % (firstDivergence, blockNum))
blockProducers0=[]
blockProducers1=[]
#verify that the non producing node is not alive (and populate the producer nodes with current getInfo data to report if
#an error occurs)
if nonProdNode.verifyAlive():
Utils.errorExit("Expected the non-producing node to have shutdown.")
for prodNode in prodNodes:
prodNode.getInfo()
# *** Track the blocks from the divergence till there are 10*12 blocks on one chain and 10*12+1 on the other ***
killBlockNum=blockNum
lastBlockNum=killBlockNum+(maxActiveProducers - 1)*inRowCountPerProducer+1 # allow 1st testnet group to produce just 1 more block than the 2nd
for blockNum in range(killBlockNum,lastBlockNum):
blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum)
blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum)
blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0})
blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1})
# *** Analyze the producers from the divergence to the lastBlockNum and verify they stay diverged ***
firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True)
if firstDivergence!=killBlockNum:
Utils.errorExit("Expected to diverge at %s, but diverged at %s." % (firstDivergence, killBlockNum))
blockProducers0=[]
blockProducers1=[]
# *** Relaunch the non-producing bridge node to connect the producing nodes again ***
if not nonProdNode.relaunch(nonProdNode.nodeNum, None):
errorExit("Failure - (non-production) node %d should have restarted" % (nonProdNode.nodeNum))
# *** Identify the producers from the saved LIB to the current highest head ***
    #ensure that the nodes have enough time to get in consensus, so wait for 3 producers to produce their complete round
time.sleep(inRowCountPerProducer * 3 / 2)
# ensure all blocks from the lib before divergence till the current head are now in consensus
endBlockNum=max(prodNodes[0].getBlockNum(), prodNodes[1].getBlockNum())
for blockNum in range(libNumAroundDivergence,endBlockNum):
blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum)
blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum)
blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0})
blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1})
# *** Analyze the producers from the saved LIB to the current highest head and verify they match now ***
analyzeBPs(blockProducers0, blockProducers1, expectDivergence=False)
blockProducers0=[]
blockProducers1=[]
testSuccessful=True
finally:
TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails)
exit(0)
| 40.038929
| 193
| 0.685404
|
c8a1f5c4d6af3eedefcbf34399e6f394aba013ac
| 15,870
|
py
|
Python
|
apps/sarasate.py
|
mudbri/Faure
|
bda4321abf54fea3df686c8958209c449798eb73
|
[
"Apache-2.0"
] | null | null | null |
apps/sarasate.py
|
mudbri/Faure
|
bda4321abf54fea3df686c8958209c449798eb73
|
[
"Apache-2.0"
] | null | null | null |
apps/sarasate.py
|
mudbri/Faure
|
bda4321abf54fea3df686c8958209c449798eb73
|
[
"Apache-2.0"
] | null | null | null |
from ravel.app import AppConsole, mk_watchcmd
import psycopg2
import tabulate
import re
from z3 import *
class RelaAlgConsole(AppConsole):
def default(self, line):
"Execute a PostgreSQL statement"
try:
select_clause, from_clause, defined_where_clause, where_lists = self.pre_processing(line)
self.generator(select_clause, from_clause, defined_where_clause, where_lists)
except psycopg2.ProgrammingError as e:
print(e)
return
try:
self.db.cursor.execute("select * from output;")
data = self.db.cursor.fetchall()
if data is not None:
names = [row[0] for row in self.db.cursor.description]
print(tabulate.tabulate(data, headers=names))
except psycopg2.ProgrammingError:
# no results, eg from an insert/delete
pass
except TypeError as e:
print(e)
def pre_processing(self, query):
# remove ;
if ';' in query:
query = query[:-1]
query_lower = query.lower()
# detect the location of where
where_index = query_lower.find('where')
where_clause = query[where_index+5:]
select_clause = query_lower[ :where_index]
# get the tables
pattern = re.compile(r'from(.*?)where', re.S)
from_clause = re.findall(pattern, query_lower)[0].strip()
'''
Processing comparison operators
'''
defined_where_clause = ""
where_lists = re.split("and", where_clause)
for w in where_lists:
            # if 'len' is in w, that means this column's type is integer
if 'len' in w:
continue
if '!=' in w:
args = w.split('!=')
left = args[0].strip()
right = args[1].strip()
defined_where_clause = defined_where_clause + "c_not_equal({}, {}) and".format(left, right)
elif '<>' in w:
args = w.split('<>')
left = args[0].strip()
right = args[1].strip()
defined_where_clause = defined_where_clause + "c_not_equal({}, {}) and ".format(left, right)
elif '<=' in w:
args = w.split('<=')
left = args[0].strip()
right = args[1].strip()
defined_where_clause = defined_where_clause + "c_leq({}, {}) and ".format(left, right)
elif '>=' in w:
args = w.split('>=')
left = args[0].strip()
right = args[1].strip()
defined_where_clause = defined_where_clause + "c_geq({}, {}) and ".format(left, right)
elif '<' in w:
args = w.split('<')
left = args[0].strip()
right = args[1].strip()
defined_where_clause = defined_where_clause + "c_less({}, {}) and ".format(left, right)
elif '>' in w:
args = w.split('>')
left = args[0].strip()
right = args[1].strip()
defined_where_clause = defined_where_clause + "c_greater({}, {}) and ".format(left, right)
elif '=' in w:
args = w.split('=')
left = args[0].strip()
right = args[1].strip()
defined_where_clause = defined_where_clause + "c_equal({}, {}) and ".format(left, right)
defined_where_clause = defined_where_clause[:-4] # remove final 'and'
return select_clause, from_clause, defined_where_clause, where_lists
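    # Illustrative transformation performed by pre_processing (hypothetical query):
    #   "select * from R where a < b and c = 5"
    # yields a defined_where_clause of roughly
    #   "c_less(a, b) and c_equal(c, 5)"
    # where c_less/c_equal are the user-defined comparison functions in the database.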
def generator(self, select_clause, from_clause, where_clause, where_lists):
self.db.cursor.execute("drop table if exists output")
'''
The number of tables is greater than 1, it is join operation
else, it is selection
'''
table_list = from_clause.split(',')
if len(table_list) > 1:
# print('join')
t1_name = table_list[0].strip()
t2_name = table_list[1].strip()
if '*' in select_clause:
'''
get the attributes of each table
'''
self.db.cursor.execute("select * from {}".format(t1_name))
t1_attrs = [row[0] for row in self.db.cursor.description]
self.db.cursor.execute("select * from {}".format(t2_name))
t2_attrs = [row[0] for row in self.db.cursor.description]
'''
get common attributes and difference attributes
'''
common_attr = set(t1_attrs).intersection(set(t2_attrs)) - set(['condition'])
union_attr = set(t1_attrs).union(set(t2_attrs)) - set(['condition'])
diff_attr = union_attr - common_attr
attr_diff = ""
attr_equal = ""
for c in common_attr:
attr_equal += "{}.{}, {}.{} AS {}_{},".format(t1_name, c, t2_name, c, t2_name, c)
for d in diff_attr:
attr_diff += " {},".format(d)
if 'condition' in t1_attrs and 'condition' in t2_attrs:
attr_diff += "array_cat({}.condition, {}.condition) as condition".format(t1_name, t2_name)
elif 'condition' in t1_attrs or 'condition' in t2_attrs:
attr_diff += "condition"
else:
attr_diff = attr_diff
# print("Step1: Create Data Content")
sql = "create table output as select {} {} FROM {} where ".format(attr_equal, attr_diff, from_clause) + where_clause
self.db.cursor.execute(sql)
# print("Step2: Update Condition")
for w in where_lists:
args = w.strip().split(' ')
left = args[0].strip()
if '.' in left:
left = left.replace(t1_name + '.', '')
opr = args[1].strip()
right = args[2].strip()
if '.' in right:
right = right.replace('.', '_')
                    # replace = with == in order to accommodate z3
if '!=' not in opr and '<=' not in opr and '>=' not in opr and '=' in opr:
opr = opr.replace('=', '==')
sql = "update output set condition = array_append(condition, {} || ' {} ' || {});".format(left, opr, right)
self.db.cursor.execute(sql)
attr_drop = ""
for c in common_attr:
attr_drop = attr_drop + "drop column {}_{}, ".format(t2_name, c)
if 'len' in c:
continue
sql = "update output set {} = {}_{} where not is_var({})".format(c, t2_name, c, c)
self.db.cursor.execute(sql)
# remove the spare ,
attr_drop = attr_drop[:-2]
sql = "alter table output {};".format(attr_drop)
self.db.cursor.execute(sql)
else:
print("still working")
else:
# print('selection')
# print('Step1: Create Data Content')
sql = 'create table output as '
sql = sql + select_clause + ' where '+ where_clause + ';'
self.db.cursor.execute(sql)
# print('Step2: Update Condition')
for w in where_lists:
args = w.strip().split(' ')
left = args[0].strip()
opr = args[1].strip()
right = args[2].strip()
                # replace = with == in order to accommodate z3
if '!=' not in opr and '<=' not in opr and '>=' not in opr and '=' in opr:
opr = opr.replace('=', '==')
sql = "update output set condition = array_append(condition, {} || ' {} ' || {});".format(left, opr, right)
self.db.cursor.execute(sql)
# print('Step3: Normalization')
sql = 'delete from output where is_contradiction(condition);'
self.db.cursor.execute(sql)
sql = "UPDATE output SET condition = '{}' WHERE is_tauto(condition);"
self.db.cursor.execute(sql)
sql = "UPDATE output SET condition = remove_redundant(condition) WHERE has_redundant(condition);"
self.db.cursor.execute(sql)
def do_data(self, line):
"Create data content."
select_clause, from_clause, defined_where_clause, where_lists = self.pre_processing(line)
self._data(select_clause, from_clause, defined_where_clause, where_lists)
def do_condition(self, line):
"Update Conditions"
select_clause, from_clause, defined_where_clause, where_lists = self.pre_processing(line)
self._condition(select_clause, from_clause, defined_where_clause, where_lists)
# _, condition, _ = self._get_sql(line)
# print("\nStep2: Update Conditions\n")
# for c in condition:
# if c != '':
# print(c)
# self.db.cursor.execute(c)
def do_z3(self, line):
self._z3()
def _data(self, select_clause, from_clause, where_clause, where_lists):
self.db.cursor.execute("drop table if exists output")
'''
The number of tables is greater than 1, it is join operation
else, it is selection
'''
table_list = from_clause.split(',')
if len(table_list) > 1:
# print('join')
t1_name = table_list[0].strip()
t2_name = table_list[1].strip()
if '*' in select_clause:
'''
get the attributes of each table
'''
self.db.cursor.execute("select * from {}".format(t1_name))
t1_attrs = [row[0] for row in self.db.cursor.description]
self.db.cursor.execute("select * from {}".format(t2_name))
t2_attrs = [row[0] for row in self.db.cursor.description]
'''
get common attributes and difference attributes
'''
common_attr = set(t1_attrs).intersection(set(t2_attrs)) - set(['condition'])
union_attr = set(t1_attrs).union(set(t2_attrs)) - set(['condition'])
diff_attr = union_attr - common_attr
attr_diff = ""
attr_equal = ""
for c in common_attr:
attr_equal += "{}.{}, {}.{} AS {}_{},".format(t1_name, c, t2_name, c, t2_name, c)
for d in diff_attr:
attr_diff += " {},".format(d)
attr_diff += "array_cat({}.condition, {}.condition) as condition".format(t1_name, t2_name)
# print("Step1: Create Data Content")
sql = "create table output as select {} {} FROM {} where ".format(attr_equal, attr_diff, from_clause) + where_clause
self.db.cursor.execute(sql)
else:
print("still working")
else:
# print('selection')
# print('Step1: Create Data Content')
sql = 'create table output as '
sql = sql + select_clause + ' where '+ where_clause + ';'
print(sql)
self.db.cursor.execute(sql)
def _condition(self, select_clause, from_clause, where_clause, where_lists):
'''
The number of tables is greater than 1, it is join operation
else, it is selection
'''
table_list = from_clause.split(',')
if len(table_list) > 1:
# print('join')
t1_name = table_list[0].strip()
t2_name = table_list[1].strip()
if '*' in select_clause:
'''
get the attributes of each table
'''
self.db.cursor.execute("select * from {}".format(t1_name))
t1_attrs = [row[0] for row in self.db.cursor.description]
self.db.cursor.execute("select * from {}".format(t2_name))
t2_attrs = [row[0] for row in self.db.cursor.description]
'''
get common attributes and difference attributes
'''
common_attr = set(t1_attrs).intersection(set(t2_attrs)) - set(['condition'])
union_attr = set(t1_attrs).union(set(t2_attrs)) - set(['condition'])
diff_attr = union_attr - common_attr
# print("Step2: Update Condition")
for w in where_lists:
args = w.strip().split(' ')
left = args[0].strip()
if '.' in left:
left = left.replace(t1_name + '.', '')
opr = args[1].strip()
right = args[2].strip()
if '.' in right:
right = right.replace('.', '_')
                    # replace = with == in order to accommodate z3
if '!=' not in opr and '<=' not in opr and '>=' not in opr and '=' in opr:
opr = opr.replace('=', '==')
sql = "update output set condition = array_append(condition, {} || ' {} ' || {});".format(left, opr, right)
self.db.cursor.execute(sql)
attr_drop = ""
for c in common_attr:
sql = "update output set {} = {}_{} where not is_var({})".format(c, t2_name, c, c)
attr_drop = attr_drop + "drop column {}_{}, ".format(t2_name, c)
self.db.cursor.execute(sql)
# remove the spare ,
attr_drop = attr_drop[:-2]
sql = "alter table output {};".format(attr_drop)
self.db.cursor.execute(sql)
else:
print("still working")
else:
# print('Step2: Update Condition')
for w in where_lists:
args = w.strip().split(' ')
left = args[0].strip()
opr = args[1].strip()
right = args[2].strip()
                # replace = with == in order to accommodate z3
if '!=' not in opr and '<=' not in opr and '>=' not in opr and '=' in opr:
opr = opr.replace('=', '==')
sql = "update output set condition = array_append(condition, {} || ' {} ' || {});".format(left, opr, right)
self.db.cursor.execute(sql)
def _z3(self):
# print('Step3: Normalization')
sql = 'delete from output where is_contradiction(condition);'
self.db.cursor.execute(sql)
sql = "UPDATE output SET condition = '{}' WHERE is_tauto(condition);"
self.db.cursor.execute(sql)
sql = "UPDATE output SET condition = remove_redundant(condition) WHERE has_redundant(condition);"
self.db.cursor.execute(sql)
def do_watch(self, line):
"""Launch an xterm window to watch database tables in real-time
Usage: watch [table1(,max_rows)] [table2(,max_rows)] ...
Example: watch hosts switches cf,5"""
if not line:
return
args = line.split()
if len(args) == 0:
print("Invalid syntax")
return
cmd, cmdfile = mk_watchcmd(self.env.db, args)
self.env.mkterm(cmd, cmdfile)
shortcut = "s"
description = "Relational Algebra for Conditional Table."
console = RelaAlgConsole
| 39.185185
| 132
| 0.501449
|
d5d1d0590bb0fb76880b4b3549211a6507904aec
| 4,224
|
py
|
Python
|
swaggercheck/strategies/basestrategies.py
|
FrancoMaxime/swagger-check
|
d884604b338afcbda3679fd8baca87721fbf1197
|
[
"MIT"
] | 1
|
2019-07-12T07:18:07.000Z
|
2019-07-12T07:18:07.000Z
|
swaggercheck/strategies/basestrategies.py
|
FrancoMaxime/swagger-check
|
d884604b338afcbda3679fd8baca87721fbf1197
|
[
"MIT"
] | 7
|
2019-07-06T08:00:00.000Z
|
2020-01-27T14:28:17.000Z
|
swaggercheck/strategies/basestrategies.py
|
FrancoMaxime/swagger-check
|
d884604b338afcbda3679fd8baca87721fbf1197
|
[
"MIT"
] | 2
|
2019-07-06T08:07:48.000Z
|
2019-09-24T13:20:54.000Z
|
"""
Extra hypothesis strategies built from those in `hypothesis.strategies`, and
helper functions for merging dictionary type strategies and dictionaries of
strategies.
"""
import logging
import datetime
import io
import hypothesis.strategies as hy_st
__all__ = [
"json",
"dates",
"times",
"datetimes",
"file_objects",
"files",
"merge_dicts_strategy",
"merge_dicts_max_size_strategy",
"merge_optional_dict_strategy",
]
log = logging.getLogger(__name__)
def json(value_limit=5):
"""Hypothesis strategy for generating values that can be passed to
`json.dumps` to produce valid JSON data.
:param value_limit: A limit on the number of values in the JSON data -
setting this too high can cause value generation to
time out.
:type value_limit: int
"""
return hy_st.recursive(
hy_st.floats() | hy_st.booleans() | hy_st.text() | hy_st.none(),
lambda children: hy_st.dictionaries(hy_st.text(), children),
max_leaves=value_limit,
)
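# Illustrative usage (assumption, not part of the original module): strategies
# returned here plug into @hypothesis.given or can be sampled interactively:
#   json(value_limit=3).example()  # e.g. 0.5, None, or {'a': True}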
def dates():
"""Hypothesis strategy for generating `datetime.date` values."""
return hy_st.builds(
datetime.date.fromordinal,
hy_st.integers(min_value=1, max_value=datetime.date.max.toordinal()),
)
def times():
"""Hypothesis strategy for generating `datetime.time` values."""
return hy_st.builds(
datetime.time,
hour=hy_st.integers(min_value=0, max_value=23),
minute=hy_st.integers(min_value=0, max_value=59),
second=hy_st.integers(min_value=0, max_value=59),
microsecond=hy_st.integers(min_value=0, max_value=999999),
)
def datetimes():
"""Hypothesis strategy for generating `datetime.datetime` values."""
return hy_st.builds(datetime.datetime.combine, dates(), times())
def file_objects():
"""Hypothesis strategy for generating pre-populated `file objects`."""
return hy_st.builds(io.BytesIO, hy_st.binary())
def files():
"""Hypothesis strategy for generating objects pyswagger can use as file
handles to populate `file` format parameters.
Generated values take the format: `dict('data': <file object>)`"""
return file_objects().map(lambda x: {"data": x})
def merge_dicts_strategy(dict_strat_1, dict_strat_2):
"""Strategy merging two strategies producting dicts into one."""
return hy_st.builds(
lambda x, y: dict((list(x.items()) + list(y.items()))),
dict_strat_1,
dict_strat_2,
)
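# Illustrative usage (hypothetical keys): merging two fixed-dictionary strategies
# yields a strategy whose examples contain both sets of keys:
#   merge_dicts_strategy(
#       hy_st.fixed_dictionaries({"a": hy_st.just(1)}),
#       hy_st.fixed_dictionaries({"b": hy_st.just(2)}),
#   ).example()  -> {'a': 1, 'b': 2}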
def merge_optional_dict_strategy(required_fields, optional_fields):
"""Combine dicts of strings mapping to required and optional strategies.
:param required_fields: Mapping containing required fields.
:type required_fields: dict(str)
:param optional_fields: Mapping containing optional fields.
:type optional_fields: dict(str)
"""
# Create a strategy for a set of keys from the optional dict strategy, then
# a strategy to build those back into a dictionary.
# Finally, merge the strategy of selected optionals with the required one.
if optional_fields:
opt_keys = hy_st.sets(hy_st.sampled_from(list(optional_fields)))
selected_optionals = hy_st.builds(
lambda dictionary, keys: {key: dictionary[key] for key in keys},
hy_st.fixed_dictionaries(optional_fields),
opt_keys,
)
result = merge_dicts_strategy(
hy_st.fixed_dictionaries(required_fields), selected_optionals
)
else:
result = hy_st.fixed_dictionaries(required_fields)
return result
def merge_dicts_max_size_strategy(dict1, dict2, max_size):
"""Combine dict strategies into one to produce a dict up to a max size.
Assumes both dicts have distinct keys.
:param max_size: Maximum number of keys in dicts generated by the strategy.
:type max_size: int
"""
# This is grim, but combine both dictionaries after creating a copy of the
# second containing a reduced number of keys if that would take us over the
# max size.
result = hy_st.builds(
lambda x, y: dict((list(x.items()) + list(y.items()))[:max_size]),
dict1,
dict2,
)
return result
| 31.288889
| 79
| 0.682055
|
1f0e17d805745d52043e1593c0a9d754ebaf7e23
| 905
|
py
|
Python
|
tests/test_dataset/test_ndplot.py
|
Mailaender/spectrochempy
|
d58221afeb9f78e2e3e0079b3fd6c0162a902c04
|
[
"CECILL-B"
] | null | null | null |
tests/test_dataset/test_ndplot.py
|
Mailaender/spectrochempy
|
d58221afeb9f78e2e3e0079b3fd6c0162a902c04
|
[
"CECILL-B"
] | null | null | null |
tests/test_dataset/test_ndplot.py
|
Mailaender/spectrochempy
|
d58221afeb9f78e2e3e0079b3fd6c0162a902c04
|
[
"CECILL-B"
] | null | null | null |
# -*- coding: utf-8 -*-
# ======================================================================================================================
# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. =
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =
# ======================================================================================================================
from spectrochempy.utils import show
def test_plot_generic_1D(IR_dataset_1D):
for method in ["scatter", "pen", "scatter+pen"]:
dataset = IR_dataset_1D.copy()
dataset.plot(method=method)
show()
def test_plot_generic_2D(IR_dataset_2D):
for method in ["stack", "map", "image"]:
dataset = IR_dataset_2D.copy()
dataset.plot(method=method)
show()
| 36.2
| 120
| 0.454144
|