Dataset schema (one row per source file; ⌀ marks columns that may be null):

| Column | Type | Lengths / values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 3–1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–972 |
| max_stars_repo_name | string | lengths 6–130 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_path | string | lengths 3–972 |
| max_issues_repo_name | string | lengths 6–130 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_path | string | lengths 3–972 |
| max_forks_repo_name | string | lengths 6–130 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 ⌀ |
| content | string | lengths 3–1.03M |
| avg_line_length | float64 | 1.13–941k |
| max_line_length | int64 | 2–941k |
| alphanum_fraction | float64 | 0–1 |
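
The rows below are easiest to read as one record per source file. As a minimal sketch of how a split with this schema could be consumed, assuming it is published as a Hugging Face dataset (the dataset path below is a placeholder, not a real name), one might stream records and filter on the metadata columns:

```python
# Minimal sketch; "org/python-files" is a hypothetical dataset path.
from datasets import load_dataset

ds = load_dataset("org/python-files", split="train", streaming=True)

for row in ds:
    # Keep permissively licensed files that look like ordinary source code:
    # mostly alphanumeric content and no extremely long lines.
    if ("MIT" in row["max_stars_repo_licenses"]
            and row["alphanum_fraction"] > 0.6
            and row["max_line_length"] < 1000):
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```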

| hexsha | size | ext | lang | repo path | repo name | head hexsha | licenses | stars (events) | issues (events) | forks (events) |
|---|---|---|---|---|---|---|---|---|---|---|
| dcc5975e820927a9cdf1ae7282e9fef0f42b40f8 | 591 | py | Python | bin/convert_geofon_to_quakeml.py | trichter/sito | 1b3895f3a56fe0fa6cdce1201545111025d606c8 | ["MIT"] | 18 (2015-01-30T00:48:30.000Z → 2021-04-03T03:11:15.000Z) | null | 21 (2015-04-09T13:55:13.000Z → 2021-06-19T22:22:08.000Z) |

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Convert SeiscompEventXML version 0.6 to QuakeML.
arguments: files to convert
write to output_geofon.xml
"""
import sys
from sito.events import read_regex
from obspy.core.event import Catalog


def read_files(fnames):
    out = Catalog()
    for fname in fnames:
        out += read_regex(fname)
    out_fname = 'output_geofon.xml'
    out.write(out_fname, 'QUAKEML')


if __name__ == '__main__':
    if len(sys.argv) <= 1 or sys.argv[1] in ('-h', '--help'):
        print(__doc__.strip('\n'))
        sys.exit()
    read_files(sys.argv[1:])
```

avg_line_length: 22.730769 | max_line_length: 61 | alphanum_fraction: 0.651438
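
A hedged usage sketch for the converter above, assuming `sito` and ObsPy are installed and the script is importable as a module (file names are placeholders):

```python
# Hypothetical invocation of the helper defined above; writes output_geofon.xml
# to the current directory.
from convert_geofon_to_quakeml import read_files  # assumes the script is on the path

read_files(["geofon_events_2015.xml", "geofon_events_2016.xml"])
```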

| hexsha | size | ext | lang | repo path | repo name | head hexsha | licenses | stars (events) | issues (events) | forks (events) |
|---|---|---|---|---|---|---|---|---|---|---|
| 656a58fb0e48ec13beb5fa0bec4dca6fab3b4ec9 | 11,533 | py | Python | glue/core/fixed_resolution_buffer.py | areza7/glue | 42b68baec522b257e70a2f902c151233a0414720 | ["BSD-3-Clause"] | null | null | null |

```python
import numpy as np
from glue.core.exceptions import IncompatibleDataException
from glue.core.component import CoordinateComponent
from glue.core.coordinate_helpers import dependent_axes
from glue.utils import unbroadcast, broadcast_to, broadcast_arrays_minimal
# TODO: cache needs to be updated when links are removed/changed
__all__ = ['compute_fixed_resolution_buffer']
ARRAY_CACHE = {}
PIXEL_CACHE = {}
def translate_pixel(data, pixel_coords, target_cid):
"""
Given a dataset and pixel coordinates in that dataset, compute a
corresponding pixel coordinate for another dataset.
Parameters
----------
data : `~glue.core.data.Data`
The main data object in which the original pixel coordinates are defined.
pixel_coords : tuple or list
List of pixel coordinates in the original data object. This should contain
as many Numpy arrays as there are dimensions in ``data``.
target_cid : `~glue.core.component_id.ComponentID`
The component to compute - this can be for a different dataset.
Returns
-------
coords : `~numpy.ndarray`
The values of the translated coordinates
dimensions : tuple
The dimensions in ``data`` for which pixel coordinates were used in the
translation.
"""
if not len(pixel_coords) == data.ndim:
raise ValueError('The number of coordinates in pixel_coords does not '
'match the number of dimensions in data')
if target_cid in data.pixel_component_ids:
return pixel_coords[target_cid.axis], [target_cid.axis]
# TODO: check that things are efficient if the PixelComponentID is in a
# pixel-aligned dataset.
component = data.get_component(target_cid)
if hasattr(component, 'link'):
link = component.link
values_all = []
dimensions_all = []
for cid in link._from:
values, dimensions = translate_pixel(data, pixel_coords, cid)
values_all.append(values)
dimensions_all.extend(dimensions)
# Unbroadcast arrays to smallest common shape for performance
if len(values_all) > 0:
shape = values_all[0].shape
values_all = broadcast_arrays_minimal(*values_all)
results = link._using(*values_all)
result = broadcast_to(results, shape)
else:
result = None
return result, sorted(set(dimensions_all))
elif isinstance(component, CoordinateComponent):
# FIXME: Hack for now - if we pass arrays in the view, it's interpreted
return component._calculate(view=pixel_coords), dependent_axes(data.coords, component.axis)
else:
raise Exception("Dependency on non-pixel component", target_cid)
class AnyScalar(object):
def __eq__(self, other):
return np.isscalar(other)
def bounds_for_cache(bounds, dimensions):
cache_bounds = []
for i in range(len(bounds)):
if i not in dimensions and np.isscalar(bounds[i]):
cache_bounds.append(AnyScalar())
else:
cache_bounds.append(bounds[i])
return cache_bounds
def compute_fixed_resolution_buffer(data, bounds, target_data=None, target_cid=None,
subset_state=None, broadcast=True, cache_id=None):
"""
Get a fixed-resolution buffer for a dataset.
Parameters
----------
data : `~glue.core.Data`
The dataset from which to extract a fixed resolution buffer
bounds : list
The list of bounds for the fixed resolution buffer. This list should
have as many items as there are dimensions in ``target_data``. Each
item should either be a scalar value, or a tuple of ``(min, max, nsteps)``.
target_data : `~glue.core.Data`, optional
The data in whose frame of reference the bounds are defined. Defaults
to ``data``.
target_cid : `~glue.core.component_id.ComponentID`, optional
If specified, gives the component ID giving the component to use for the
data values. Alternatively, use ``subset_state`` to get a subset mask.
subset_state : `~glue.core.subset.SubsetState`, optional
If specified, gives the subset state for which to compute a mask.
Alternatively, use ``target_cid`` if you want to get data values.
broadcast : bool, optional
If `True`, then if a dimension in ``target_data`` for which ``bounds``
is not a scalar does not affect any of the dimensions in ``data``,
then the final array will be effectively broadcast along this
dimension, otherwise an error will be raised.
"""
if target_data is None:
target_data = data
if target_cid is None and subset_state is None:
raise ValueError("Either target_cid or subset_state should be specified")
if target_cid is not None and subset_state is not None:
raise ValueError("Either target_cid or subset_state should be specified (not both)")
for bound in bounds:
if isinstance(bound, tuple) and bound[2] < 1:
raise ValueError("Number of steps in bounds should be >=1")
# If cache_id is specified, we keep a cached version of the resulting array
# indexed by cache_id as well as a hash formed of the call arguments to this
# function. We then check if the resulting array already exists in the cache.
if cache_id is not None:
if subset_state is None:
# Use uuid for component ID since otherwise component IDs don't return
# False when comparing two different CIDs (instead they return a subset state).
# For bounds we use a special wrapper that can identify wildcards.
current_array_hash = (data, bounds, target_data, target_cid.uuid, broadcast)
else:
current_array_hash = (data, bounds, target_data, subset_state, broadcast)
current_pixel_hash = (data, target_data)
if cache_id in ARRAY_CACHE:
if ARRAY_CACHE[cache_id]['hash'] == current_array_hash:
return ARRAY_CACHE[cache_id]['array']
# To save time later, if the pixel cache doesn't match at the level of the
# data and target_data, we just reset the cache.
if cache_id in PIXEL_CACHE:
if PIXEL_CACHE[cache_id]['hash'] != current_pixel_hash:
PIXEL_CACHE.pop(cache_id)
# Start off by generating arrays of coordinates in the original dataset
pixel_coords = [np.linspace(*bound) if isinstance(bound, tuple) else bound for bound in bounds]
pixel_coords = np.meshgrid(*pixel_coords, indexing='ij', copy=False)
# Keep track of the original shape of these arrays
original_shape = pixel_coords[0].shape
# Now loop through the dimensions of 'data' to find the corresponding
# coordinates in the frame of view of this dataset.
translated_coords = []
dimensions_all = []
invalid_all = np.zeros(original_shape, dtype=bool)
for ipix, pix in enumerate(data.pixel_component_ids):
# At this point, if cache_id is in PIXEL_CACHE, we know that data and
# target_data match so we just check the bounds. Note that the bounds
# include the AnyScalar wildcard for any dimensions that don't impact
# the pixel coordinates here. We do this so that we don't have to
# recompute the pixel coordinates when e.g. slicing through cubes.
if cache_id in PIXEL_CACHE and ipix in PIXEL_CACHE[cache_id] and PIXEL_CACHE[cache_id][ipix]['bounds'] == bounds:
translated_coord = PIXEL_CACHE[cache_id][ipix]['translated_coord']
dimensions = PIXEL_CACHE[cache_id][ipix]['dimensions']
invalid = PIXEL_CACHE[cache_id][ipix]['invalid']
else:
translated_coord, dimensions = translate_pixel(target_data, pixel_coords, pix)
# The returned coordinates may often be a broadcasted array. To convert
# the coordinates to integers and check which ones are within bounds, we
# thus operate on the un-broadcasted array, before broadcasting it back
# to the original shape.
translated_coord = np.round(unbroadcast(translated_coord)).astype(int)
invalid = (translated_coord < 0) | (translated_coord >= data.shape[ipix])
# Since we are going to be using these coordinates later on to index an
# array, we need the coordinates to be within the array, so we reset
# any invalid coordinates and keep track of which pixels are invalid
# to reset them later.
translated_coord[invalid] = 0
# We now populate the cache
if cache_id is not None:
if cache_id not in PIXEL_CACHE:
PIXEL_CACHE[cache_id] = {'hash': current_pixel_hash}
PIXEL_CACHE[cache_id][ipix] = {'translated_coord': translated_coord,
'dimensions': dimensions,
'invalid': invalid,
'bounds': bounds_for_cache(bounds, dimensions)}
invalid_all |= invalid
# Broadcast back to the original shape and add to the list
translated_coords.append(broadcast_to(translated_coord, original_shape))
# Also keep track of all the dimensions that contributed to this coordinate
dimensions_all.extend(dimensions)
translated_coords = tuple(translated_coords)
# If a dimension from the target data for which bounds was set to an interval
# did not actually contribute to any of the coordinates in data, then if
# broadcast is set to False we raise an error, otherwise we proceed and
# implicitly broadcast values along that dimension of the target data.
if data is not target_data and not broadcast:
for i in range(target_data.ndim):
if isinstance(bounds[i], tuple) and i not in dimensions_all:
raise IncompatibleDataException()
# PERF: optimize further - check if we can extract a sub-region that
# contains all the valid values.
# Take subset_state into account, if present
if subset_state is None:
array = data.get_data(target_cid, view=translated_coords).astype(float)
invalid_value = -np.inf
else:
array = data.get_mask(subset_state, view=translated_coords)
invalid_value = False
if np.any(invalid_all):
if not array.flags.writeable:
array = np.array(array, dtype=type(invalid_value))
array[invalid_all] = invalid_value
# Drop dimensions for which bounds were scalars
slices = []
for bound in bounds:
if isinstance(bound, tuple):
slices.append(slice(None))
else:
slices.append(0)
array = array[tuple(slices)]
if cache_id is not None:
# For the bounds, we use a special wildcard for bounds that don't affect
# the result. This will allow the cache to match regardless of the
# value for those bounds. However, we only do this for scalar bounds.
cache_bounds = bounds_for_cache(bounds, dimensions_all)
current_array_hash = current_array_hash[:1] + (cache_bounds,) + current_array_hash[2:]
if subset_state is None:
ARRAY_CACHE[cache_id] = {'hash': current_array_hash, 'array': array}
else:
ARRAY_CACHE[cache_id] = {'hash': current_array_hash, 'array': array}
return array
```

avg_line_length: 41.485612 | max_line_length: 121 | alphanum_fraction: 0.666435
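
As a reading aid for the API above, here is a hypothetical call sketch based on the `compute_fixed_resolution_buffer` docstring: `data` and `cid` are placeholders for a glue `Data` object and one of its component IDs, and the bounds request a 256×256 grid over the first two axes while slicing the third axis at pixel 10.

```python
# Hypothetical call; `data` is a 3-d glue Data object and `cid` one of its component IDs.
buf = compute_fixed_resolution_buffer(
    data,
    bounds=[(0, 255, 256), (0, 255, 256), 10],  # (min, max, nsteps) per axis, or a scalar
    target_cid=cid,
)
print(buf.shape)  # scalar-bounded axes are dropped, so this is (256, 256)
```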

| hexsha | size | ext | lang | repo path | repo name | head hexsha | licenses | stars (events) | issues (events) | forks (events) |
|---|---|---|---|---|---|---|---|---|---|---|
| dc40a5d91fa5c06ee74a1cb4fc480fb3cf39033f | 9,673 | py | Python | sdk/python/pulumi_azure_nextgen/compute/latest/get_availability_set.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 31 (2020-09-21T09:41:01.000Z → 2021-02-26T13:21:59.000Z) | 231 (2020-09-21T09:38:45.000Z → 2021-03-01T11:16:03.000Z) | 4 (2020-09-29T14:14:59.000Z → 2021-02-10T20:38:16.000Z) |

```python
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetAvailabilitySetResult',
'AwaitableGetAvailabilitySetResult',
'get_availability_set',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:compute:getAvailabilitySet'.""", DeprecationWarning)
@pulumi.output_type
class GetAvailabilitySetResult:
"""
Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.
"""
def __init__(__self__, id=None, location=None, name=None, platform_fault_domain_count=None, platform_update_domain_count=None, proximity_placement_group=None, sku=None, statuses=None, tags=None, type=None, virtual_machines=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if platform_fault_domain_count and not isinstance(platform_fault_domain_count, int):
raise TypeError("Expected argument 'platform_fault_domain_count' to be a int")
pulumi.set(__self__, "platform_fault_domain_count", platform_fault_domain_count)
if platform_update_domain_count and not isinstance(platform_update_domain_count, int):
raise TypeError("Expected argument 'platform_update_domain_count' to be a int")
pulumi.set(__self__, "platform_update_domain_count", platform_update_domain_count)
if proximity_placement_group and not isinstance(proximity_placement_group, dict):
raise TypeError("Expected argument 'proximity_placement_group' to be a dict")
pulumi.set(__self__, "proximity_placement_group", proximity_placement_group)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if statuses and not isinstance(statuses, list):
raise TypeError("Expected argument 'statuses' to be a list")
pulumi.set(__self__, "statuses", statuses)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_machines and not isinstance(virtual_machines, list):
raise TypeError("Expected argument 'virtual_machines' to be a list")
pulumi.set(__self__, "virtual_machines", virtual_machines)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="platformFaultDomainCount")
def platform_fault_domain_count(self) -> Optional[int]:
"""
Fault Domain count.
"""
return pulumi.get(self, "platform_fault_domain_count")
@property
@pulumi.getter(name="platformUpdateDomainCount")
def platform_update_domain_count(self) -> Optional[int]:
"""
Update Domain count.
"""
return pulumi.get(self, "platform_update_domain_count")
@property
@pulumi.getter(name="proximityPlacementGroup")
def proximity_placement_group(self) -> Optional['outputs.SubResourceResponse']:
"""
Specifies information about the proximity placement group that the availability set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
"""
return pulumi.get(self, "proximity_placement_group")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
Sku of the availability set, only name is required to be set. See AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines with managed disks and 'Classic' for virtual machines with unmanaged disks. Default value is 'Classic'.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def statuses(self) -> Sequence['outputs.InstanceViewStatusResponse']:
"""
The resource status information.
"""
return pulumi.get(self, "statuses")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualMachines")
def virtual_machines(self) -> Optional[Sequence['outputs.SubResourceResponse']]:
"""
A list of references to all virtual machines in the availability set.
"""
return pulumi.get(self, "virtual_machines")
class AwaitableGetAvailabilitySetResult(GetAvailabilitySetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAvailabilitySetResult(
id=self.id,
location=self.location,
name=self.name,
platform_fault_domain_count=self.platform_fault_domain_count,
platform_update_domain_count=self.platform_update_domain_count,
proximity_placement_group=self.proximity_placement_group,
sku=self.sku,
statuses=self.statuses,
tags=self.tags,
type=self.type,
virtual_machines=self.virtual_machines)
def get_availability_set(availability_set_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAvailabilitySetResult:
"""
Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.
Latest API Version: 2020-12-01.
:param str availability_set_name: The name of the availability set.
:param str resource_group_name: The name of the resource group.
"""
pulumi.log.warn("get_availability_set is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:compute:getAvailabilitySet'.")
__args__ = dict()
__args__['availabilitySetName'] = availability_set_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:compute/latest:getAvailabilitySet', __args__, opts=opts, typ=GetAvailabilitySetResult).value
return AwaitableGetAvailabilitySetResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
platform_fault_domain_count=__ret__.platform_fault_domain_count,
platform_update_domain_count=__ret__.platform_update_domain_count,
proximity_placement_group=__ret__.proximity_placement_group,
sku=__ret__.sku,
statuses=__ret__.statuses,
tags=__ret__.tags,
type=__ret__.type,
virtual_machines=__ret__.virtual_machines)
```

avg_line_length: 48.124378 | max_line_length: 862 | alphanum_fraction: 0.701023
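
A hypothetical usage sketch for the generated function above; it would run inside a Pulumi program, and the availability set and resource group names are placeholders:

```python
# Hypothetical lookup; names are placeholders and a Pulumi runtime context is assumed.
result = get_availability_set(
    availability_set_name="example-avset",
    resource_group_name="example-rg",
)
print(result.id, result.platform_fault_domain_count, result.platform_update_domain_count)
```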

| hexsha | size | ext | lang | repo path | repo name | head hexsha | licenses | stars (events) | issues (events) | forks (events) |
|---|---|---|---|---|---|---|---|---|---|---|
| eb957f58185d9e99346f5ca7cdc859641147bda0 | 11,579 | py | Python | fuocore/core/player.py | AmyLewis/feeluown-core | 0aecb39ce49504b04fa54a391260e9976220a288 | ["MIT"] | null | null | null |

```python
# -*- coding: utf-8 -*-
"""
fuocore.player
~~~~~~~~~~~~~~
fuocore media player.
"""
from abc import ABCMeta, abstractmethod
from enum import Enum
import logging
import random
from mpv import MPV, MpvEventID, MpvEventEndFile
from fuocore.dispatch import Signal
logger = logging.getLogger(__name__)
class State(Enum):
stopped = 0
paused = 1
playing = 2
class PlaybackMode(Enum):
one_loop = 0
sequential = 1
loop = 2
random = 3
class Playlist(object):
"""player playlist provide a list of song model to play
NOTE - Design: Why we use song model instead of url? Theoretically,
using song model may increase the coupling. However, simple url
do not obtain enough metadata.
"""
def __init__(self, songs=None, playback_mode=PlaybackMode.loop):
"""
:param songs: list of :class:`fuocore.models.SongModel`
:param playback_mode: :class:`fuocore.player.PlaybackMode`
"""
self._current_song = None
self._songs = songs or []
self._playback_mode = playback_mode
# signals
self.playback_mode_changed = Signal()
self.song_changed = Signal()
def __len__(self):
return len(self._songs)
def __getitem__(self, index):
"""overload [] operator"""
return self._songs[index]
def add(self, song):
"""insert a song after current song"""
if song in self._songs:
return
if self._current_song is None:
self._songs.append(song)
else:
index = self._songs.index(self._current_song)
self._songs.insert(index + 1, song)
logger.debug('Add %s to player playlist', song)
def remove(self, song):
"""Remove song from playlist.
If song is current song, remove the song and play next. Otherwise,
just remove it.
"""
if song in self._songs:
if self._current_song is None:
self._songs.remove(song)
elif song == self._current_song:
self.current_song = self.next_song
self._songs.remove(song)
else:
self._songs.remove(song)
logger.debug('Remove {} from player playlist'.format(song))
else:
logger.debug('Remove failed: {} not in playlist'.format(song))
def clear(self):
self.current_song = None
self._songs = []
def list(self):
return self._songs
@property
def current_song(self):
return self._current_song
@current_song.setter
def current_song(self, song):
"""change current song, emit song changed singal"""
self._last_song = self.current_song
if song is None:
self._current_song = None
# add it to playlist if song not in playlist
elif song in self._songs:
self._current_song = song
else:
self.add(song)
self._current_song = song
self.song_changed.emit(song)
@property
def playback_mode(self):
return self._playback_mode
@playback_mode.setter
def playback_mode(self, playback_mode):
self._playback_mode = playback_mode
self.playback_mode_changed.emit(playback_mode)
@property
def next_song(self):
next_song = None
if not self._songs:
return None
if self.current_song is None:
return self._songs[0]
if self.playback_mode == PlaybackMode.random:
next_song = random.choice(range(0, len(self._songs)))
elif self.playback_mode == PlaybackMode.one_loop:
next_song = self.current_song
else:
current_index = self._songs.index(self.current_song)
if current_index == len(self._songs) - 1:
if self.playback_mode == PlaybackMode.loop:
next_song = self._songs[0]
elif self.playback_mode == PlaybackMode.sequential:
next_song = None
else:
next_song = self._songs[current_index + 1]
return next_song
@property
def previous_song(self):
"""return to previous played song, if previous played song not exists,
get the song before current song in playback mode order.
"""
if not self._songs:
return None
if self.current_song is None:
return self._songs[0]
previous_song = None
if self.playback_mode == PlaybackMode.random:
previous_song = random.choice(self._songs)
elif self.playback_mode == PlaybackMode.one_loop:
previous_song = self.current_song
else:
current_index = self._songs.index(self.current_song)
previous_song = self._songs[current_index - 1]
return previous_song
class AbstractPlayer(metaclass=ABCMeta):
def __init__(self, playlist=Playlist(), **kwargs):
self._position = 0 # seconds
self._volume = 100 # (0, 100)
self._playlist = playlist
self._state = State.stopped
self._duration = None
self.position_changed = Signal()
self.state_changed = Signal()
self.song_finished = Signal()
self.duration_changed = Signal()
self.media_changed = Signal()
@property
def state(self):
"""player state
:return: :class:`fuocore.engine.State`
"""
return self._state
@state.setter
def state(self, value):
self._state = value
self.state_changed.emit(value)
@property
def current_song(self):
return self._playlist.current_song
@property
def playlist(self):
"""player playlist
:return: :class:`fuocore.engine.Playlist`
"""
return self._playlist
@playlist.setter
def playlist(self, playlist):
self._playlist = playlist
@property
def position(self):
"""player position, the units is seconds"""
return self._position
@position.setter
def position(self, position):
self._position = position
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, value):
value = 0 if value < 0 else value
value = 100 if value > 100 else value
self._volume = value
@property
def duration(self):
"""player media duration, the units is seconds"""
return self._duration
@duration.setter
def duration(self, value):
if value is not None and value != self._duration:
self._duration = value
self.duration_changed.emit(value)
@abstractmethod
def play(self, url):
"""play media
:param url: a local file absolute path, or a http url that refers to a
media file
"""
@abstractmethod
def play_song(self, song):
"""play media by song model
:param song: :class:`fuocore.models.SongModel`
"""
@abstractmethod
def resume(self):
"""play playback"""
@abstractmethod
def pause(self):
"""pause player"""
@abstractmethod
def toggle(self):
"""toggle player state"""
@abstractmethod
def stop(self):
"""stop player"""
@abstractmethod
def initialize(self):
""""initialize player"""
@abstractmethod
def shutdown(self):
"""shutdown player, do some clean up here"""
class MpvPlayer(AbstractPlayer):
"""
player will always play playlist current song. player will listening to
playlist ``song_changed`` signal and change the current playback.
"""
def __init__(self):
super(MpvPlayer, self).__init__()
self._mpv = MPV(ytdl=False,
input_default_bindings=True,
input_vo_keyboard=True)
self._playlist = Playlist()
self._playlist.song_changed.connect(self._on_song_changed)
def initialize(self):
self._mpv.observe_property(
'time-pos',
lambda name, position: self._on_position_changed(position)
)
self._mpv.observe_property(
'duration',
lambda name, duration: self._on_duration_changed(duration)
)
# self._mpv.register_event_callback(lambda event: self._on_event(event))
self._mpv.event_callbacks.append(self._on_event)
self.song_finished.connect(self.play_next)
logger.info('Player initialize finished.')
def shutdown(self):
del self._mpv
def play(self, url):
# NOTE - API DESGIN: we should return None, see
# QMediaPlayer API reference for more details.
logger.debug("Player will play: '%s'", url)
# Clear playlist before play next song,
# otherwise, mpv will seek to the last position and play.
self._mpv.playlist_clear()
self._mpv.play(url)
self._mpv.pause = False
self.state = State.playing
self.media_changed.emit(url)
def play_song(self, song):
if self.playlist.current_song is not None and \
self.playlist.current_song == song:
logger.warning('the song to be played is same as current song')
return
url = song.url
if url is not None:
self._playlist.current_song = song
else:
raise ValueError("invalid song: song url can't be None")
def play_next(self):
self.playlist.current_song = self.playlist.next_song
def play_previous(self):
self.playlist.current_song = self.playlist.previous_song
def resume(self):
self._mpv.pause = False
self.state = State.playing
def pause(self):
self._mpv.pause = True
self.state = State.paused
def toggle(self):
self._mpv.pause = not self._mpv.pause
if self._mpv.pause:
self.state = State.paused
else:
self.state = State.playing
def stop(self):
logger.info('stop player...')
self._mpv.pause = True
self.state = State.stopped
self._mpv.playlist_clear()
@property
def position(self):
return self._position
@position.setter
def position(self, position):
self._mpv.seek(position, reference='absolute')
self._position = position
@AbstractPlayer.volume.setter
def volume(self, value):
super(MpvPlayer, MpvPlayer).volume.__set__(self, value)
self._mpv.volume = self.volume
def _on_position_changed(self, position):
self._position = position
self.position_changed.emit(position)
def _on_duration_changed(self, duration):
"""listening to mpv duration change event"""
logger.info('player receive duration changed signal')
self.duration = duration
def _on_song_changed(self, song):
logger.debug('player received song changed signal')
if song is not None:
logger.info('Will play song: %s' % self._playlist.current_song)
self.play(song.url)
else:
self.stop()
logger.info('playlist provide no song anymore.')
def _on_event(self, event):
if event['event_id'] == MpvEventID.END_FILE:
reason = event['event']['reason']
logger.debug('Current song finished. reason: %d' % reason)
if self.state != State.stopped and reason != MpvEventEndFile.ABORTED:
self.song_finished.emit()
```

avg_line_length: 28.310513 | max_line_length: 81 | alphanum_fraction: 0.606702
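
A small sketch of the playlist behaviour defined above; `song_a` and `song_b` stand in for `fuocore.models.SongModel` instances:

```python
# Hypothetical sketch; song_a and song_b are placeholder SongModel-like objects.
playlist = Playlist(songs=[song_a, song_b], playback_mode=PlaybackMode.loop)
playlist.current_song = song_a           # emits the song_changed signal
assert playlist.next_song is song_b      # next in list order; loop wraps at the end
playlist.playback_mode = PlaybackMode.one_loop
assert playlist.next_song is playlist.current_song
```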

| hexsha | size | ext | lang | repo path | repo name | head hexsha | licenses | stars (events) | issues (events) | forks (events) |
|---|---|---|---|---|---|---|---|---|---|---|
| 3d7c8fd0f8087f1c4c1a9d522fc4f517bd3153b7 | 7,470 | py | Python | pgoapi/protos/pogoprotos/enums/activity_type_pb2.py | z3cr3t/api | 960c0c10a76e1f46e99de7f8de8644b00cf95d0d | ["MIT"] | null | null | null |

```python
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/enums/activity_type.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/enums/activity_type.proto',
package='pogoprotos.enums',
syntax='proto3',
serialized_pb=_b('\n$pogoprotos/enums/activity_type.proto\x12\x10pogoprotos.enums*\xeb\x06\n\x0c\x41\x63tivityType\x12\x14\n\x10\x41\x43TIVITY_UNKNOWN\x10\x00\x12\x1a\n\x16\x41\x43TIVITY_CATCH_POKEMON\x10\x01\x12!\n\x1d\x41\x43TIVITY_CATCH_LEGEND_POKEMON\x10\x02\x12\x19\n\x15\x41\x43TIVITY_FLEE_POKEMON\x10\x03\x12\x18\n\x14\x41\x43TIVITY_DEFEAT_FORT\x10\x04\x12\x1b\n\x17\x41\x43TIVITY_EVOLVE_POKEMON\x10\x05\x12\x16\n\x12\x41\x43TIVITY_HATCH_EGG\x10\x06\x12\x14\n\x10\x41\x43TIVITY_WALK_KM\x10\x07\x12\x1e\n\x1a\x41\x43TIVITY_POKEDEX_ENTRY_NEW\x10\x08\x12\x1e\n\x1a\x41\x43TIVITY_CATCH_FIRST_THROW\x10\t\x12\x1d\n\x19\x41\x43TIVITY_CATCH_NICE_THROW\x10\n\x12\x1e\n\x1a\x41\x43TIVITY_CATCH_GREAT_THROW\x10\x0b\x12\"\n\x1e\x41\x43TIVITY_CATCH_EXCELLENT_THROW\x10\x0c\x12\x1c\n\x18\x41\x43TIVITY_CATCH_CURVEBALL\x10\r\x12%\n!ACTIVITY_CATCH_FIRST_CATCH_OF_DAY\x10\x0e\x12\x1c\n\x18\x41\x43TIVITY_CATCH_MILESTONE\x10\x0f\x12\x1a\n\x16\x41\x43TIVITY_TRAIN_POKEMON\x10\x10\x12\x18\n\x14\x41\x43TIVITY_SEARCH_FORT\x10\x11\x12\x1c\n\x18\x41\x43TIVITY_RELEASE_POKEMON\x10\x12\x12\"\n\x1e\x41\x43TIVITY_HATCH_EGG_SMALL_BONUS\x10\x13\x12#\n\x1f\x41\x43TIVITY_HATCH_EGG_MEDIUM_BONUS\x10\x14\x12\"\n\x1e\x41\x43TIVITY_HATCH_EGG_LARGE_BONUS\x10\x15\x12 \n\x1c\x41\x43TIVITY_DEFEAT_GYM_DEFENDER\x10\x16\x12\x1e\n\x1a\x41\x43TIVITY_DEFEAT_GYM_LEADER\x10\x17\x12+\n\'ACTIVITY_CATCH_FIRST_CATCH_STREAK_BONUS\x10\x18\x12)\n%ACTIVITY_SEARCH_FORT_FIRST_OF_THE_DAY\x10\x19\x12%\n!ACTIVITY_SEARCH_FORT_STREAK_BONUS\x10\x1a\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ACTIVITYTYPE = _descriptor.EnumDescriptor(
name='ActivityType',
full_name='pogoprotos.enums.ActivityType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ACTIVITY_UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_CATCH_POKEMON', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_CATCH_LEGEND_POKEMON', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_FLEE_POKEMON', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_DEFEAT_FORT', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_EVOLVE_POKEMON', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_HATCH_EGG', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_WALK_KM', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_POKEDEX_ENTRY_NEW', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_CATCH_FIRST_THROW', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_CATCH_NICE_THROW', index=10, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_CATCH_GREAT_THROW', index=11, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_CATCH_EXCELLENT_THROW', index=12, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_CATCH_CURVEBALL', index=13, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_CATCH_FIRST_CATCH_OF_DAY', index=14, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_CATCH_MILESTONE', index=15, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_TRAIN_POKEMON', index=16, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_SEARCH_FORT', index=17, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_RELEASE_POKEMON', index=18, number=18,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_HATCH_EGG_SMALL_BONUS', index=19, number=19,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_HATCH_EGG_MEDIUM_BONUS', index=20, number=20,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_HATCH_EGG_LARGE_BONUS', index=21, number=21,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_DEFEAT_GYM_DEFENDER', index=22, number=22,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_DEFEAT_GYM_LEADER', index=23, number=23,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_CATCH_FIRST_CATCH_STREAK_BONUS', index=24, number=24,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_SEARCH_FORT_FIRST_OF_THE_DAY', index=25, number=25,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACTIVITY_SEARCH_FORT_STREAK_BONUS', index=26, number=26,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=59,
serialized_end=934,
)
_sym_db.RegisterEnumDescriptor(_ACTIVITYTYPE)
ActivityType = enum_type_wrapper.EnumTypeWrapper(_ACTIVITYTYPE)
ACTIVITY_UNKNOWN = 0
ACTIVITY_CATCH_POKEMON = 1
ACTIVITY_CATCH_LEGEND_POKEMON = 2
ACTIVITY_FLEE_POKEMON = 3
ACTIVITY_DEFEAT_FORT = 4
ACTIVITY_EVOLVE_POKEMON = 5
ACTIVITY_HATCH_EGG = 6
ACTIVITY_WALK_KM = 7
ACTIVITY_POKEDEX_ENTRY_NEW = 8
ACTIVITY_CATCH_FIRST_THROW = 9
ACTIVITY_CATCH_NICE_THROW = 10
ACTIVITY_CATCH_GREAT_THROW = 11
ACTIVITY_CATCH_EXCELLENT_THROW = 12
ACTIVITY_CATCH_CURVEBALL = 13
ACTIVITY_CATCH_FIRST_CATCH_OF_DAY = 14
ACTIVITY_CATCH_MILESTONE = 15
ACTIVITY_TRAIN_POKEMON = 16
ACTIVITY_SEARCH_FORT = 17
ACTIVITY_RELEASE_POKEMON = 18
ACTIVITY_HATCH_EGG_SMALL_BONUS = 19
ACTIVITY_HATCH_EGG_MEDIUM_BONUS = 20
ACTIVITY_HATCH_EGG_LARGE_BONUS = 21
ACTIVITY_DEFEAT_GYM_DEFENDER = 22
ACTIVITY_DEFEAT_GYM_LEADER = 23
ACTIVITY_CATCH_FIRST_CATCH_STREAK_BONUS = 24
ACTIVITY_SEARCH_FORT_FIRST_OF_THE_DAY = 25
ACTIVITY_SEARCH_FORT_STREAK_BONUS = 26
DESCRIPTOR.enum_types_by_name['ActivityType'] = _ACTIVITYTYPE
# @@protoc_insertion_point(module_scope)
```

avg_line_length: 40.819672 | max_line_length: 1,520 | alphanum_fraction: 0.741901
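
The generated module above exposes the enum both as module-level constants and through the `ActivityType` wrapper; a quick sketch of how the wrapper is typically used, assuming the generated package and a compatible protobuf runtime are installed:

```python
# Assumes the generated pogoprotos package is importable.
from pogoprotos.enums.activity_type_pb2 import ActivityType, ACTIVITY_HATCH_EGG

print(ACTIVITY_HATCH_EGG)                      # 6
print(ActivityType.Name(ACTIVITY_HATCH_EGG))   # 'ACTIVITY_HATCH_EGG'
print(ActivityType.Value('ACTIVITY_WALK_KM'))  # 7
```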

| hexsha | size | ext | lang | repo path | repo name | head hexsha | licenses | stars (events) | issues (events) | forks (events) |
|---|---|---|---|---|---|---|---|---|---|---|
| 34a0a466f1c95aafe7a6e8fdd068f0dc898b7523 | 537 | py | Python | code/547.Number-of-Provinces.py | Aden-Q/leetcode | ebd4804edd4f172b9981b22c18d9ff654cf20762 | ["Apache-2.0"] | 1 (2019-09-22T03:08:14.000Z → 2019-09-22T03:08:14.000Z) | null | null |

```python
from typing import List  # needed for the List[List[int]] annotation below


class Solution:
    def findCircleNum(self, isConnected: List[List[int]]) -> int:
        n = len(isConnected)
        visited = [False] * n

        def dfs(s):
            nonlocal isConnected
            if visited[s]:
                return
            visited[s] = True
            for i in range(n):
                if isConnected[s][i]:
                    dfs(i)

        ct = 0
        for i in range(n):
            if not visited[i]:
                ct += 1
                dfs(i)
        return ct
```

avg_line_length: 26.85 | max_line_length: 65 | alphanum_fraction: 0.39851
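
A quick check of the DFS solution above on a small adjacency matrix: cities 0 and 1 are connected and city 2 is isolated, so two provinces are expected.

```python
# Expected output: 2 (cities {0, 1} form one province, city 2 its own).
print(Solution().findCircleNum([[1, 1, 0],
                                [1, 1, 0],
                                [0, 0, 1]]))
```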

| hexsha | size | ext | lang | repo path | repo name | head hexsha | licenses | stars (events) | issues (events) | forks (events) |
|---|---|---|---|---|---|---|---|---|---|---|
| 40a5df0b823f67c0008300d42b4f8224c67a5a58 | 610 | py | Python | saltstack/pillar_sls_rendering_error/srv/modules/pillar/id_info.py | gladiatr72/gaze_upon | fdeb9c8f860bd5b4ef388471b793434a2131ccf7 | ["MIT"] | null | null | null |

```python
# vim: ft=python
import logging
from stephens_little_package import traceit, PrettyLog
log = logging.getLogger(__name__)
@traceit
def ext_pillar(minion_id, pillar):
retval = {}
if __grains__.get('wants_me_some_id_info', False):
id = __opts__['id']
exp = __salt__['id_info.explode'](id)
if exp['project'] != 'mten':
retval['project_list'] = exp['project_list']
del(exp['project_list'])
retval.update(exp)
retval['id_info'] = exp
log.critical('and then, PILLAR ID_INFO: \n{0}'.format(PrettyLog(retval)))
return retval
```

avg_line_length: 21.785714 | max_line_length: 81 | alphanum_fraction: 0.631148

| hexsha | size | ext | lang | repo path | repo name | head hexsha | licenses | stars (events) | issues (events) | forks (events) |
|---|---|---|---|---|---|---|---|---|---|---|
| a41e180d206ce2f2841065f021ce013eb137a491 | 416 | py | Python | api/migrations/0003_auto_20200627_1908.py | nemesis-umbrella/HorarioAPI | f2638cd7f1b969ac2583807b33b811f4c04f9c90 | ["MIT"] | null | null | null |

```python
# Generated by Django 3.0.6 on 2020-06-28 00:08

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('api', '0002_auto_20200627_1828'),
    ]

    operations = [
        migrations.AlterField(
            model_name='asistencia',
            name='puntualidad',
            field=models.BooleanField(blank=True, max_length=1, null=True),
        ),
    ]
```

avg_line_length: 21.894737 | max_line_length: 75 | alphanum_fraction: 0.612981

| hexsha | size | ext | lang | repo path | repo name | head hexsha | licenses | stars (events) | issues (events) | forks (events) |
|---|---|---|---|---|---|---|---|---|---|---|
| e8d86d480e5247fef80c4230ee55e14a3176998d | 9,472 | py | Python | homeassistant/components/proximity.py | kdschlosser/home-assistant | a94a24f6f83508642e220fadf2799789dc32a25b | ["Apache-2.0"] | 2 (2020-06-17T01:23:01.000Z → 2020-06-18T22:17:14.000Z) | 6 (2021-02-08T21:02:40.000Z → 2022-03-12T00:52:16.000Z) | 1 (2020-01-09T12:36:16.000Z → 2020-01-09T12:36:16.000Z) |

```python
"""
Support for tracking the proximity of a device.
Component to monitor the proximity of devices to a particular zone and the
direction of travel.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/proximity/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
CONF_ZONE, CONF_DEVICES, CONF_UNIT_OF_MEASUREMENT)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_state_change
from homeassistant.util.distance import convert
from homeassistant.util.location import distance
_LOGGER = logging.getLogger(__name__)
ATTR_DIR_OF_TRAVEL = 'dir_of_travel'
ATTR_DIST_FROM = 'dist_to_zone'
ATTR_NEAREST = 'nearest'
CONF_IGNORED_ZONES = 'ignored_zones'
CONF_TOLERANCE = 'tolerance'
DEFAULT_DIR_OF_TRAVEL = 'not set'
DEFAULT_DIST_TO_ZONE = 'not set'
DEFAULT_NEAREST = 'not set'
DEFAULT_PROXIMITY_ZONE = 'home'
DEFAULT_TOLERANCE = 1
DEPENDENCIES = ['zone', 'device_tracker']
DOMAIN = 'proximity'
UNITS = ['km', 'm', 'mi', 'ft']
ZONE_SCHEMA = vol.Schema({
vol.Optional(CONF_ZONE, default=DEFAULT_PROXIMITY_ZONE): cv.string,
vol.Optional(CONF_DEVICES, default=[]):
vol.All(cv.ensure_list, [cv.entity_id]),
vol.Optional(CONF_IGNORED_ZONES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_TOLERANCE, default=DEFAULT_TOLERANCE): cv.positive_int,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): vol.All(cv.string, vol.In(UNITS)),
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: cv.schema_with_slug_keys(ZONE_SCHEMA),
}, extra=vol.ALLOW_EXTRA)
def setup_proximity_component(hass, name, config):
"""Set up the individual proximity component."""
ignored_zones = config.get(CONF_IGNORED_ZONES)
proximity_devices = config.get(CONF_DEVICES)
tolerance = config.get(CONF_TOLERANCE)
proximity_zone = name
unit_of_measurement = config.get(
CONF_UNIT_OF_MEASUREMENT, hass.config.units.length_unit)
zone_id = 'zone.{}'.format(config.get(CONF_ZONE))
proximity = Proximity(hass, proximity_zone, DEFAULT_DIST_TO_ZONE,
DEFAULT_DIR_OF_TRAVEL, DEFAULT_NEAREST,
ignored_zones, proximity_devices, tolerance,
zone_id, unit_of_measurement)
proximity.entity_id = '{}.{}'.format(DOMAIN, proximity_zone)
proximity.schedule_update_ha_state()
track_state_change(
hass, proximity_devices, proximity.check_proximity_state_change)
return True
def setup(hass, config):
"""Get the zones and offsets from configuration.yaml."""
for zone, proximity_config in config[DOMAIN].items():
setup_proximity_component(hass, zone, proximity_config)
return True
class Proximity(Entity):
"""Representation of a Proximity."""
def __init__(self, hass, zone_friendly_name, dist_to, dir_of_travel,
nearest, ignored_zones, proximity_devices, tolerance,
proximity_zone, unit_of_measurement):
"""Initialize the proximity."""
self.hass = hass
self.friendly_name = zone_friendly_name
self.dist_to = dist_to
self.dir_of_travel = dir_of_travel
self.nearest = nearest
self.ignored_zones = ignored_zones
self.proximity_devices = proximity_devices
self.tolerance = tolerance
self.proximity_zone = proximity_zone
self._unit_of_measurement = unit_of_measurement
@property
def name(self):
"""Return the name of the entity."""
return self.friendly_name
@property
def state(self):
"""Return the state."""
return self.dist_to
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return self._unit_of_measurement
@property
def state_attributes(self):
"""Return the state attributes."""
return {
ATTR_DIR_OF_TRAVEL: self.dir_of_travel,
ATTR_NEAREST: self.nearest,
}
def check_proximity_state_change(self, entity, old_state, new_state):
"""Perform the proximity checking."""
entity_name = new_state.name
devices_to_calculate = False
devices_in_zone = ''
zone_state = self.hass.states.get(self.proximity_zone)
proximity_latitude = zone_state.attributes.get('latitude')
proximity_longitude = zone_state.attributes.get('longitude')
# Check for devices in the monitored zone.
for device in self.proximity_devices:
device_state = self.hass.states.get(device)
if device_state is None:
devices_to_calculate = True
continue
if device_state.state not in self.ignored_zones:
devices_to_calculate = True
# Check the location of all devices.
if (device_state.state).lower() == (self.friendly_name).lower():
device_friendly = device_state.name
if devices_in_zone != '':
devices_in_zone = devices_in_zone + ', '
devices_in_zone = devices_in_zone + device_friendly
# No-one to track so reset the entity.
if not devices_to_calculate:
self.dist_to = 'not set'
self.dir_of_travel = 'not set'
self.nearest = 'not set'
self.schedule_update_ha_state()
return
# At least one device is in the monitored zone so update the entity.
if devices_in_zone != '':
self.dist_to = 0
self.dir_of_travel = 'arrived'
self.nearest = devices_in_zone
self.schedule_update_ha_state()
return
# We can't check proximity because latitude and longitude don't exist.
if 'latitude' not in new_state.attributes:
return
# Collect distances to the zone for all devices.
distances_to_zone = {}
for device in self.proximity_devices:
# Ignore devices in an ignored zone.
device_state = self.hass.states.get(device)
if device_state.state in self.ignored_zones:
continue
# Ignore devices if proximity cannot be calculated.
if 'latitude' not in device_state.attributes:
continue
# Calculate the distance to the proximity zone.
dist_to_zone = distance(proximity_latitude,
proximity_longitude,
device_state.attributes['latitude'],
device_state.attributes['longitude'])
# Add the device and distance to a dictionary.
distances_to_zone[device] = round(
convert(dist_to_zone, 'm', self.unit_of_measurement), 1)
# Loop through each of the distances collected and work out the
# closest.
closest_device = None # type: str
dist_to_zone = None # type: float
for device in distances_to_zone:
if not dist_to_zone or distances_to_zone[device] < dist_to_zone:
closest_device = device
dist_to_zone = distances_to_zone[device]
# If the closest device is one of the other devices.
if closest_device != entity:
self.dist_to = round(distances_to_zone[closest_device])
self.dir_of_travel = 'unknown'
device_state = self.hass.states.get(closest_device)
self.nearest = device_state.name
self.schedule_update_ha_state()
return
# Stop if we cannot calculate the direction of travel (i.e. we don't
# have a previous state and a current LAT and LONG).
if old_state is None or 'latitude' not in old_state.attributes:
self.dist_to = round(distances_to_zone[entity])
self.dir_of_travel = 'unknown'
self.nearest = entity_name
self.schedule_update_ha_state()
return
# Reset the variables
distance_travelled = 0
# Calculate the distance travelled.
old_distance = distance(proximity_latitude, proximity_longitude,
old_state.attributes['latitude'],
old_state.attributes['longitude'])
new_distance = distance(proximity_latitude, proximity_longitude,
new_state.attributes['latitude'],
new_state.attributes['longitude'])
distance_travelled = round(new_distance - old_distance, 1)
# Check for tolerance
if distance_travelled < self.tolerance * -1:
direction_of_travel = 'towards'
elif distance_travelled > self.tolerance:
direction_of_travel = 'away_from'
else:
direction_of_travel = 'stationary'
# Update the proximity entity
self.dist_to = round(dist_to_zone)
self.dir_of_travel = direction_of_travel
self.nearest = entity_name
self.schedule_update_ha_state()
_LOGGER.debug('proximity.%s update entity: distance=%s: direction=%s: '
'device=%s', self.friendly_name, round(dist_to_zone),
direction_of_travel, entity_name)
_LOGGER.info('%s: proximity calculation complete', entity_name)
```

avg_line_length: 36.856031 | max_line_length: 79 | alphanum_fraction: 0.648226
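
For reference, a configuration that satisfies the `CONFIG_SCHEMA`/`ZONE_SCHEMA` above would look like the following, expressed here as the equivalent Python mapping; the entity and zone names are placeholders:

```python
# Hypothetical configuration.yaml contents for this component, as a Python mapping.
config = {
    "proximity": {
        "home": {                               # slug key, per schema_with_slug_keys
            "zone": "home",
            "devices": ["device_tracker.car1"],
            "ignored_zones": ["work"],
            "tolerance": 50,
            "unit_of_measurement": "km",        # one of UNITS: km, m, mi, ft
        }
    }
}
```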

| hexsha | size | ext | lang | repo path | repo name | head hexsha | licenses | stars (events) | issues (events) | forks (events) |
|---|---|---|---|---|---|---|---|---|---|---|
| 44740f67d813af2e5d9b1e2a6d538f82fcc81e1c | 48,357 | py | Python | tests/test_intl.py | daobook/sphinx | ef8daca1f9a82ede9b4b0b5cde93f3414cee3dfe | ["BSD-2-Clause"] | null | null | null |

"""Test message patching for internationalization purposes.
Runs the text builder in the test root.
"""
import os
import re
import pygments
import pytest
from babel.messages import mofile, pofile
from babel.messages.catalog import Catalog
from docutils import nodes
from sphinx import locale
from sphinx.testing.util import (assert_node, assert_not_re_search, assert_re_search,
assert_startswith, etree_parse, path, strip_escseq)
from sphinx.util import docutils
sphinx_intl = pytest.mark.sphinx(
testroot='intl',
confoverrides={
'language': 'xx', 'locale_dirs': ['.'],
'gettext_compact': False,
},
)
pygments_version = tuple(int(v) for v in pygments.__version__.split('.'))
def read_po(pathname):
with pathname.open() as f:
return pofile.read_po(f)
def write_mo(pathname, po):
with pathname.open('wb') as f:
return mofile.write_mo(f, po)
@pytest.fixture(autouse=True)
def setup_intl(app_params):
srcdir = path(app_params.kwargs['srcdir'])
for dirpath, _dirs, files in os.walk(srcdir):
dirpath = path(dirpath)
for f in [f for f in files if f.endswith('.po')]:
po = dirpath / f
mo = srcdir / 'xx' / 'LC_MESSAGES' / (
os.path.relpath(po[:-3], srcdir) + '.mo')
if not mo.parent.exists():
mo.parent.makedirs()
if not mo.exists() or mo.stat().st_mtime < po.stat().st_mtime:
# compile .mo file only if needed
write_mo(mo, read_po(po))
@pytest.fixture(autouse=True)
def _info(app):
yield
print('# language:', app.config.language)
print('# locale_dirs:', app.config.locale_dirs)
def elem_gettexts(elem):
return [_f for _f in [s.strip() for s in elem.itertext()] if _f]
def elem_getref(elem):
return elem.attrib.get('refid') or elem.attrib.get('refuri')
def assert_elem(elem, texts=None, refs=None, names=None):
if texts is not None:
_texts = elem_gettexts(elem)
assert _texts == texts
if refs is not None:
_refs = [elem_getref(x) for x in elem.findall('reference')]
assert _refs == refs
if names is not None:
_names = elem.attrib.get('names').split()
assert _names == names
def assert_count(expected_expr, result, count):
find_pair = (expected_expr, result)
assert len(re.findall(*find_pair)) == count, find_pair
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_emit_warnings(app, warning):
app.build()
# test warnings in translation
warnings = getwarning(warning)
warning_expr = ('.*/warnings.txt:4:<translated>:1: '
'WARNING: Inline literal start-string without end-string.\n')
assert_re_search(warning_expr, warnings)
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_warning_node(app):
app.build()
# test warnings in translation
result = (app.outdir / 'warnings.txt').read_text()
expect = ("3. I18N WITH REST WARNINGS"
"\n**************************\n"
"\nLINE OF >>``<<BROKEN LITERAL MARKUP.\n")
assert result == expect
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
@pytest.mark.xfail(os.name != 'posix', reason="Not working on windows")
def test_text_title_underline(app):
app.build()
# --- simple translation; check title underlines
result = (app.outdir / 'bom.txt').read_text()
expect = ("2. Datei mit UTF-8"
"\n******************\n" # underline matches new translation
"\nThis file has umlauts: äöü.\n")
assert result == expect
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_subdirs(app):
app.build()
# --- check translation in subdirs
result = (app.outdir / 'subdir' / 'index.txt').read_text()
assert_startswith(result, "1. subdir contents\n******************\n")
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_inconsistency_warnings(app, warning):
app.build()
# --- check warnings for inconsistency in number of references
result = (app.outdir / 'refs_inconsistency.txt').read_text()
expect = ("8. I18N WITH REFS INCONSISTENCY"
"\n*******************************\n"
"\n* FOR CITATION [ref3].\n"
"\n* reference FOR reference.\n"
"\n* ORPHAN REFERENCE: I18N WITH REFS INCONSISTENCY.\n"
"\n[1] THIS IS A AUTO NUMBERED FOOTNOTE.\n"
"\n[ref2] THIS IS A CITATION.\n"
"\n[100] THIS IS A NUMBERED FOOTNOTE.\n")
assert result == expect
warnings = getwarning(warning)
warning_fmt = ('.*/refs_inconsistency.txt:\\d+: '
'WARNING: inconsistent %(reftype)s in translated message.'
' original: %(original)s, translated: %(translated)s\n')
expected_warning_expr = (
warning_fmt % {
'reftype': 'footnote references',
'original': "\\['\\[#\\]_'\\]",
'translated': "\\[\\]"
} +
warning_fmt % {
'reftype': 'footnote references',
'original': "\\['\\[100\\]_'\\]",
'translated': "\\[\\]"
} +
warning_fmt % {
'reftype': 'references',
'original': "\\['reference_'\\]",
'translated': "\\['reference_', 'reference_'\\]"
} +
warning_fmt % {
'reftype': 'references',
'original': "\\[\\]",
'translated': "\\['`I18N WITH REFS INCONSISTENCY`_'\\]"
})
assert_re_search(expected_warning_expr, warnings)
expected_citation_warning_expr = (
'.*/refs_inconsistency.txt:\\d+: WARNING: Citation \\[ref2\\] is not referenced.\n' +
'.*/refs_inconsistency.txt:\\d+: WARNING: citation not found: ref3')
assert_re_search(expected_citation_warning_expr, warnings)
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_literalblock_warnings(app, warning):
app.build()
# --- check warning for literal block
result = (app.outdir / 'literalblock.txt').read_text()
expect = ("9. I18N WITH LITERAL BLOCK"
"\n**************************\n"
"\nCORRECT LITERAL BLOCK:\n"
"\n this is"
"\n literal block\n"
"\nMISSING LITERAL BLOCK:\n"
"\n<SYSTEM MESSAGE:")
assert_startswith(result, expect)
warnings = getwarning(warning)
expected_warning_expr = ('.*/literalblock.txt:\\d+: '
'WARNING: Literal block expected; none found.')
assert_re_search(expected_warning_expr, warnings)
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_definition_terms(app):
app.build()
# --- definition terms: regression test for #975, #2198, #2205
result = (app.outdir / 'definition_terms.txt').read_text()
expect = ("13. I18N WITH DEFINITION TERMS"
"\n******************************\n"
"\nSOME TERM"
"\n THE CORRESPONDING DEFINITION\n"
"\nSOME *TERM* WITH LINK"
"\n THE CORRESPONDING DEFINITION #2\n"
"\nSOME **TERM** WITH : CLASSIFIER1 : CLASSIFIER2"
"\n THE CORRESPONDING DEFINITION\n"
"\nSOME TERM WITH : CLASSIFIER[]"
"\n THE CORRESPONDING DEFINITION\n")
assert result == expect
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_glossary_term(app, warning):
app.build()
# --- glossary terms: regression test for #1090
result = (app.outdir / 'glossary_terms.txt').read_text()
expect = (r"""18. I18N WITH GLOSSARY TERMS
****************************
SOME NEW TERM
THE CORRESPONDING GLOSSARY
SOME OTHER NEW TERM
THE CORRESPONDING GLOSSARY #2
LINK TO *SOME NEW TERM*.
TRANSLATED GLOSSARY SHOULD BE SORTED BY TRANSLATED TERMS:
TRANSLATED TERM XXX
DEFINE XXX
TRANSLATED TERM YYY
DEFINE YYY
TRANSLATED TERM ZZZ
VVV
DEFINE ZZZ
""")
assert result == expect
warnings = getwarning(warning)
assert 'term not in glossary' not in warnings
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_glossary_term_inconsistencies(app, warning):
app.build()
# --- glossary term inconsistencies: regression test for #1090
result = (app.outdir / 'glossary_terms_inconsistency.txt').read_text()
expect = ("19. I18N WITH GLOSSARY TERMS INCONSISTENCY"
"\n******************************************\n"
"\n1. LINK TO *SOME NEW TERM*.\n")
assert result == expect
warnings = getwarning(warning)
expected_warning_expr = (
'.*/glossary_terms_inconsistency.txt:\\d+: '
'WARNING: inconsistent term references in translated message.'
" original: \\[':term:`Some term`', ':term:`Some other term`'\\],"
" translated: \\[':term:`SOME NEW TERM`'\\]\n")
assert_re_search(expected_warning_expr, warnings)
@sphinx_intl
@pytest.mark.sphinx('gettext')
@pytest.mark.test_params(shared_result='test_intl_gettext')
def test_gettext_section(app):
app.build()
# --- section
expect = read_po(app.srcdir / 'xx' / 'LC_MESSAGES' / 'section.po')
actual = read_po(app.outdir / 'section.pot')
for expect_msg in [m for m in expect if m.id]:
assert expect_msg.id in [m.id for m in actual if m.id]
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_section(app):
app.build()
# --- section
result = (app.outdir / 'section.txt').read_text()
expect = read_po(app.srcdir / 'xx' / 'LC_MESSAGES' / 'section.po')
for expect_msg in [m for m in expect if m.id]:
assert expect_msg.string in result
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_seealso(app):
app.build()
# --- seealso
result = (app.outdir / 'seealso.txt').read_text()
expect = ("12. I18N WITH SEEALSO"
"\n*********************\n"
"\nSee also: SHORT TEXT 1\n"
"\nSee also: LONG TEXT 1\n"
"\nSee also:\n"
"\n SHORT TEXT 2\n"
"\n LONG TEXT 2\n")
assert result == expect
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_figure_captions(app):
app.build()
# --- figure captions: regression test for #940
result = (app.outdir / 'figure.txt').read_text()
expect = ("14. I18N WITH FIGURE CAPTION"
"\n****************************\n"
"\n [image]MY CAPTION OF THE FIGURE\n"
"\n MY DESCRIPTION PARAGRAPH1 OF THE FIGURE.\n"
"\n MY DESCRIPTION PARAGRAPH2 OF THE FIGURE.\n"
"\n"
"\n14.1. FIGURE IN THE BLOCK"
"\n=========================\n"
"\nBLOCK\n"
"\n [image]MY CAPTION OF THE FIGURE\n"
"\n MY DESCRIPTION PARAGRAPH1 OF THE FIGURE.\n"
"\n MY DESCRIPTION PARAGRAPH2 OF THE FIGURE.\n"
"\n"
"\n"
"14.2. IMAGE URL AND ALT\n"
"=======================\n"
"\n"
"[image: I18N -> IMG][image]\n"
"\n"
" [image: IMG -> I18N][image]\n"
"\n"
"\n"
"14.3. IMAGE ON SUBSTITUTION\n"
"===========================\n"
"\n"
"\n"
"14.4. IMAGE UNDER NOTE\n"
"======================\n"
"\n"
"Note:\n"
"\n"
" [image: i18n under note][image]\n"
"\n"
" [image: img under note][image]\n")
assert result == expect
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_rubric(app):
app.build()
# --- rubric: regression test for pull request #190
result = (app.outdir / 'rubric.txt').read_text()
expect = ("I18N WITH RUBRIC"
"\n****************\n"
"\n-[ RUBRIC TITLE ]-\n"
"\n"
"\nRUBRIC IN THE BLOCK"
"\n===================\n"
"\nBLOCK\n"
"\n -[ RUBRIC TITLE ]-\n")
assert result == expect
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_docfields(app):
app.build()
# --- docfields
result = (app.outdir / 'docfields.txt').read_text()
expect = ("21. I18N WITH DOCFIELDS"
"\n***********************\n"
"\nclass Cls1\n"
"\n Parameters:"
"\n **param** -- DESCRIPTION OF PARAMETER param\n"
"\nclass Cls2\n"
"\n Parameters:"
"\n * **foo** -- DESCRIPTION OF PARAMETER foo\n"
"\n * **bar** -- DESCRIPTION OF PARAMETER bar\n"
"\nclass Cls3(values)\n"
"\n Raises:"
"\n **ValueError** -- IF THE VALUES ARE OUT OF RANGE\n"
"\nclass Cls4(values)\n"
"\n Raises:"
"\n * **TypeError** -- IF THE VALUES ARE NOT VALID\n"
"\n * **ValueError** -- IF THE VALUES ARE OUT OF RANGE\n"
"\nclass Cls5\n"
"\n Returns:"
'\n A NEW "Cls3" INSTANCE\n')
assert result == expect
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_admonitions(app):
app.build()
# --- admonitions
# #1206: gettext did not translate admonition directive's title
# seealso: https://docutils.sourceforge.io/docs/ref/rst/directives.html#admonitions
result = (app.outdir / 'admonitions.txt').read_text()
directives = (
"attention", "caution", "danger", "error", "hint",
"important", "note", "tip", "warning", "admonition")
for d in directives:
assert d.upper() + " TITLE" in result
assert d.upper() + " BODY" in result
# for #4938 `1. ` prefixed admonition title
assert "1. ADMONITION TITLE" in result
@sphinx_intl
@pytest.mark.sphinx('gettext')
@pytest.mark.test_params(shared_result='test_intl_gettext')
def test_gettext_toctree(app):
app.build()
# --- toctree (index.rst)
expect = read_po(app.srcdir / 'xx' / 'LC_MESSAGES' / 'index.po')
actual = read_po(app.outdir / 'index.pot')
for expect_msg in [m for m in expect if m.id]:
assert expect_msg.id in [m.id for m in actual if m.id]
# --- toctree (toctree.rst)
expect = read_po(app.srcdir / 'xx' / 'LC_MESSAGES' / 'toctree.po')
actual = read_po(app.outdir / 'toctree.pot')
for expect_msg in [m for m in expect if m.id]:
assert expect_msg.id in [m.id for m in actual if m.id]
@sphinx_intl
@pytest.mark.sphinx('gettext')
@pytest.mark.test_params(shared_result='test_intl_gettext')
def test_gettext_table(app):
app.build()
    # --- table
expect = read_po(app.srcdir / 'xx' / 'LC_MESSAGES' / 'table.po')
actual = read_po(app.outdir / 'table.pot')
for expect_msg in [m for m in expect if m.id]:
assert expect_msg.id in [m.id for m in actual if m.id]
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_table(app):
app.build()
    # --- table
result = (app.outdir / 'table.txt').read_text()
expect = read_po(app.srcdir / 'xx' / 'LC_MESSAGES' / 'table.po')
for expect_msg in [m for m in expect if m.id]:
assert expect_msg.string in result
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_toctree(app):
app.build()
# --- toctree (index.rst)
# Note: index.rst contains contents that is not shown in text.
result = (app.outdir / 'index.txt').read_text()
assert 'CONTENTS' in result
assert 'TABLE OF CONTENTS' in result
# --- toctree (toctree.rst)
result = (app.outdir / 'toctree.txt').read_text()
expect = read_po(app.srcdir / 'xx' / 'LC_MESSAGES' / 'toctree.po')
for expect_msg in [m for m in expect if m.id]:
assert expect_msg.string in result
@sphinx_intl
@pytest.mark.sphinx('gettext')
@pytest.mark.test_params(shared_result='test_intl_gettext')
def test_gettext_topic(app):
app.build()
# --- topic
expect = read_po(app.srcdir / 'xx' / 'LC_MESSAGES' / 'topic.po')
actual = read_po(app.outdir / 'topic.pot')
for expect_msg in [m for m in expect if m.id]:
assert expect_msg.id in [m.id for m in actual if m.id]
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_topic(app):
app.build()
# --- topic
result = (app.outdir / 'topic.txt').read_text()
expect = read_po(app.srcdir / 'xx' / 'LC_MESSAGES' / 'topic.po')
for expect_msg in [m for m in expect if m.id]:
assert expect_msg.string in result
@sphinx_intl
@pytest.mark.sphinx('gettext')
@pytest.mark.test_params(shared_result='test_intl_gettext')
def test_gettext_definition_terms(app):
app.build()
# --- definition terms: regression test for #2198, #2205
expect = read_po(app.srcdir / 'xx' / 'LC_MESSAGES' / 'definition_terms.po')
actual = read_po(app.outdir / 'definition_terms.pot')
for expect_msg in [m for m in expect if m.id]:
assert expect_msg.id in [m.id for m in actual if m.id]
@sphinx_intl
@pytest.mark.sphinx('gettext')
@pytest.mark.test_params(shared_result='test_intl_gettext')
def test_gettext_glossary_terms(app, warning):
app.build()
# --- glossary terms: regression test for #1090
expect = read_po(app.srcdir / 'xx' / 'LC_MESSAGES' / 'glossary_terms.po')
actual = read_po(app.outdir / 'glossary_terms.pot')
for expect_msg in [m for m in expect if m.id]:
assert expect_msg.id in [m.id for m in actual if m.id]
warnings = warning.getvalue().replace(os.sep, '/')
assert 'term not in glossary' not in warnings
@sphinx_intl
@pytest.mark.sphinx('gettext')
@pytest.mark.test_params(shared_result='test_intl_gettext')
def test_gettext_glossary_term_inconsistencies(app):
app.build()
# --- glossary term inconsistencies: regression test for #1090
expect = read_po(app.srcdir / 'xx' / 'LC_MESSAGES' / 'glossary_terms_inconsistency.po')
actual = read_po(app.outdir / 'glossary_terms_inconsistency.pot')
for expect_msg in [m for m in expect if m.id]:
assert expect_msg.id in [m.id for m in actual if m.id]
@sphinx_intl
@pytest.mark.sphinx('gettext')
@pytest.mark.test_params(shared_result='test_intl_gettext')
def test_gettext_literalblock(app):
app.build()
    # --- literal blocks
expect = read_po(app.srcdir / 'xx' / 'LC_MESSAGES' / 'literalblock.po')
actual = read_po(app.outdir / 'literalblock.pot')
for expect_msg in [m for m in expect if m.id]:
if len(expect_msg.id.splitlines()) == 1:
            # compare translations of single-line messages (labels) only
assert expect_msg.id in [m.id for m in actual if m.id]
else:
pass # skip code-blocks and literalblocks
@sphinx_intl
@pytest.mark.sphinx('gettext')
@pytest.mark.test_params(shared_result='test_intl_gettext')
def test_gettext_buildr_ignores_only_directive(app):
app.build()
# --- gettext builder always ignores ``only`` directive
expect = read_po(app.srcdir / 'xx' / 'LC_MESSAGES' / 'only.po')
actual = read_po(app.outdir / 'only.pot')
for expect_msg in [m for m in expect if m.id]:
assert expect_msg.id in [m.id for m in actual if m.id]
@sphinx_intl
# use individual shared_result directory to avoid "incompatible doctree" error
@pytest.mark.sphinx(testroot='builder-gettext-dont-rebuild-mo')
def test_gettext_dont_rebuild_mo(make_app, app_params):
# --- don't rebuild by .mo mtime
def get_number_of_update_targets(app_):
app_.env.find_files(app_.config, app_.builder)
_, updated, _ = app_.env.get_outdated_files(config_changed=False)
return len(updated)
args, kwargs = app_params
# phase1: build document with non-gettext builder and generate mo file in srcdir
app0 = make_app('dummy', *args, **kwargs)
app0.build()
assert (app0.srcdir / 'xx' / 'LC_MESSAGES' / 'bom.mo').exists()
# Since it is after the build, the number of documents to be updated is 0
assert get_number_of_update_targets(app0) == 0
# When rewriting the timestamp of mo file, the number of documents to be
# updated will be changed.
mtime = (app0.srcdir / 'xx' / 'LC_MESSAGES' / 'bom.mo').stat().st_mtime
(app0.srcdir / 'xx' / 'LC_MESSAGES' / 'bom.mo').utime((mtime + 5, mtime + 5))
assert get_number_of_update_targets(app0) == 1
# Because doctree for gettext builder can not be shared with other builders,
# erase doctreedir before gettext build.
app0.doctreedir.rmtree()
# phase2: build document with gettext builder.
# The mo file in the srcdir directory is retained.
app = make_app('gettext', *args, **kwargs)
app.build()
# Since it is after the build, the number of documents to be updated is 0
assert get_number_of_update_targets(app) == 0
# Even if the timestamp of the mo file is updated, the number of documents
# to be updated is 0. gettext builder does not rebuild because of mo update.
(app0.srcdir / 'xx' / 'LC_MESSAGES' / 'bom.mo').utime((mtime + 10, mtime + 10))
assert get_number_of_update_targets(app) == 0
@sphinx_intl
@pytest.mark.sphinx('html')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_meta(app):
app.build()
# --- test for meta
result = (app.outdir / 'index.html').read_text()
expected_expr = '<meta content="TESTDATA FOR I18N" name="description" />'
assert expected_expr in result
expected_expr = '<meta content="I18N, SPHINX, MARKUP" name="keywords" />'
assert expected_expr in result
expected_expr = '<p class="caption" role="heading"><span class="caption-text">HIDDEN TOC</span></p>'
assert expected_expr in result
@sphinx_intl
@pytest.mark.sphinx('html')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_footnotes(app):
app.build()
# --- test for #955 cant-build-html-with-footnotes-when-using
# expect no error by build
(app.outdir / 'footnote.html').read_text()
@sphinx_intl
@pytest.mark.sphinx('html')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_undefined_refs(app):
app.build()
# --- links to undefined reference
result = (app.outdir / 'refs_inconsistency.html').read_text()
expected_expr = ('<a class="reference external" '
'href="http://www.example.com">reference</a>')
assert len(re.findall(expected_expr, result)) == 2
expected_expr = ('<a class="reference internal" '
'href="#reference">reference</a>')
assert len(re.findall(expected_expr, result)) == 0
expected_expr = ('<a class="reference internal" '
'href="#i18n-with-refs-inconsistency">I18N WITH '
'REFS INCONSISTENCY</a>')
assert len(re.findall(expected_expr, result)) == 1
@sphinx_intl
@pytest.mark.sphinx('html')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_index_entries(app):
app.build()
# --- index entries: regression test for #976
result = (app.outdir / 'genindex.html').read_text()
def wrap(tag, keyword):
start_tag = "<%s[^>]*>" % tag
end_tag = "</%s>" % tag
return r"%s\s*%s\s*%s" % (start_tag, keyword, end_tag)
def wrap_nest(parenttag, childtag, keyword):
start_tag1 = "<%s[^>]*>" % parenttag
start_tag2 = "<%s[^>]*>" % childtag
return r"%s\s*%s\s*%s" % (start_tag1, keyword, start_tag2)
expected_exprs = [
wrap('a', 'NEWSLETTER'),
wrap('a', 'MAILING LIST'),
wrap('a', 'RECIPIENTS LIST'),
wrap('a', 'FIRST SECOND'),
wrap('a', 'SECOND THIRD'),
wrap('a', 'THIRD, FIRST'),
wrap_nest('li', 'ul', 'ENTRY'),
wrap_nest('li', 'ul', 'SEE'),
wrap('a', 'MODULE'),
wrap('a', 'KEYWORD'),
wrap('a', 'OPERATOR'),
wrap('a', 'OBJECT'),
wrap('a', 'EXCEPTION'),
wrap('a', 'STATEMENT'),
wrap('a', 'BUILTIN'),
]
for expr in expected_exprs:
assert_re_search(expr, result, re.M)
@sphinx_intl
@pytest.mark.sphinx('html')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_versionchanges(app):
app.build()
# --- versionchanges
result = (app.outdir / 'versionchange.html').read_text()
def get_content(result, name):
matched = re.search(r'<div class="%s">\n*(.*?)</div>' % name,
result, re.DOTALL)
if matched:
return matched.group(1)
else:
return ''
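    # e.g. get_content(result, "deprecated") returns the inner HTML of the first
    # <div class="deprecated"> ... </div> block, or '' when no such div exists.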
expect1 = (
"""<p><span class="versionmodified deprecated">Deprecated since version 1.0: </span>"""
"""THIS IS THE <em>FIRST</em> PARAGRAPH OF DEPRECATED.</p>\n"""
"""<p>THIS IS THE <em>SECOND</em> PARAGRAPH OF DEPRECATED.</p>\n""")
matched_content = get_content(result, "deprecated")
assert expect1 == matched_content
expect2 = (
"""<p><span class="versionmodified added">New in version 1.0: </span>"""
"""THIS IS THE <em>FIRST</em> PARAGRAPH OF VERSIONADDED.</p>\n""")
matched_content = get_content(result, "versionadded")
assert expect2 == matched_content
expect3 = (
"""<p><span class="versionmodified changed">Changed in version 1.0: </span>"""
"""THIS IS THE <em>FIRST</em> PARAGRAPH OF VERSIONCHANGED.</p>\n""")
matched_content = get_content(result, "versionchanged")
assert expect3 == matched_content
@sphinx_intl
@pytest.mark.sphinx('html')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_docfields(app):
app.build()
# --- docfields
# expect no error by build
(app.outdir / 'docfields.html').read_text()
@sphinx_intl
@pytest.mark.sphinx('html')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_template(app):
app.build()
# --- gettext template
result = (app.outdir / 'contents.html').read_text()
assert "WELCOME" in result
assert "SPHINX 2013.120" in result
@sphinx_intl
@pytest.mark.sphinx('html')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_html_rebuild_mo(app):
app.build()
# --- rebuild by .mo mtime
app.builder.build_update()
app.env.find_files(app.config, app.builder)
_, updated, _ = app.env.get_outdated_files(config_changed=False)
assert len(updated) == 0
mtime = (app.srcdir / 'xx' / 'LC_MESSAGES' / 'bom.mo').stat().st_mtime
(app.srcdir / 'xx' / 'LC_MESSAGES' / 'bom.mo').utime((mtime + 5, mtime + 5))
app.env.find_files(app.config, app.builder)
_, updated, _ = app.env.get_outdated_files(config_changed=False)
assert len(updated) == 1
@sphinx_intl
@pytest.mark.sphinx('xml')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_footnotes(app, warning):
app.build()
# --- footnotes: regression test for fix #955, #1176
et = etree_parse(app.outdir / 'footnote.xml')
secs = et.findall('section')
para0 = secs[0].findall('paragraph')
assert_elem(
para0[0],
['I18N WITH FOOTNOTE', 'INCLUDE THIS CONTENTS',
'2', '[ref]', '1', '100', '*', '. SECOND FOOTNOTE_REF', '100', '.'],
['i18n-with-footnote', 'ref'])
    # check node_id for footnote_references which refer to the same footnote (refs: #3002)
assert para0[0][4].text == para0[0][6].text == '100'
assert para0[0][4].attrib['ids'] != para0[0][6].attrib['ids']
footnote0 = secs[0].findall('footnote')
assert_elem(
footnote0[0],
['1', 'THIS IS A AUTO NUMBERED FOOTNOTE.'],
None,
['1'])
assert_elem(
footnote0[1],
['100', 'THIS IS A NUMBERED FOOTNOTE.'],
None,
['100'])
assert_elem(
footnote0[2],
['2', 'THIS IS A AUTO NUMBERED NAMED FOOTNOTE.'],
None,
['named'])
assert_elem(
footnote0[3],
['*', 'THIS IS A AUTO SYMBOL FOOTNOTE.'],
None,
None)
citation0 = secs[0].findall('citation')
assert_elem(
citation0[0],
['ref', 'THIS IS A NAMED FOOTNOTE.'],
None,
['ref'])
warnings = getwarning(warning)
warning_expr = '.*/footnote.xml:\\d*: SEVERE: Duplicate ID: ".*".\n'
assert_not_re_search(warning_expr, warnings)
@sphinx_intl
@pytest.mark.sphinx('xml')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_footnote_backlinks(app):
app.build()
# --- footnote backlinks: i18n test for #1058
et = etree_parse(app.outdir / 'footnote.xml')
secs = et.findall('section')
para0 = secs[0].findall('paragraph')
refs0 = para0[0].findall('footnote_reference')
refid2id = {r.attrib.get('refid'): r.attrib.get('ids') for r in refs0}
footnote0 = secs[0].findall('footnote')
for footnote in footnote0:
ids = footnote.attrib.get('ids')
backrefs = footnote.attrib.get('backrefs').split()
assert refid2id[ids] in backrefs
@sphinx_intl
@pytest.mark.sphinx('xml')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_refs_in_python_domain(app):
app.build()
# --- refs in the Python domain
et = etree_parse(app.outdir / 'refs_python_domain.xml')
secs = et.findall('section')
# regression test for fix #1363
para0 = secs[0].findall('paragraph')
assert_elem(
para0[0],
['SEE THIS DECORATOR:', 'sensitive_variables()', '.'],
['sensitive.sensitive_variables'])
@sphinx_intl
@pytest.mark.sphinx('xml')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_keep_external_links(app):
app.build()
# --- keep external links: regression test for #1044
et = etree_parse(app.outdir / 'external_links.xml')
secs = et.findall('section')
para0 = secs[0].findall('paragraph')
# external link check
assert_elem(
para0[0],
['EXTERNAL LINK TO', 'Python', '.'],
['http://python.org/index.html'])
# internal link check
assert_elem(
para0[1],
['EXTERNAL LINKS', 'IS INTERNAL LINK.'],
['i18n-with-external-links'])
# inline link check
assert_elem(
para0[2],
['INLINE LINK BY', 'THE SPHINX SITE', '.'],
['http://sphinx-doc.org'])
# unnamed link check
assert_elem(
para0[3],
['UNNAMED', 'LINK', '.'],
['http://google.com'])
# link target swapped translation
para1 = secs[1].findall('paragraph')
assert_elem(
para1[0],
['LINK TO', 'external2', 'AND', 'external1', '.'],
['https://www.google.com/external2',
'https://www.google.com/external1'])
assert_elem(
para1[1],
['LINK TO', 'THE PYTHON SITE', 'AND', 'THE SPHINX SITE', '.'],
['http://python.org', 'http://sphinx-doc.org'])
# multiple references in the same line
para2 = secs[2].findall('paragraph')
assert_elem(
para2[0],
['LINK TO', 'EXTERNAL LINKS', ',', 'Python', ',',
'THE SPHINX SITE', ',', 'UNNAMED', 'AND',
'THE PYTHON SITE', '.'],
['i18n-with-external-links', 'http://python.org/index.html',
'http://sphinx-doc.org', 'http://google.com',
'http://python.org'])
@sphinx_intl
@pytest.mark.sphinx('xml')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_role_xref(app):
app.build()
# --- role xref: regression test for #1090, #1193
et = etree_parse(app.outdir / 'role_xref.xml')
sec1, sec2 = et.findall('section')
para1, = sec1.findall('paragraph')
assert_elem(
para1,
['LINK TO', "I18N ROCK'N ROLE XREF", ',', 'CONTENTS', ',',
'SOME NEW TERM', '.'],
['i18n-role-xref', 'index',
'glossary_terms#term-Some-term'])
para2 = sec2.findall('paragraph')
assert_elem(
para2[0],
['LINK TO', 'SOME OTHER NEW TERM', 'AND', 'SOME NEW TERM', '.'],
['glossary_terms#term-Some-other-term',
'glossary_terms#term-Some-term'])
assert_elem(
para2[1],
['LINK TO', 'LABEL', 'AND',
'SAME TYPE LINKS', 'AND', 'SAME TYPE LINKS', '.'],
['i18n-role-xref', 'same-type-links', 'same-type-links'])
assert_elem(
para2[2],
['LINK TO', 'I18N WITH GLOSSARY TERMS', 'AND', 'CONTENTS', '.'],
['glossary_terms', 'index'])
assert_elem(
para2[3],
['LINK TO', '--module', 'AND', '-m', '.'],
['cmdoption-module', 'cmdoption-m'])
assert_elem(
para2[4],
['LINK TO', 'env2', 'AND', 'env1', '.'],
['envvar-env2', 'envvar-env1'])
assert_elem(
para2[5],
['LINK TO', 'token2', 'AND', 'token1', '.'],
[]) # TODO: how do I link token role to productionlist?
assert_elem(
para2[6],
['LINK TO', 'same-type-links', 'AND', "i18n-role-xref", '.'],
['same-type-links', 'i18n-role-xref'])
@sphinx_intl
@pytest.mark.sphinx('xml')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_warnings(app, warning):
app.build()
# warnings
warnings = getwarning(warning)
assert 'term not in glossary' not in warnings
assert 'undefined label' not in warnings
assert 'unknown document' not in warnings
@sphinx_intl
@pytest.mark.sphinx('xml')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_xml_label_targets(app):
app.build()
# --- label targets: regression test for #1193, #1265
et = etree_parse(app.outdir / 'label_target.xml')
secs = et.findall('section')
para0 = secs[0].findall('paragraph')
assert_elem(
para0[0],
['X SECTION AND LABEL', 'POINT TO', 'implicit-target', 'AND',
'X SECTION AND LABEL', 'POINT TO', 'section-and-label', '.'],
['implicit-target', 'section-and-label'])
para1 = secs[1].findall('paragraph')
assert_elem(
para1[0],
['X EXPLICIT-TARGET', 'POINT TO', 'explicit-target', 'AND',
'X EXPLICIT-TARGET', 'POINT TO DUPLICATED ID LIKE', 'id1',
'.'],
['explicit-target', 'id1'])
para2 = secs[2].findall('paragraph')
assert_elem(
para2[0],
['X IMPLICIT SECTION NAME', 'POINT TO',
'implicit-section-name', '.'],
['implicit-section-name'])
sec2 = secs[2].findall('section')
para2_0 = sec2[0].findall('paragraph')
assert_elem(
para2_0[0],
['`X DUPLICATED SUB SECTION`_', 'IS BROKEN LINK.'],
[])
para3 = secs[3].findall('paragraph')
assert_elem(
para3[0],
['X', 'bridge label',
'IS NOT TRANSLATABLE BUT LINKED TO TRANSLATED ' +
'SECTION TITLE.'],
['label-bridged-target-section'])
assert_elem(
para3[1],
['X', 'bridge label', 'POINT TO',
'LABEL BRIDGED TARGET SECTION', 'AND', 'bridge label2',
'POINT TO', 'SECTION AND LABEL', '. THE SECOND APPEARED',
'bridge label2', 'POINT TO CORRECT TARGET.'],
['label-bridged-target-section',
'section-and-label',
'section-and-label'])
@sphinx_intl
@pytest.mark.sphinx('html')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_additional_targets_should_not_be_translated(app):
app.build()
# [literalblock.txt]
result = (app.outdir / 'literalblock.html').read_text()
# title should be translated
expected_expr = 'CODE-BLOCKS'
assert_count(expected_expr, result, 2)
# ruby code block should not be translated but be highlighted
expected_expr = """<span class="s1">'result'</span>"""
assert_count(expected_expr, result, 1)
# C code block without lang should not be translated and *ruby* highlighted
expected_expr = """<span class="c1">#include <stdlib.h></span>"""
assert_count(expected_expr, result, 1)
# C code block with lang should not be translated but be *C* highlighted
if pygments_version < (2, 10, 0):
expected_expr = ("""<span class="cp">#include</span> """
"""<span class="cpf"><stdio.h></span>""")
else:
expected_expr = ("""<span class="cp">#include</span>"""
"""<span class="w"> </span>"""
"""<span class="cpf"><stdio.h></span>""")
assert_count(expected_expr, result, 1)
# literal block in list item should not be translated
expected_expr = ("""<span class="n">literal</span>"""
"""<span class="o">-</span>"""
"""<span class="n">block</span>\n"""
"""<span class="k">in</span> """
"""<span class="n">list</span>""")
assert_count(expected_expr, result, 1)
# doctest block should not be translated but be highlighted
expected_expr = (
"""<span class="gp">>>> </span>"""
"""<span class="kn">import</span> <span class="nn">sys</span> """
"""<span class="c1"># sys importing</span>""")
assert_count(expected_expr, result, 1)
# [raw.txt]
result = (app.outdir / 'raw.html').read_text()
# raw block should not be translated
if docutils.__version_info__ < (0, 17):
expected_expr = """<iframe src="http://sphinx-doc.org"></iframe></div>"""
assert_count(expected_expr, result, 1)
else:
expected_expr = """<iframe src="http://sphinx-doc.org"></iframe></section>"""
assert_count(expected_expr, result, 1)
# [figure.txt]
result = (app.outdir / 'figure.html').read_text()
# src for image block should not be translated (alt is translated)
expected_expr = """<img alt="I18N -> IMG" src="_images/i18n.png" />"""
assert_count(expected_expr, result, 1)
# src for figure block should not be translated (alt is translated)
expected_expr = """<img alt="IMG -> I18N" src="_images/img.png" />"""
assert_count(expected_expr, result, 1)
@sphinx_intl
@pytest.mark.sphinx(
'html',
srcdir='test_additional_targets_should_be_translated',
confoverrides={
'language': 'xx', 'locale_dirs': ['.'],
'gettext_compact': False,
'gettext_additional_targets': [
'index',
'literal-block',
'doctest-block',
'raw',
'image',
],
}
)
def test_additional_targets_should_be_translated(app):
app.build()
# [literalblock.txt]
result = (app.outdir / 'literalblock.html').read_text()
# title should be translated
expected_expr = 'CODE-BLOCKS'
assert_count(expected_expr, result, 2)
# ruby code block should be translated and be highlighted
expected_expr = """<span class="s1">'RESULT'</span>"""
assert_count(expected_expr, result, 1)
# C code block without lang should be translated and *ruby* highlighted
expected_expr = """<span class="c1">#include <STDLIB.H></span>"""
assert_count(expected_expr, result, 1)
# C code block with lang should be translated and be *C* highlighted
if pygments_version < (2, 10, 0):
expected_expr = ("""<span class="cp">#include</span> """
"""<span class="cpf"><STDIO.H></span>""")
else:
expected_expr = ("""<span class="cp">#include</span>"""
"""<span class="w"> </span>"""
"""<span class="cpf"><STDIO.H></span>""")
assert_count(expected_expr, result, 1)
# literal block in list item should be translated
expected_expr = ("""<span class="no">LITERAL</span>"""
"""<span class="o">-</span>"""
"""<span class="no">BLOCK</span>\n"""
"""<span class="no">IN</span> """
"""<span class="no">LIST</span>""")
assert_count(expected_expr, result, 1)
# doctest block should not be translated but be highlighted
expected_expr = (
"""<span class="gp">>>> </span>"""
"""<span class="kn">import</span> <span class="nn">sys</span> """
"""<span class="c1"># SYS IMPORTING</span>""")
assert_count(expected_expr, result, 1)
# [raw.txt]
result = (app.outdir / 'raw.html').read_text()
# raw block should be translated
if docutils.__version_info__ < (0, 17):
expected_expr = """<iframe src="HTTP://SPHINX-DOC.ORG"></iframe></div>"""
assert_count(expected_expr, result, 1)
else:
expected_expr = """<iframe src="HTTP://SPHINX-DOC.ORG"></iframe></section>"""
assert_count(expected_expr, result, 1)
# [figure.txt]
result = (app.outdir / 'figure.html').read_text()
# alt and src for image block should be translated
expected_expr = """<img alt="I18N -> IMG" src="_images/img.png" />"""
assert_count(expected_expr, result, 1)
# alt and src for figure block should be translated
expected_expr = """<img alt="IMG -> I18N" src="_images/i18n.png" />"""
assert_count(expected_expr, result, 1)
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_references(app, warning):
app.builder.build_specific([app.srcdir / 'refs.txt'])
warnings = warning.getvalue().replace(os.sep, '/')
warning_expr = 'refs.txt:\\d+: ERROR: Unknown target name:'
assert_count(warning_expr, warnings, 0)
@pytest.mark.sphinx(
'dummy', testroot='images',
srcdir='test_intl_images',
confoverrides={'language': 'xx'}
)
@pytest.mark.xfail(os.name != 'posix', reason="Not working on windows")
def test_image_glob_intl(app):
app.build()
# index.rst
doctree = app.env.get_doctree('index')
assert_node(doctree[0][1], nodes.image, uri='rimg.xx.png',
candidates={'*': 'rimg.xx.png'})
assert isinstance(doctree[0][2], nodes.figure)
assert_node(doctree[0][2][0], nodes.image, uri='rimg.xx.png',
candidates={'*': 'rimg.xx.png'})
assert_node(doctree[0][3], nodes.image, uri='img.*',
candidates={'application/pdf': 'img.pdf',
'image/gif': 'img.gif',
'image/png': 'img.png'})
assert isinstance(doctree[0][4], nodes.figure)
assert_node(doctree[0][4][0], nodes.image, uri='img.*',
candidates={'application/pdf': 'img.pdf',
'image/gif': 'img.gif',
'image/png': 'img.png'})
# subdir/index.rst
doctree = app.env.get_doctree('subdir/index')
assert_node(doctree[0][1], nodes.image, uri='subdir/rimg.xx.png',
candidates={'*': 'subdir/rimg.xx.png'})
assert_node(doctree[0][2], nodes.image, uri='subdir/svgimg.*',
candidates={'application/pdf': 'subdir/svgimg.pdf',
'image/svg+xml': 'subdir/svgimg.xx.svg'})
assert isinstance(doctree[0][3], nodes.figure)
assert_node(doctree[0][3][0], nodes.image, uri='subdir/svgimg.*',
candidates={'application/pdf': 'subdir/svgimg.pdf',
'image/svg+xml': 'subdir/svgimg.xx.svg'})
@pytest.mark.sphinx(
'dummy', testroot='images',
srcdir='test_intl_images',
confoverrides={
'language': 'xx',
'figure_language_filename': '{root}{ext}.{language}',
}
)
@pytest.mark.xfail(os.name != 'posix', reason="Not working on windows")
def test_image_glob_intl_using_figure_language_filename(app):
app.build()
# index.rst
doctree = app.env.get_doctree('index')
assert_node(doctree[0][1], nodes.image, uri='rimg.png.xx',
candidates={'*': 'rimg.png.xx'})
assert isinstance(doctree[0][2], nodes.figure)
assert_node(doctree[0][2][0], nodes.image, uri='rimg.png.xx',
candidates={'*': 'rimg.png.xx'})
assert_node(doctree[0][3], nodes.image, uri='img.*',
candidates={'application/pdf': 'img.pdf',
'image/gif': 'img.gif',
'image/png': 'img.png'})
assert isinstance(doctree[0][4], nodes.figure)
assert_node(doctree[0][4][0], nodes.image, uri='img.*',
candidates={'application/pdf': 'img.pdf',
'image/gif': 'img.gif',
'image/png': 'img.png'})
# subdir/index.rst
doctree = app.env.get_doctree('subdir/index')
assert_node(doctree[0][1], nodes.image, uri='subdir/rimg.png',
candidates={'*': 'subdir/rimg.png'})
assert_node(doctree[0][2], nodes.image, uri='subdir/svgimg.*',
candidates={'application/pdf': 'subdir/svgimg.pdf',
'image/svg+xml': 'subdir/svgimg.svg'})
assert isinstance(doctree[0][3], nodes.figure)
assert_node(doctree[0][3][0], nodes.image, uri='subdir/svgimg.*',
candidates={'application/pdf': 'subdir/svgimg.pdf',
'image/svg+xml': 'subdir/svgimg.svg'})
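
# Note on the helper below: it normalises the captured warning stream by
# replacing os.sep with '/' and stripping ANSI escape sequences, so the regex
# assertions in the tests above stay platform- and colour-independent.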
def getwarning(warnings):
return strip_escseq(warnings.getvalue().replace(os.sep, '/'))
@pytest.mark.sphinx('html', testroot='basic',
srcdir='gettext_allow_fuzzy_translations',
confoverrides={
'language': 'de',
'gettext_allow_fuzzy_translations': True
})
def test_gettext_allow_fuzzy_translations(app):
locale_dir = app.srcdir / 'locales' / 'de' / 'LC_MESSAGES'
locale_dir.makedirs()
with (locale_dir / 'index.po').open('wb') as f:
catalog = Catalog()
catalog.add('features', 'FEATURES', flags=('fuzzy',))
pofile.write_po(f, catalog)
app.build()
content = (app.outdir / 'index.html').read_text()
assert 'FEATURES' in content
@pytest.mark.sphinx('html', testroot='basic',
srcdir='gettext_disallow_fuzzy_translations',
confoverrides={
'language': 'de',
'gettext_allow_fuzzy_translations': False
})
def test_gettext_disallow_fuzzy_translations(app):
locale_dir = app.srcdir / 'locales' / 'de' / 'LC_MESSAGES'
locale_dir.makedirs()
with (locale_dir / 'index.po').open('wb') as f:
catalog = Catalog()
catalog.add('features', 'FEATURES', flags=('fuzzy',))
pofile.write_po(f, catalog)
app.build()
content = (app.outdir / 'index.html').read_text()
assert 'FEATURES' not in content
@pytest.mark.sphinx('html', testroot='basic', confoverrides={'language': 'de'})
def test_customize_system_message(make_app, app_params, sphinx_test_tempdir):
try:
# clear translators cache
locale.translators.clear()
# prepare message catalog (.po)
locale_dir = sphinx_test_tempdir / 'basic' / 'locales' / 'de' / 'LC_MESSAGES'
locale_dir.makedirs()
with (locale_dir / 'sphinx.po').open('wb') as f:
catalog = Catalog()
catalog.add('Quick search', 'QUICK SEARCH')
pofile.write_po(f, catalog)
# construct application and convert po file to .mo
args, kwargs = app_params
app = make_app(*args, **kwargs)
assert (locale_dir / 'sphinx.mo').exists()
assert app.translator.gettext('Quick search') == 'QUICK SEARCH'
app.build()
content = (app.outdir / 'index.html').read_text()
assert 'QUICK SEARCH' in content
finally:
locale.translators.clear()
| avg_line_length: 35.117647 | max_line_length: 104 | alphanum_fraction: 0.603263 |

| hexsha: 5b50c0d0d6819e10770bf62ca65017e622e2a31e | size: 3,074 | ext: py | lang: Python |
| max_stars_repo_path: img_process.py | max_stars_repo_name: willsirius/Openrave_Painter | max_stars_repo_head_hexsha: 2584722ee23f97baa54c56d7c9fffcbe895a3cc6 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null |
| max_issues_repo_path: img_process.py | max_issues_repo_name: willsirius/Openrave_Painter | max_issues_repo_head_hexsha: 2584722ee23f97baa54c56d7c9fffcbe895a3cc6 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: img_process.py | max_forks_repo_name: willsirius/Openrave_Painter | max_forks_repo_head_hexsha: 2584722ee23f97baa54c56d7c9fffcbe895a3cc6 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
import cv2
import time
import openravepy
import numpy as np
img = cv2.imread('obj.jpg')
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
(thresh, im_bw) = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
im_bw = cv2.threshold(im_bw, thresh, 255, cv2.THRESH_BINARY)[1]
rows, cols = im_bw.shape[:2]  # dimensions of the thresholded image
img = []
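# Each dark pixel kept below (every 10th row and column of the binarised image)
# becomes a 3D point on the vertical plane x = -0.2, with 1 pixel mapped to
# 1 mm: y = j*0.001 - 0.1 and z = -i*0.001 + 1 (image rows grow downwards).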
for i in range(0,rows) :
for j in range(0,cols) :
if im_bw[i][j] < 255 and i%10 == 0 and j%10 == 0:
img.append([-0.2, j*0.001 - 0.1,-i*0.001+1])
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
def waitrobot(robot):
"""busy wait for robot completion"""
while not robot.GetController().IsDone():
time.sleep(0.01)
def tuckarms(env,robot):
with env:
jointnames = ['l_shoulder_lift_joint','l_elbow_flex_joint','l_wrist_flex_joint','r_shoulder_lift_joint','r_elbow_flex_joint','r_wrist_flex_joint']
robot.SetActiveDOFs([robot.GetJoint(name).GetDOFIndex() for name in jointnames])
robot.SetActiveDOFValues([1.29023451,-2.32099996,-0.69800004,1.27843491,-2.32100002,-0.69799996]);
robot.GetController().SetDesired(robot.GetDOFValues());
waitrobot(robot)
if __name__ == "__main__":
env = Environment()
env.SetViewer('qtcoin')
collisionChecker = RaveCreateCollisionChecker(env,'ode')
env.SetCollisionChecker(collisionChecker)
env.Reset()
# load a scene from ProjectRoom environment XML file
env.Load('hw3.env.xml')
time.sleep(0.1)
# 1) get the 1st robot that is inside the loaded scene
# 2) assign it to the variable named 'robot'
robot = env.GetRobots()[0]
# jointnames = ['J0','J1','J2','J3','J4','J5']
# robot.SetActiveDOFs([robot.GetJoint(name).GetDOFIndex() for name in jointnames])
print robot
tuckarms(env, robot)
robot.SetActiveManipulator(robot.GetManipulator('rightarm_torso'))
T = robot.GetManipulator('rightarm_torso').GetTransform()
point = [T[0,3], T[1,3], T[2,3]]
print T
print point
with env:
ikmodel = openravepy.databases.inversekinematics.InverseKinematicsModel(robot,iktype=IkParameterization.Type.Translation3D)
print ikmodel
if not ikmodel.load():
ikmodel.autogenerate()
#h = env.plot3(Tee[0:3,3],10) # plot one point
#raw_input('press any key')
handles = [];
joint_names = ['torso_lift_joint','r_shoulder_pan_joint','r_shoulder_lift_joint', 'r_upper_arm_roll_joint','r_elbow_flex_joint', 'r_forearm_roll_joint', 'r_wrist_flex_joint']
robot.SetActiveDOFs([robot.GetJoint(name).GetDOFIndex() for name in joint_names]);
raw_input("Press enter to continue...")
for point in img:
color = [0,0,0]
handles.append(env.plot3(points=point,pointsize=2.0, colors=color))
solutions = ikmodel.manip.FindIKSolution(IkParameterization(point, IkParameterization.Type.Translation3D),IkFilterOptions.CheckEnvCollisions)
robot.SetActiveDOFValues(solutions)
robot.GetController().SetDesired(robot.GetDOFValues());
T = robot.GetManipulator('rightarm_torso').GetTransform()
print "Endeffector After IK"
point = [T[0,3], T[1,3], T[2,3]]
print point
time.sleep(0.02)
raw_input("Press enter to exit...")
| avg_line_length: 33.053763 | max_line_length: 175 | alphanum_fraction: 0.736174 |

| hexsha: 134dab9647763062c11ff584c1b98d399d6eda74 | size: 5,399 | ext: py | lang: Python |
| max_stars_repo_path: src/GAN_CNNRegression/train.py | max_stars_repo_name: MatthieuSarkis/criticality | max_stars_repo_head_hexsha: 2939d7408bcd5d3bf7f237cfe56c836cf361e64f | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null |
| max_issues_repo_path: src/GAN_CNNRegression/train.py | max_issues_repo_name: MatthieuSarkis/criticality | max_issues_repo_head_hexsha: 2939d7408bcd5d3bf7f237cfe56c836cf361e64f | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: src/GAN_CNNRegression/train.py | max_forks_repo_name: MatthieuSarkis/criticality | max_forks_repo_head_hexsha: 2939d7408bcd5d3bf7f237cfe56c836cf361e64f | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
# -*- coding: utf-8 -*-
#
# Written by Adel Sohbi and Matthieu Sarkis, https://github.com/adelshb, https://github.com/MatthieuSarkis
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Train the GAN model."""
from argparse import ArgumentParser
import os
from datetime import datetime
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from dcgan import make_generator_model
from utils import train_step, val_pred_loss
from logger import Logger
def main(args, logger_plots = False):
save_dir = os.path.join(args.save_dir, datetime.now().strftime("%Y.%m.%d.%H.%M.%S"))
tensorboard_writer_gen_loss = tf.summary.create_file_writer(save_dir + "/tensorboard/gen_loss")
tensorboard_writer_gen_reg = tf.summary.create_file_writer(save_dir + "/tensorboard/gen_reg")
tensorboard_writer_gen_signed_loss = tf.summary.create_file_writer(save_dir + "/tensorboard/gen_signed_loss")
tensorboard_writer_val_loss = tf.summary.create_file_writer(save_dir + "/tensorboard/val_loss")
print("Tensorboard command line: {}".format("tensorboard --logdir " + save_dir + "/tensorboard"))
generator = make_generator_model(args.noise_dim)
cnn = tf.keras.models.load_model(args.CNN_model_path)
loss_function = tf.keras.losses.MeanAbsoluteError()
generator_optimizer = tf.keras.optimizers.Adam(args.lr)
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer, generator=generator)
checkpoint_dir = os.path.join(save_dir, 'training_checkpoints')
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
logger = Logger(save_dir=save_dir)
for epoch in range(args.epochs):
logger.set_time_stamp(1)
noise = tf.random.normal([args.batch_size, args.noise_dim], mean=args.noise_mean, stddev=args.noise_std)
(gen_loss, gen_reg), gen_signed_loss= train_step(generator= generator,
cnn= cnn,
generator_optimizer= generator_optimizer,
loss_function= loss_function,
noise= noise,
l= args.reg_coeff)
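        # Validation: the generator is scored against a fixed target of 0.5928,
        # presumably the site-percolation threshold p_c of the 2D square lattice
        # (p_c ~ 0.5927), i.e. the CNN regressor should judge the generated
        # configurations as critical.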
val_loss, val_mean_pred, val_stddev = val_pred_loss(generator = generator,
cnn = cnn,
loss_function = tf.keras.losses.MeanAbsoluteError(),
wanted_output = 0.5928,
noise_dim = 100,
test_size = 1000,
noise_mean = 0,
noise_stddev = 1.0)
with tensorboard_writer_gen_loss.as_default():
tf.summary.scalar("Loss", gen_loss, step=epoch)
with tensorboard_writer_gen_reg.as_default():
tf.summary.scalar("Loss", gen_reg, step=epoch)
with tensorboard_writer_gen_signed_loss.as_default():
tf.summary.scalar("Loss", gen_signed_loss, step=epoch)
with tensorboard_writer_val_loss.as_default():
tf.summary.scalar("Loss", val_loss, step=epoch)
if (epoch + 1) % args.ckpt_freq == 0:
checkpoint.save(file_prefix= checkpoint_prefix)
logger.set_time_stamp(2)
logger.logs['generator_loss'].append(gen_loss)
logger.logs['generator_reg'].append(gen_reg)
logger.logs['generator_signed_loss'].append(gen_signed_loss)
logger.logs['val_loss'].append(val_loss)
logger.logs['val_pred_mean'].append(val_mean_pred)
logger.logs['val_pred_stddev'].append(val_stddev)
logger.save_logs()
        if logger_plots:
logger.generate_plots(generator= generator,
cnn= cnn,
epoch= epoch,
noise_dim= args.noise_dim,
bins_number= args.bins_number)
logger.print_status(epoch=epoch)
tf.keras.models.save_model(generator, save_dir)
logger.save_metadata(vars(args))
if __name__ == "__main__":
parser = ArgumentParser()
# Model parameters
parser.add_argument("--noise_dim", type=int, default=100)
parser.add_argument("--noise_mean", type=float, default=0.0)
parser.add_argument("--noise_std", type=float, default=1.0)
# Training parameters
parser.add_argument("--batch_size", type=int, default=256)
parser.add_argument("--epochs", type=int, default=500)
parser.add_argument("--reg_coeff", type=float, default=1.0)
parser.add_argument("--lr", type=float, default=1e-3)
# Evaluation parameters
parser.add_argument("--CNN_model_path", type=str, default="./saved_models/CNN_L128_N10000/saved-model.h5")
parser.add_argument("--bins_number", type=int, default=100)
# Save parameters
parser.add_argument("--save_dir", type=str, default="./saved_models/gan_cnn_regression")
parser.add_argument("--ckpt_freq", type=int, default=10)
args = parser.parse_args()
main(args)
| 42.179688
| 113
| 0.65438
|
056147966b1fda2532b1e086eddede0934dd7ef2
| 1,494
|
py
|
Python
|
Setting/config.py
|
jyun790430/CrawlerH
|
4329c42dfbf3eacba9711eaf1dc90106166ecdcc
|
[
"MIT"
] | null | null | null |
Setting/config.py
|
jyun790430/CrawlerH
|
4329c42dfbf3eacba9711eaf1dc90106166ecdcc
|
[
"MIT"
] | 3
|
2021-03-31T19:49:20.000Z
|
2021-12-13T20:38:02.000Z
|
Setting/config.py
|
jyun790430/CrawlerH
|
4329c42dfbf3eacba9711eaf1dc90106166ecdcc
|
[
"MIT"
] | null | null | null |
#coding:utf-8
MYSQL_CONN = {
'host' : '127.0.0.1',
'db': 'crawlh',
'user': 'root',
'passwd': 'root',
'port': 3306
}
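
# Minimal sketch of how MYSQL_CONN could be consumed (pymysql here is an
# assumption for illustration; the crawlers' actual DB layer lives elsewhere):
#
#   import pymysql
#   conn = pymysql.connect(host=MYSQL_CONN['host'], port=MYSQL_CONN['port'],
#                          user=MYSQL_CONN['user'], password=MYSQL_CONN['passwd'],
#                          database=MYSQL_CONN['db'])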
PRONHUB_CATEGORY = [
'https://cn.pornhub.com/video/search?search=%E7%A0%B4%E8%A7%A3'
]
PRONHUB_PREMIUM_CATEGORY = [
'https://cn.pornhubpremium.com/video?c=111&hd=1&premium=1'
]
XVIDEOS_CATRGORY = [
"https://www.xvideos.com/best/2020-01",
"https://www.xvideos.com/best/2019-12"
]
VIDEO_FILE_PATH = "/Users/bryson/videoH"
### Server ID
SERVER_ID = 0
### Thread
THREAD_NUM_XVIDEOX = 1
THREAD_NUM_PRONHUB = 1
THREAD_NUM_PRONHUB_PREMIUM = 1
### Spider Xvideos
XVIDEOS_WAIT_S_NEXT_PAGE_URL = 0
XVIDEOS_WAIT_S_NEXT_VIDEO_URL = 0
### Spider Pronhub
PRONHUB_WAIT_S_NEXT_PAGE_URL = 4
PRONHUB_WAIT_S_NEXT_VIDEO_URL = 30
### Spider Pronhub Preminum
PRONHUB_PREMINUM_WAIT_S_NEXT_PAGE_URL = 4
PRONHUB_PREMINUM_WAIT_S_NEXT_VIDEO_URL = 30
### Download video, wait second to next
XVIDEOS_WAIT_S_NEXT_DOWNLOAD = 20
PRONHUB_WAIT_S_NEXT_DOWNLOAD = 90
PRONHUB_PREMINUM_WAIT_S_NEXT_DOWNLOAD = 90
DOWNLOAD_DURATION = 3600
### Cookie
XVIDEOS_COOKIE = "f1403e7e43a3759dsd8VC07olgiro3pnojtnuaayu13qsS7UkSm5QNdHkVdUC12I23K2bkXZtELu_lr7KMfMaXsPOP8HNLVsgSdZrscRC9gdGIhhSevZctyq31QiKNY-yP2XAC3_K5ofbw_rmbhawtBByNKCjsEddnfItYNB-BV-OhJLRawaeVIVh_FF5bUa-xbBBjP59agkj9Cp5mQNKfWKCxx2e_prj-YQFyFoY_CQgkdnv2OV6SF2Xdg%3D"
PRONHUB_PREMIUM_COOKIE = "v1Uo81aOoMAF0Hy-bBWsqx7eDaLS5-jHQpCp1kQF9GTIExNTg3MjgzNTM4bzJVVEVGd1o2bTl0NG9ydTFGZmdsOG9iVEVJX0ZJMTlJem1hN2Z4TA.."
| avg_line_length: 25.322034 | max_line_length: 273 | alphanum_fraction: 0.789826 |

| hexsha: a69211bb210d960690ff23acf96e009c7d18cf52 | size: 4,083 | ext: py | lang: Python |
| max_stars_repo_path: data_structures/linked_list/deque_doubly.py | max_stars_repo_name: daltonkato/Python | max_stars_repo_head_hexsha: d1b5b0bc5ceb18f5180a13930254d31159c7e648 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null |
| max_issues_repo_path: data_structures/linked_list/deque_doubly.py | max_issues_repo_name: daltonkato/Python | max_issues_repo_head_hexsha: d1b5b0bc5ceb18f5180a13930254d31159c7e648 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: data_structures/linked_list/deque_doubly.py | max_forks_repo_name: daltonkato/Python | max_forks_repo_head_hexsha: d1b5b0bc5ceb18f5180a13930254d31159c7e648 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
"""
Implementing Deque using DoublyLinkedList ...
Operations:
1. insertion in the front -> O(1)
2. insertion in the end -> O(1)
3. remove from the front -> O(1)
4. remove from the end -> O(1)
"""
class _DoublyLinkedBase:
"""A Private class (to be inherited)"""
class _Node:
__slots__ = "_prev", "_data", "_next"
def __init__(self, link_p, element, link_n):
self._prev = link_p
self._data = element
self._next = link_n
def has_next_and_prev(self):
return " Prev -> {0}, Next -> {1}".format(
self._prev is not None, self._next is not None
)
def __init__(self):
self._header = self._Node(None, None, None)
self._trailer = self._Node(None, None, None)
self._header._next = self._trailer
self._trailer._prev = self._header
self._size = 0
def __len__(self):
return self._size
def is_empty(self):
return self.__len__() == 0
def _insert(self, predecessor, e, successor):
# Create new_node by setting it's prev.link -> header
# setting it's next.link -> trailer
new_node = self._Node(predecessor, e, successor)
predecessor._next = new_node
successor._prev = new_node
self._size += 1
return self
def _delete(self, node):
predecessor = node._prev
successor = node._next
predecessor._next = successor
successor._prev = predecessor
self._size -= 1
temp = node._data
node._prev = node._next = node._data = None
del node
return temp
class LinkedDeque(_DoublyLinkedBase):
def first(self):
"""return first element
>>> d = LinkedDeque()
>>> d.add_first('A').first()
'A'
>>> d.add_first('B').first()
'B'
"""
if self.is_empty():
raise Exception("List is empty")
return self._header._next._data
def last(self):
"""return last element
>>> d = LinkedDeque()
>>> d.add_last('A').last()
'A'
>>> d.add_last('B').last()
'B'
"""
if self.is_empty():
raise Exception("List is empty")
return self._trailer._prev._data
    # Deque insert operations (at the front, at the end)
def add_first(self, element):
"""insertion in the front
>>> LinkedDeque().add_first('AV').first()
'AV'
"""
return self._insert(self._header, element, self._header._next)
def add_last(self, element):
"""insertion in the end
>>> LinkedDeque().add_last('B').last()
'B'
"""
return self._insert(self._trailer._prev, element, self._trailer)
    # Deque remove operations (at the front, at the end)
def remove_first(self):
"""removal from the front
>>> d = LinkedDeque()
>>> d.is_empty()
True
>>> d.remove_first()
Traceback (most recent call last):
...
IndexError: remove_first from empty list
>>> d.add_first('A') # doctest: +ELLIPSIS
<data_structures.linked_list.deque_doubly.LinkedDeque object at ...
>>> d.remove_first()
'A'
>>> d.is_empty()
True
"""
if self.is_empty():
raise IndexError("remove_first from empty list")
return self._delete(self._header._next)
def remove_last(self):
"""removal in the end
>>> d = LinkedDeque()
>>> d.is_empty()
True
>>> d.remove_last()
Traceback (most recent call last):
...
        IndexError: remove_last from empty list
>>> d.add_first('A') # doctest: +ELLIPSIS
<data_structures.linked_list.deque_doubly.LinkedDeque object at ...
>>> d.remove_last()
'A'
>>> d.is_empty()
True
"""
if self.is_empty():
raise IndexError("remove_first from empty list")
return self._delete(self._trailer._prev)
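

# Usage sketch (illustrative only): the deque supports O(1) insertion and
# removal at both ends, so it works as either a queue or a stack.
#
#   d = LinkedDeque()
#   d.add_last('A')      # A
#   d.add_last('B')      # A, B
#   d.add_first('C')     # C, A, B
#   d.remove_first()     # -> 'C'
#   d.remove_last()      # -> 'B'
#   len(d)               # -> 1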
| avg_line_length: 28.354167 | max_line_length: 75 | alphanum_fraction: 0.55278 |

| hexsha: 401e81bb2bdd923f8c901b49121472e38efa1cc1 | size: 35,636 | ext: py | lang: Python |
| max_stars_repo_path: irlmethods/general_deep_maxent.py | max_stars_repo_name: ranok92/deepirl | max_stars_repo_head_hexsha: 88c7e76986243cf0b988d8d7dc0eef6b58e07864 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2019-01-04T22:03:15.000Z | max_stars_repo_stars_event_max_datetime: 2019-04-03T00:16:11.000Z |
| max_issues_repo_path: irlmethods/general_deep_maxent.py | max_issues_repo_name: ranok92/deepirl | max_issues_repo_head_hexsha: 88c7e76986243cf0b988d8d7dc0eef6b58e07864 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: irlmethods/general_deep_maxent.py | max_forks_repo_name: ranok92/deepirl | max_forks_repo_head_hexsha: 88c7e76986243cf0b988d8d7dc0eef6b58e07864 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
"""
Implements deep maxent IRL (Wulfmeier et al.) in a general, feature-type
agnostic way.
"""
import sys
import random
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from tensorboardX import SummaryWriter
sys.path.insert(0, "..")
from neural_nets.base_network import BaseNN
from irlmethods.irlUtils import play_features as play
from irlmethods.irlUtils import lcr_regularizer, monotonic_regularizer
from rlmethods.rlutils import play_complete
import utils
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
class RewardNet(BaseNN):
"""Reward network"""
def __init__(self, state_dims, hidden_dims=128):
super(RewardNet, self).__init__()
self.input = nn.Linear(state_dims, hidden_dims)
self.linear1 = nn.Linear(hidden_dims, hidden_dims)
self.linear2 = nn.Linear(hidden_dims, hidden_dims)
self.head = nn.Linear(hidden_dims, 1)
def forward(self, x):
x = F.relu(self.input(x))
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = torch.tanh(self.head(x))
return x
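

# Shape sketch (the dimensions below are illustrative assumptions, not values
# used elsewhere in this module): RewardNet maps a batch of feature vectors to
# scalar rewards squashed into (-1, 1) by the tanh head.
#
#   reward_net = RewardNet(state_dims=29, hidden_dims=256).to(DEVICE)
#   features = torch.randn(8, 29, device=DEVICE)   # batch of 8 feature vectors
#   rewards = reward_net(features)                  # tensor of shape (8, 1)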
class GeneralDeepMaxent:
"""
    Implements deep maxent IRL (Wulfmeier et al.) in a state-type agnostic way.
"""
def __init__(
self,
rl,
env,
expert_trajectories,
learning_rate=1e-3,
l2_regularization=1e-5,
save_folder="./",
saving_interval=10,
):
# RL related
self.rl = rl
self.feature_extractor = self.rl.feature_extractor
# environment attributes
self.env = env
state_size = self.feature_extractor.extract_features(
env.reset()
).shape[0]
# reward net
self.reward_net = RewardNet(state_size, hidden_dims=256)
self.reward_net = self.reward_net.to(DEVICE)
self.reward_optim = Adam(
self.reward_net.parameters(),
lr=learning_rate,
weight_decay=l2_regularization,
)
# expert info
self.expert_trajectories = [
traj.to(torch.float).to(DEVICE) for traj in expert_trajectories
]
# logging and saving
self.save_path = Path(save_folder)
self.tbx_writer = SummaryWriter(
str(self.save_path / "tensorboard_logs")
)
# highjack RL method's tbx_writer
self.rl.tbx_writer = self.tbx_writer
self.data_table = utils.DataTable()
# training meta
self.training_i = 0
self.saving_interval = saving_interval
def save_models(self, filename=None):
self.rl.policy.save(str(self.save_path / "policy"), filename=filename)
self.reward_net.save(
str(self.save_path / "reward_net"), filename=filename
)
def generate_trajectories(
self, num_trajectories, max_env_steps, stochastic,
):
"""
        Generate trajectories in the environment using the learned RL policy.
:param num_trajectories: number of trajectories to generate.
:type num_trajectories: int
        :param max_env_steps: max steps to take in the environment (rollout length).
        :type max_env_steps: int
        :param stochastic: whether to sample actions stochastically from the policy.
        :type stochastic: bool
:return: list of features encountered in playthrough.
:rtype: list of tensors of shape (num_states x feature_length)
"""
states = []
for _ in range(num_trajectories):
generated_states = play(
self.env,
self.rl.policy,
self.feature_extractor,
max_env_steps,
stochastic,
)
states.append(generated_states)
return states
def discounted_rewards(self, rewards, gamma, account_for_terminal_state):
discounted_sum = 0
t = 0
gamma_t = 1
for t, reward in enumerate(rewards[:-1]):
discounted_sum += gamma_t * reward
gamma_t *= gamma
if account_for_terminal_state:
discounted_sum += (
(gamma / (1 - gamma)) * gamma ** (t + 1) * rewards[-1]
)
else:
discounted_sum += gamma_t * rewards[-1]
return discounted_sum
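    # Worked example (illustrative): for rewards = [1.0, 1.0, 1.0] and
    # gamma = 0.9 the sum is 1 + 0.9 + 0.81 = 2.71 without terminal accounting,
    # and 1 + 0.9 + (0.9 / 0.1) * 0.81 = 9.19 with it, i.e. the final state is
    # treated as absorbing and its reward is discounted over an infinite tail.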
def train_episode(
self,
num_rl_episodes,
max_rl_episode_length,
num_trajectory_samples,
max_env_steps,
reset_training,
account_for_terminal_state,
gamma,
stochastic_sampling,
):
"""
perform IRL training.
:param num_rl_episodes: Number of RL iterations for this IRL iteration.
:type num_rl_episodes: int.
:param max_rl_episode_length: maximum number of environment steps to
take when doing rollouts using learned RL agent.
:type max_rl_episode_length: int
:param num_trajectory_samples: Number of trajectories to sample using
learned RL agent.
:type num_trajectory_samples: int
:param max_env_steps: maximum number of environment steps to take,
both when training RL agent and when generating rollouts.
:type max_env_steps: int
:param reset_training: Whether to reset RL training every iteration
or not.
:type reset_training: Boolean.
:param account_for_terminal_state: Whether to account for a state
        being terminal or not. If true, (gamma/(1-gamma))*R will be imitated
by padding the trajectory with its ending state until max_env_steps
length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
:type account_for_terminal_state: Boolean.
:param gamma: The discounting factor.
:type gamma: float.
:param stochastic_sampling: Sample trajectories using stochastic
policy instead of deterministic 'best action policy'
:type stochastic_sampling: Boolean.
"""
# expert loss
expert_loss = 0
for traj in self.expert_trajectories:
expert_rewards = self.reward_net(traj)
expert_loss += self.discounted_rewards(
expert_rewards, gamma, account_for_terminal_state
)
# policy loss
trajectories = self.generate_trajectories(
num_trajectory_samples, max_env_steps, stochastic_sampling
)
policy_loss = 0
for traj in trajectories:
policy_rewards = self.reward_net(traj)
policy_loss += self.discounted_rewards(
policy_rewards, gamma, account_for_terminal_state
)
policy_loss = (
len(self.expert_trajectories) / num_trajectory_samples
) * policy_loss
# Backpropagate IRL loss
loss = policy_loss - expert_loss
self.reward_optim.zero_grad()
loss.backward()
self.reward_optim.step()
# train RL agent
if reset_training:
self.rl.reset_training()
self.rl.train(
num_rl_episodes,
max_rl_episode_length,
reward_network=self.reward_net,
)
# logging
self.tbx_writer.add_scalar(
"IRL/policy_loss", policy_loss, self.training_i
)
self.tbx_writer.add_scalar(
"IRL/expert_loss", expert_loss, self.training_i
)
self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
self.data_table.add_row(
{
"IRL/policy_loss": policy_loss.item(),
"IRL/expert_loss": expert_loss.item(),
"IRL/total_loss": loss.item(),
},
self.training_i,
)
# save policy and reward network
# TODO: make a uniform dumping function for all agents.
self.save_models(filename="{}.pt".format(self.training_i))
# increment training counter
self.training_i += 1
def train(
self,
num_irl_episodes,
num_rl_episodes,
max_rl_episode_length,
num_trajectory_samples,
max_env_steps,
reset_training=False,
account_for_terminal_state=False,
gamma=0.99,
stochastic_sampling=False,
):
"""
Runs the train_episode() function for 'num_irl_episodes' times. Other
parameters are identical to the aforementioned function, with the same
description and requirements.
"""
for _ in range(num_irl_episodes):
print("IRL episode {}".format(self.training_i), end="\r")
self.train_episode(
num_rl_episodes,
max_rl_episode_length,
num_trajectory_samples,
max_env_steps,
reset_training,
account_for_terminal_state,
gamma,
stochastic_sampling,
)
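

# Hypothetical wiring sketch (every name below other than GeneralDeepMaxent is
# an assumption; the real entry point lives elsewhere in the repository):
#
#   irl = GeneralDeepMaxent(rl=rl_agent, env=env,
#                           expert_trajectories=expert_feature_tensors,
#                           save_folder='./runs/maxent')
#   irl.train(num_irl_episodes=200, num_rl_episodes=50,
#             max_rl_episode_length=500, num_trajectory_samples=20,
#             max_env_steps=500)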
class MixingDeepMaxent(GeneralDeepMaxent):
def __init__(
self,
rl,
env,
expert_trajectories,
learning_rate=0.001,
l2_regularization=1e-05,
save_folder="./",
saving_interval=25,
):
super().__init__(
rl,
env,
expert_trajectories,
learning_rate=learning_rate,
l2_regularization=l2_regularization,
save_folder=save_folder,
saving_interval=saving_interval,
)
# expert and training datasets
self.all_trajectories = random.sample(
expert_trajectories, len(expert_trajectories)
)
self.expert_label_trajectories = [
traj.to(torch.float).to(DEVICE)
for traj in self.all_trajectories[
: len(self.all_trajectories) // 2
]
]
self.expert_train_trajectories = [
traj.to(torch.float).to(DEVICE)
for traj in self.all_trajectories[
len(self.all_trajectories) // 2 :
]
]
self.pre_data_table = utils.DataTable()
# initial model save
self.save_models(filename="initial_save.pt")
def train_episode(
self,
num_rl_episodes,
max_rl_episode_length,
max_env_steps,
reset_training,
account_for_terminal_state,
gamma,
stochastic_sampling,
num_expert_samples,
num_policy_samples,
):
"""
perform IRL with mix-in of expert samples.
:param num_rl_episodes: Number of RL iterations for this IRL iteration.
:type num_rl_episodes: int.
:param max_rl_episode_length: maximum number of environment steps to
take when doing rollouts using learned RL agent.
:type max_rl_episode_length: int
:param num_trajectory_samples: Number of trajectories to sample using
learned RL agent.
:type num_trajectory_samples: int
:param max_env_steps: maximum number of environment steps to take,
both when training RL agent and when generating rollouts.
:type max_env_steps: int
:param reset_training: Whether to reset RL training every iteration
or not.
:type reset_training: Boolean.
:param account_for_terminal_state: Whether to account for a state
            being terminal or not. If true, (gamma/(1-gamma))*R will be imitated
by padding the trajectory with its ending state until max_env_steps
length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
:type account_for_terminal_state: Boolean.
:param gamma: The discounting factor.
:type gamma: float.
:param stochastic_sampling: Sample trajectories using stochastic
policy instead of deterministic 'best action policy'
:type stochastic_sampling: Boolean.
"""
# expert loss
expert_loss = 0
expert_samples = random.sample(
self.expert_trajectories, num_expert_samples
)
for traj in expert_samples:
expert_rewards = self.reward_net(traj)
expert_loss += self.discounted_rewards(
expert_rewards, gamma, account_for_terminal_state
)
# policy loss
trajectories = self.generate_trajectories(
num_expert_samples // 2, max_env_steps, stochastic_sampling
)
# mix in expert samples.
trajectories.extend(
random.sample(self.expert_trajectories, num_policy_samples // 2)
)
policy_loss = 0
for traj in trajectories:
policy_rewards = self.reward_net(traj)
policy_loss += self.discounted_rewards(
policy_rewards, gamma, account_for_terminal_state
)
policy_loss = (num_expert_samples / num_policy_samples) * policy_loss
# Backpropagate IRL loss
loss = policy_loss - expert_loss
self.reward_optim.zero_grad()
loss.backward()
self.reward_optim.step()
# train RL agent
if reset_training:
self.rl.reset_training()
self.rl.train(
num_rl_episodes,
max_rl_episode_length,
reward_network=self.reward_net,
)
# logging
self.tbx_writer.add_scalar(
"IRL/policy_loss", policy_loss, self.training_i
)
self.tbx_writer.add_scalar(
"IRL/expert_loss", expert_loss, self.training_i
)
self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
self.data_table.add_row(
{
"IRL/policy_loss": policy_loss.item(),
"IRL/expert_loss": expert_loss.item(),
"IRL/total_loss": loss.item(),
},
self.training_i,
)
# save policy and reward network
# TODO: make a uniform dumping function for all agents.
if (self.training_i + 1) % self.saving_interval == 0:
self.save_models(filename="{}.pt".format(self.training_i))
# increment training counter
self.training_i += 1
def pre_train_episode(
self, num_trajectory_samples, account_for_terminal_state, gamma,
):
"""
perform IRL pre-training by using only expert samples.
        :param num_trajectory_samples: Number of expert trajectories to sample
            from each half of the expert demonstration split.
:type num_trajectory_samples: int
:param account_for_terminal_state: Whether to account for a state
            being terminal or not. If true, (gamma/(1-gamma))*R will be imitated
by padding the trajectory with its ending state until max_env_steps
length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
:type account_for_terminal_state: Boolean.
:param gamma: The discounting factor.
:type gamma: float.
"""
# expert loss
expert_loss = 0
expert_sample = random.sample(
self.expert_label_trajectories, num_trajectory_samples
)
for traj in expert_sample:
expert_rewards = self.reward_net(traj)
expert_loss += self.discounted_rewards(
expert_rewards, gamma, account_for_terminal_state
)
# policy loss
trajectories = random.sample(
self.expert_train_trajectories, num_trajectory_samples
)
generator_loss = 0
for traj in trajectories:
policy_rewards = self.reward_net(traj)
generator_loss += self.discounted_rewards(
policy_rewards, gamma, account_for_terminal_state
)
generator_loss = (
len(self.expert_trajectories) / num_trajectory_samples
) * generator_loss
# Backpropagate IRL loss
loss = generator_loss - expert_loss
self.reward_optim.zero_grad()
loss.backward()
self.reward_optim.step()
# logging
self.tbx_writer.add_scalar(
"pre_IRL/generator_loss", generator_loss, self.training_i
)
self.tbx_writer.add_scalar(
"pre_IRL/expert_loss", expert_loss, self.training_i
)
self.tbx_writer.add_scalar("pre_IRL/total_loss", loss, self.training_i)
self.pre_data_table.add_row(
{
"pre_IRL/policy_loss": generator_loss.item(),
"pre_IRL/expert_loss": expert_loss.item(),
"pre_IRL/total_loss": loss.item(),
},
self.training_i,
)
# save policy and reward network
self.reward_net.save(
str(self.save_path / "reward_net"),
filename="pre_{}.pt".format(self.training_i),
)
# increment training counter
self.training_i += 1
def pre_train(
self,
num_pretrain_episodes,
num_trajectory_samples,
account_for_terminal_state=False,
gamma=0.99,
):
"""
        Runs pre_train_episode() 'num_pretrain_episodes' times. The remaining
        parameters are identical to those of pre_train_episode(), with the same
        description and requirements.
"""
for _ in range(num_pretrain_episodes):
print(
"IRL pre-training episode {}".format(self.training_i), end="\r"
)
self.pre_train_episode(
num_trajectory_samples, account_for_terminal_state, gamma
)
def train(
self,
num_irl_episodes,
num_rl_episodes,
max_rl_episode_length,
max_env_steps,
reset_training=False,
account_for_terminal_state=False,
gamma=0.99,
stochastic_sampling=False,
num_expert_samples=64,
num_policy_samples=64,
):
"""
        Runs train_episode() 'num_irl_episodes' times. The remaining
        parameters are identical to those of train_episode(), with the same
        description and requirements.
"""
for _ in range(num_irl_episodes):
print("IRL episode {}".format(self.training_i), end="\r")
self.train_episode(
num_rl_episodes,
max_rl_episode_length,
max_env_steps,
reset_training,
account_for_terminal_state,
gamma,
stochastic_sampling,
num_expert_samples,
num_policy_samples,
)
# final model save
self.save_models(filename="final.pt")
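# --- Hypothetical usage sketch (added for illustration; not part of the original module). ---
# MixingDeepMaxent splits the expert demonstrations in half: pre_train()
# fits the reward net on expert data alone (one half plays the "expert"
# role, the other stands in for the policy), after which train() runs the
# mixed expert/policy updates above. `rl_agent`, `env` and `experts` are
# assumed placeholders as in the earlier sketch; sample sizes are arbitrary.
def _example_mixing_deep_maxent_run(rl_agent, env, experts):
    irl = MixingDeepMaxent(rl_agent, env, experts, save_folder="./runs")
    irl.pre_train(num_pretrain_episodes=100, num_trajectory_samples=16)
    irl.train(
        num_irl_episodes=10,
        num_rl_episodes=50,
        max_rl_episode_length=200,
        max_env_steps=200,
        num_expert_samples=64,
        num_policy_samples=64,
    )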
class GCL(MixingDeepMaxent):
def generate_trajectories(self, num_trajectories, max_env_steps, ped_id=None):
"""
        Generate trajectories in the environment using the learned RL policy.
        :param num_trajectories: number of trajectories to generate.
        :type num_trajectories: int
        :param max_env_steps: max steps to take in environment (rollout length).
        :type max_env_steps: int
        :param ped_id: optional id forwarded to play_complete.
        :return: list of transition buffers, one per generated rollout.
        :rtype: list
"""
buffers = []
for _ in range(num_trajectories):
generated_buffer = play_complete(
self.rl.policy,
self.env,
self.feature_extractor,
max_env_steps,
ped_id=ped_id
)
buffers.append(generated_buffer)
return buffers
def train_episode(
self,
num_rl_episodes,
max_rl_episode_length,
max_env_steps,
reset_training,
account_for_terminal_state,
gamma,
stochastic_sampling,
num_expert_samples,
num_policy_samples,
):
"""
perform IRL with mix-in of expert samples.
:param num_rl_episodes: Number of RL iterations for this IRL iteration.
:type num_rl_episodes: int.
:param max_rl_episode_length: maximum number of environment steps to
take when doing rollouts using learned RL agent.
:type max_rl_episode_length: int
:param num_trajectory_samples: Number of trajectories to sample using
learned RL agent.
:type num_trajectory_samples: int
:param max_env_steps: maximum number of environment steps to take,
both when training RL agent and when generating rollouts.
:type max_env_steps: int
:param reset_training: Whether to reset RL training every iteration
or not.
:type reset_training: Boolean.
:param account_for_terminal_state: Whether to account for a state
            being terminal or not. If true, (gamma/(1-gamma))*R will be imitated
by padding the trajectory with its ending state until max_env_steps
length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
:type account_for_terminal_state: Boolean.
:param gamma: The discounting factor.
:type gamma: float.
:param stochastic_sampling: Sample trajectories using stochastic
policy instead of deterministic 'best action policy'
:type stochastic_sampling: Boolean.
"""
# regularizers
g_lcr = 0
g_mono = 0
# expert loss
expert_loss = 0
expert_samples = random.sample(
self.expert_trajectories, num_expert_samples
)
for traj in expert_samples:
expert_rewards = self.reward_net(traj)
# update regularizers
g_lcr += lcr_regularizer(expert_rewards)
g_mono += monotonic_regularizer(expert_rewards)
expert_loss += self.discounted_rewards(
expert_rewards, gamma, account_for_terminal_state
)
# policy loss
trajectories = self.generate_trajectories(
num_expert_samples, max_env_steps
)
rewards = []
log_pis = []
for traj in trajectories:
states = [
torch.from_numpy(tran.state).to(torch.float).to(DEVICE)
for tran in traj
]
states.append(
torch.from_numpy(traj[-1].next_state)
.to(torch.float)
.to(DEVICE)
)
states = torch.stack(states)
reward = self.reward_net(states)
            # update regularizers
            g_lcr += lcr_regularizer(reward)
            g_mono += monotonic_regularizer(reward)
reward_sum = self.discounted_rewards(reward, gamma, traj[-1].done)
rewards.append(reward_sum)
log_pi = [
torch.from_numpy(tran.action_log_prob)
.to(torch.float)
.to(DEVICE)
for tran in traj
]
log_pis.append(torch.tensor(log_pi).sum())
# log sum exp trick
exponents = torch.cat(rewards) - torch.tensor(log_pis).to(DEVICE)
max_exponent = torch.max(exponents)
log_Z = max_exponent + torch.log(
torch.exp(exponents - max_exponent).sum()
)
policy_loss = log_Z
policy_loss = (num_expert_samples) * policy_loss
# Backpropagate IRL loss
loss = policy_loss - expert_loss + g_mono + g_lcr
self.reward_optim.zero_grad()
loss.backward()
self.reward_optim.step()
# train RL agent
if reset_training:
self.rl.reset_training()
self.rl.train(
num_rl_episodes,
max_rl_episode_length,
reward_network=self.reward_net,
)
# logging
self.tbx_writer.add_scalar(
"IRL/policy_loss", policy_loss, self.training_i
)
self.tbx_writer.add_scalar(
"IRL/expert_loss", expert_loss, self.training_i
)
self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
self.tbx_writer.add_scalar("IRL/log_Z", log_Z.item(), self.training_i)
self.data_table.add_row(
{
"IRL/policy_loss": policy_loss.item(),
"IRL/expert_loss": expert_loss.item(),
"IRL/total_loss": loss.item(),
"IRL/log_Z": log_Z.item(),
},
self.training_i,
)
# save policy and reward network
# TODO: make a uniform dumping function for all agents.
if (self.training_i + 1) % self.saving_interval == 0:
self.save_models(filename="{}.pt".format(self.training_i))
# increment training counter
self.training_i += 1
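# --- Illustrative helper (added; not part of the original module). ---
# GCL.train_episode above estimates log Z = log sum_i exp(R_i - log pi_i)
# with the log-sum-exp trick: subtracting the largest exponent before
# exponentiating keeps the sum finite even for large returns. A minimal
# standalone sketch of the same identity (torch is already imported by
# this module, as used throughout):
def _log_sum_exp_sketch(exponents):
    """exponents: 1-D torch tensor of (return - log-probability) values."""
    max_exponent = torch.max(exponents)
    # log sum_i exp(x_i) = x_max + log sum_i exp(x_i - x_max)
    return max_exponent + torch.log(torch.exp(exponents - max_exponent).sum())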
class PerTrajGCL(GCL):
def train_episode(
self,
num_rl_episodes,
max_rl_episode_length,
max_env_steps,
reset_training,
account_for_terminal_state,
gamma,
stochastic_sampling,
num_expert_samples,
num_policy_samples,
):
"""
perform IRL with mix-in of expert samples.
:param num_rl_episodes: Number of RL iterations for this IRL iteration.
:type num_rl_episodes: int.
:param max_rl_episode_length: maximum number of environment steps to
take when doing rollouts using learned RL agent.
:type max_rl_episode_length: int
:param num_trajectory_samples: Number of trajectories to sample using
learned RL agent.
:type num_trajectory_samples: int
:param max_env_steps: maximum number of environment steps to take,
both when training RL agent and when generating rollouts.
:type max_env_steps: int
:param reset_training: Whether to reset RL training every iteration
or not.
:type reset_training: Boolean.
:param account_for_terminal_state: Whether to account for a state
            being terminal or not. If true, (gamma/(1-gamma))*R will be imitated
by padding the trajectory with its ending state until max_env_steps
length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
:type account_for_terminal_state: Boolean.
:param gamma: The discounting factor.
:type gamma: float.
:param stochastic_sampling: Sample trajectories using stochastic
policy instead of deterministic 'best action policy'
:type stochastic_sampling: Boolean.
"""
# regularizers
g_lcr = 0
g_mono = 0
# expert loss
expert_loss = 0
expert_samples = random.sample(
list(enumerate(self.expert_trajectories)), num_expert_samples
)
for _, traj in expert_samples:
expert_rewards = self.reward_net(traj)
expert_loss += self.discounted_rewards(
expert_rewards, gamma, account_for_terminal_state
)
# update regularizers
g_lcr += lcr_regularizer(expert_rewards)
g_mono += monotonic_regularizer(expert_rewards)
# policy loss
trajectories = []
for idx, _ in expert_samples:
trajectories.extend(
self.generate_trajectories(
num_policy_samples, max_env_steps, idx + 1
)
)
policy_loss = 0
# mix in expert samples.
expert_mixin_samples = random.sample(
self.expert_trajectories, num_policy_samples // 2
)
rewards = []
log_pis = []
for traj in trajectories:
states = [
torch.from_numpy(tran.state).to(torch.float).to(DEVICE)
for tran in traj
]
states.append(
torch.from_numpy(traj[-1].next_state)
.to(torch.float)
.to(DEVICE)
)
states = torch.stack(states)
reward = self.reward_net(states)
# update regularizers
g_lcr += lcr_regularizer(reward)
g_mono += monotonic_regularizer(reward)
reward_sum = self.discounted_rewards(reward, gamma, traj[-1].done)
rewards.append(reward_sum)
log_pi = [
torch.from_numpy(tran.action_log_prob)
.to(torch.float)
.to(DEVICE)
for tran in traj
]
log_pis.append(torch.tensor(log_pi).sum())
# log sum exp trick
exponents = torch.cat(rewards) - torch.tensor(log_pis).to(DEVICE)
max_exponent = torch.max(exponents)
log_Z = max_exponent + torch.log(
torch.exp(exponents - max_exponent).sum()
)
policy_loss += log_Z
policy_loss = (num_expert_samples) * policy_loss
# Backpropagate IRL loss
loss = policy_loss - expert_loss + g_mono + g_lcr
self.reward_optim.zero_grad()
loss.backward()
self.reward_optim.step()
# train RL agent
if reset_training:
self.rl.reset_training()
self.rl.train(
num_rl_episodes,
max_rl_episode_length,
reward_network=self.reward_net,
)
# logging
self.tbx_writer.add_scalar(
"IRL/policy_loss", policy_loss, self.training_i
)
self.tbx_writer.add_scalar(
"IRL/expert_loss", expert_loss, self.training_i
)
self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
self.tbx_writer.add_scalar("IRL/log_Z", log_Z.item(), self.training_i)
self.data_table.add_row(
{
"IRL/policy_loss": policy_loss.item(),
"IRL/expert_loss": expert_loss.item(),
"IRL/total_loss": loss.item(),
"IRL/log_Z": log_Z.item(),
},
self.training_i,
)
# save policy and reward network
# TODO: make a uniform dumping function for all agents.
if (self.training_i + 1) % self.saving_interval == 0:
self.save_models(filename="{}.pt".format(self.training_i))
# increment training counter
self.training_i += 1
class ExpertOnlyMaxent:
"""
Implements expert only deep maxent, using only expert demonstrations and
no environment interaction.
"""
def __init__(
self,
state_size,
expert_trajectories,
learning_rate=1e-3,
l2_regularization=1e-5,
save_folder="./",
):
# reward net
self.reward_net = RewardNet(state_size, hidden_dims=256)
self.reward_net = self.reward_net.to(DEVICE)
self.reward_optim = Adam(
self.reward_net.parameters(),
lr=learning_rate,
weight_decay=l2_regularization,
)
# expert and training datasets
self.all_trajectories = random.sample(
expert_trajectories, len(expert_trajectories)
)
self.expert_trajectories = [
traj.to(torch.float).to(DEVICE)
for traj in self.all_trajectories[
: len(self.all_trajectories) // 2
]
]
self.training_trajectories = [
traj.to(torch.float).to(DEVICE)
for traj in self.all_trajectories[
len(self.all_trajectories) // 2 :
]
]
# logging and saving
self.save_path = Path(save_folder)
self.tbx_writer = SummaryWriter(
str(self.save_path / "tensorboard_logs")
)
self.data_table = utils.DataTable()
# training meta
self.training_i = 0
def discounted_rewards(self, rewards, gamma, account_for_terminal_state):
discounted_sum = 0
t = 0
gamma_t = 1
for t, reward in enumerate(rewards[:-1]):
discounted_sum += gamma_t * reward
gamma_t *= gamma
if account_for_terminal_state:
discounted_sum += (
(gamma / (1 - gamma)) * gamma ** (t + 1) * rewards[-1]
)
else:
discounted_sum += gamma_t * rewards[-1]
return discounted_sum
def train_episode(
self, num_trajectory_samples, account_for_terminal_state, gamma,
):
"""
        Perform an IRL training episode using only expert demonstrations
        (no environment interaction).
        :param num_trajectory_samples: Number of trajectories to sample from
            each half of the expert demonstration split.
        :type num_trajectory_samples: int
:param account_for_terminal_state: Whether to account for a state
            being terminal or not. If true, (gamma/(1-gamma))*R will be imitated
by padding the trajectory with its ending state until max_env_steps
length is reached. e.g. if max_env_steps is 5, the trajectory [s_0,
s_1, s_2] will be padded to [s_0, s_1, s_2, s_2, s_2].
:type account_for_terminal_state: Boolean.
:param gamma: The discounting factor.
:type gamma: float.
"""
# expert loss
expert_loss = 0
expert_sample = random.sample(
self.expert_trajectories, num_trajectory_samples
)
for traj in expert_sample:
expert_rewards = self.reward_net(traj)
expert_loss += self.discounted_rewards(
expert_rewards, gamma, account_for_terminal_state
)
# policy loss
trajectories = random.sample(
self.training_trajectories, num_trajectory_samples
)
generator_loss = 0
for traj in trajectories:
policy_rewards = self.reward_net(traj)
generator_loss += self.discounted_rewards(
policy_rewards, gamma, account_for_terminal_state
)
generator_loss = (
len(self.expert_trajectories) / num_trajectory_samples
) * generator_loss
# Backpropagate IRL loss
loss = generator_loss - expert_loss
self.reward_optim.zero_grad()
loss.backward()
self.reward_optim.step()
# logging
self.tbx_writer.add_scalar(
"IRL/generator_loss", generator_loss, self.training_i
)
self.tbx_writer.add_scalar(
"IRL/expert_loss", expert_loss, self.training_i
)
self.tbx_writer.add_scalar("IRL/total_loss", loss, self.training_i)
self.data_table.add_row(
{
"IRL/policy_loss": generator_loss.item(),
"IRL/expert_loss": expert_loss.item(),
"IRL/total_loss": loss.item(),
},
self.training_i,
)
# save policy and reward network
self.reward_net.save(str(self.save_path / "reward_net"))
# increment training counter
self.training_i += 1
def train(
self,
num_episodes,
num_trajectory_samples,
account_for_terminal_state=False,
gamma=0.99,
):
"""
        Runs train_episode() 'num_episodes' times. The remaining
        parameters are identical to those of train_episode(), with the same
        description and requirements.
"""
for _ in range(num_episodes):
print(
"IRL pre-training episode {}".format(self.training_i), end="\r"
)
self.train_episode(
num_trajectory_samples, account_for_terminal_state, gamma
)
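# --- Illustrative check (added; not part of the original module). ---
# discounted_rewards() above either weights the final reward by its usual
# discount factor, or, when account_for_terminal_state is True, multiplies
# that weight by gamma / (1 - gamma) to imitate receiving the terminal
# reward forever. A tiny numeric sketch with plain floats:
def _discounted_rewards_sketch():
    rewards = [1.0, 1.0, 1.0]
    gamma = 0.9
    # plain discounting: 1 + 0.9 + 0.81 = 2.71
    plain = sum(gamma ** t * r for t, r in enumerate(rewards))
    # terminal-state weighting: 1 + 0.9 + (0.9 / 0.1) * 0.81 = 9.19
    padded = rewards[0] + gamma * rewards[1] + (gamma / (1 - gamma)) * gamma ** 2 * rewards[2]
    return plain, padded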
| 31.425044
| 82
| 0.599282
|
3a19ce4041e2b28589f65f5103ddcbb885dafa55
| 5,286
|
py
|
Python
|
prim's-algo-py.py
|
kapilbindal/Python-for-beginner
|
a3e02dc3fe1b847dbda24455400b52b9408a61dd
|
[
"MIT"
] | 48
|
2018-10-05T19:29:56.000Z
|
2021-12-31T04:59:37.000Z
|
prim's-algo-py.py
|
kapilbindal/Python-for-beginner
|
a3e02dc3fe1b847dbda24455400b52b9408a61dd
|
[
"MIT"
] | 127
|
2018-10-05T19:34:28.000Z
|
2022-02-10T08:50:16.000Z
|
prim's-algo-py.py
|
kapilbindal/Python-for-beginner
|
a3e02dc3fe1b847dbda24455400b52b9408a61dd
|
[
"MIT"
] | 168
|
2018-10-05T19:30:01.000Z
|
2022-01-10T11:28:21.000Z
|
class Graph:
def __init__(self):
# dictionary containing keys that map to the corresponding vertex object
self.vertices = {}
def add_vertex(self, key):
"""Add a vertex with the given key to the graph."""
vertex = Vertex(key)
self.vertices[key] = vertex
def get_vertex(self, key):
"""Return vertex object with the corresponding key."""
return self.vertices[key]
def __contains__(self, key):
return key in self.vertices
def add_edge(self, src_key, dest_key, weight=1):
"""Add edge from src_key to dest_key with given weight."""
self.vertices[src_key].add_neighbour(self.vertices[dest_key], weight)
def does_edge_exist(self, src_key, dest_key):
"""Return True if there is an edge from src_key to dest_key."""
return self.vertices[src_key].does_it_point_to(self.vertices[dest_key])
def display(self):
print('Vertices: ', end='')
for v in self:
print(v.get_key(), end=' ')
print()
print('Edges: ')
for v in self:
for dest in v.get_neighbours():
w = v.get_weight(dest)
print('(src={}, dest={}, weight={}) '.format(v.get_key(),
dest.get_key(), w))
def __len__(self):
return len(self.vertices)
def __iter__(self):
return iter(self.vertices.values())
class Vertex:
def __init__(self, key):
self.key = key
self.points_to = {}
def get_key(self):
"""Return key corresponding to this vertex object."""
return self.key
def add_neighbour(self, dest, weight):
"""Make this vertex point to dest with given edge weight."""
self.points_to[dest] = weight
def get_neighbours(self):
"""Return all vertices pointed to by this vertex."""
return self.points_to.keys()
def get_weight(self, dest):
"""Get weight of edge from this vertex to dest."""
return self.points_to[dest]
def does_it_point_to(self, dest):
"""Return True if this vertex points to dest."""
return dest in self.points_to
def mst_prim(g):
"""Return a minimum cost spanning tree of the connected graph g."""
mst = Graph() # create new Graph object to hold the MST
# if graph is empty
if not g:
return mst
nearest_neighbour = {}
smallest_distance = {}
unvisited = set(g)
u = next(iter(g)) # select any one vertex from g
mst.add_vertex(u.get_key()) # add a copy of it to the MST
unvisited.remove(u)
for n in u.get_neighbours():
if n is u:
continue
nearest_neighbour[n] = mst.get_vertex(u.get_key())
smallest_distance[n] = u.get_weight(n)
    while smallest_distance:
outside_mst = min(smallest_distance, key=smallest_distance.get)
# get the nearest neighbour inside the MST
inside_mst = nearest_neighbour[outside_mst]
mst.add_vertex(outside_mst.get_key())
mst.add_edge(outside_mst.get_key(), inside_mst.get_key(),
smallest_distance[outside_mst])
mst.add_edge(inside_mst.get_key(), outside_mst.get_key(),
smallest_distance[outside_mst])
unvisited.remove(outside_mst)
del smallest_distance[outside_mst]
del nearest_neighbour[outside_mst]
for n in outside_mst.get_neighbours():
if n in unvisited:
if n not in smallest_distance:
smallest_distance[n] = outside_mst.get_weight(n)
nearest_neighbour[n] = mst.get_vertex(outside_mst.get_key())
else:
if smallest_distance[n] > outside_mst.get_weight(n):
smallest_distance[n] = outside_mst.get_weight(n)
nearest_neighbour[n] = mst.get_vertex(outside_mst.get_key())
return mst
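# Example (added for illustration): non-interactive use of the classes above.
# Builds a small undirected weighted graph and prints its minimum spanning
# tree; call example_mst() to try it without the menu-driven loop below.
def example_mst():
    graph = Graph()
    for key in (1, 2, 3, 4):
        graph.add_vertex(key)
    for src, dest, weight in ((1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3), (1, 3, 4)):
        # undirected graph: add the edge in both directions, as the menu does
        graph.add_edge(src, dest, weight)
        graph.add_edge(dest, src, weight)
    mst_prim(graph).display()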
g = Graph()
print('Undirected Graph')
print('Menu')
print('add vertex <key>')
print('add edge <src> <dest> <weight>')
print('mst')
print('display')
print('quit')
while True:
do = input('What would you like to do? ').split()
operation = do[0]
if operation == 'add':
suboperation = do[1]
if suboperation == 'vertex':
key = int(do[2])
if key not in g:
g.add_vertex(key)
else:
print('Vertex already exists.')
elif suboperation == 'edge':
src = int(do[2])
dest = int(do[3])
weight = int(do[4])
if src not in g:
print('Vertex {} does not exist.'.format(src))
elif dest not in g:
print('Vertex {} does not exist.'.format(dest))
else:
if not g.does_edge_exist(src, dest):
g.add_edge(src, dest, weight)
g.add_edge(dest, src, weight)
else:
print('Edge already exists.')
elif operation == 'mst':
mst = mst_prim(g)
print('Minimum Spanning Tree:')
mst.display()
print()
elif operation == 'display':
g.display()
print()
elif operation == 'quit':
break
| 31.652695
| 84
| 0.573023
|
f88cc96096c4c14e60ec5770bc98e16ecf169790
| 984
|
py
|
Python
|
osm_policy_module/core/exceptions.py
|
TCSOSM-20/POL
|
8ba99ff372d2d8ef13b2c6865b16c0869fa04069
|
[
"Apache-2.0"
] | null | null | null |
osm_policy_module/core/exceptions.py
|
TCSOSM-20/POL
|
8ba99ff372d2d8ef13b2c6865b16c0869fa04069
|
[
"Apache-2.0"
] | null | null | null |
osm_policy_module/core/exceptions.py
|
TCSOSM-20/POL
|
8ba99ff372d2d8ef13b2c6865b16c0869fa04069
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 Whitestack, LLC
# *************************************************************
# This file is part of OSM Monitoring module
# All Rights Reserved to Whitestack, LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# For those usages not covered by the Apache License, Version 2.0 please
# contact: bdiaz@whitestack.com or glavado@whitestack.com
##
class ManagementVduNotFound(Exception):
pass
class VdurNotFound(Exception):
pass
| 30.75
| 75
| 0.703252
|
25f93b177fcb561caf0d421dc8b8848cc5f6e864
| 1,372
|
py
|
Python
|
tests/test_CKANTransform.py
|
bcgov/bcdc_2_bcdc
|
37f9641ee434fd9a40baf2d652a28a8bede642ce
|
[
"Apache-2.0"
] | 1
|
2020-01-06T19:28:16.000Z
|
2020-01-06T19:28:16.000Z
|
tests/test_CKANTransform.py
|
bcgov/bcdc_2_bcdc
|
37f9641ee434fd9a40baf2d652a28a8bede642ce
|
[
"Apache-2.0"
] | 12
|
2020-04-17T19:25:52.000Z
|
2021-11-01T21:22:47.000Z
|
tests/test_CKANTransform.py
|
bcgov/bcdc_2_bcdc
|
37f9641ee434fd9a40baf2d652a28a8bede642ce
|
[
"Apache-2.0"
] | 1
|
2020-04-14T15:50:42.000Z
|
2020-04-14T15:50:42.000Z
|
"""used to verify methods in ckanCompare
"""
import logging
import pytest
import constants
import CKANTransform
import pprint
# pylint: disable=logging-format-interpolation
LOGGER = logging.getLogger(__name__)
PP = pprint.PrettyPrinter(indent=4)
def test_validateType(Transform):
validTypes = constants.VALID_TRANSFORM_TYPES
# assert valid types do not raise exception
for validType in validTypes:
CKANTransform.validateType(validType)
inValidTypes = [i + '_INVALID' for i in validTypes]
LOGGER.debug(f"invalidTypes: {inValidTypes}")
# assert invalid types raise exception
with pytest.raises(CKANTransform.InValidTransformationTypeError):
for inValidType in inValidTypes:
CKANTransform.validateType(inValidType)
def test_transformConfig(TransformationConfig):
userType = constants.TRANSFORM_TYPE_USERS
orgType = constants.TRANSFORM_TYPE_ORGS
config_user_auto = TransformationConfig.getAutoPopulatedProperties(userType)
config_user_user = TransformationConfig.getUserPopulatedProperties(userType)
spacer = "_" * 100
LOGGER.debug(f"{spacer}")
LOGGER.debug(f"config_auto: {config_user_auto}")
LOGGER.debug(f"config_user: {config_user_user}")
config_org_user = TransformationConfig.getUserPopulatedProperties(orgType)
LOGGER.debug(f"config_org_user: {config_org_user}")
| 31.906977
| 80
| 0.773324
|
a5eef10691281dc59ce4a214904965089c20649d
| 1,100
|
py
|
Python
|
migrations/versions/ef22dd6df3a8_.py
|
gzxultra/FlaskLoginManagement
|
0a36a36fa1322e91a35735280bb119c94d016592
|
[
"MIT"
] | null | null | null |
migrations/versions/ef22dd6df3a8_.py
|
gzxultra/FlaskLoginManagement
|
0a36a36fa1322e91a35735280bb119c94d016592
|
[
"MIT"
] | null | null | null |
migrations/versions/ef22dd6df3a8_.py
|
gzxultra/FlaskLoginManagement
|
0a36a36fa1322e91a35735280bb119c94d016592
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: ef22dd6df3a8
Revises: 66814cba0685
Create Date: 2016-03-24 22:02:52.556712
"""
# revision identifiers, used by Alembic.
revision = 'ef22dd6df3a8'
down_revision = '66814cba0685'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('about_me', sa.Text(), nullable=True))
op.add_column('users', sa.Column('last_seen', sa.DateTime(), nullable=True))
op.add_column('users', sa.Column('location', sa.String(length=64), nullable=True))
op.add_column('users', sa.Column('member_since', sa.DateTime(), nullable=True))
op.add_column('users', sa.Column('name', sa.String(length=64), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'name')
op.drop_column('users', 'member_since')
op.drop_column('users', 'location')
op.drop_column('users', 'last_seen')
op.drop_column('users', 'about_me')
### end Alembic commands ###
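# Typical invocation (assuming a standard Alembic setup for this project):
#   alembic upgrade head    # apply this migration
#   alembic downgrade -1    # revert it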
| 31.428571
| 86
| 0.687273
|
d8b2438a78ed241dfad95623bb95bb3e170aa12e
| 8,247
|
py
|
Python
|
greent/services/omnicorp_postgres.py
|
TranslatorIIPrototypes/robo-commons
|
a915d80b70f7e68a70f6a5f7ff6e732d2e02db06
|
[
"MIT"
] | 1
|
2020-02-05T20:00:52.000Z
|
2020-02-05T20:00:52.000Z
|
greent/services/omnicorp_postgres.py
|
TranslatorIIPrototypes/robo-commons
|
a915d80b70f7e68a70f6a5f7ff6e732d2e02db06
|
[
"MIT"
] | 12
|
2020-05-07T16:40:15.000Z
|
2020-06-16T13:23:13.000Z
|
greent/services/omnicorp_postgres.py
|
TranslatorIIPrototypes/robo-commons
|
a915d80b70f7e68a70f6a5f7ff6e732d2e02db06
|
[
"MIT"
] | 6
|
2018-02-23T20:25:50.000Z
|
2019-11-21T14:55:52.000Z
|
from string import Template
import json
import os
import logging
from greent.service import Service
from greent.triplestore import TripleStore
from greent.util import LoggingUtil
from greent.util import Text
from greent import node_types
from pprint import pprint
import datetime
from collections import defaultdict
import time
import psycopg2
logger = LoggingUtil.init_logging(__name__, logging.INFO)
"""OMNICORP IS NO LONGER CALLED FROM INTERFACES"""
class OmniCorp(Service):
def __init__(self, context): #triplestore):
super(OmniCorp, self).__init__("omnicorp", context)
db = context.config['OMNICORP_DB']
user = context.config['OMNICORP_USER']
port = context.config['OMNICORP_PORT']
host = context.config['OMNICORP_HOST']
password = context.config['OMNICORP_PASSWORD']
self.prefixes = set(['UBERON', 'BSPO', 'PATO', 'GO', 'MONDO', 'HP', 'ENVO', 'OBI', 'CL', 'SO', 'CHEBI', 'HGNC', 'MESH'])
#self.conn = psycopg2.connect(dbname=db, user=user, host=host, port=port, password=password)
self.conn = None
self.nsingle = 0
self.total_single_call = datetime.timedelta()
self.npair = 0
self.total_pair_call = datetime.timedelta()
    def __del__(self):
        # guard against the connection never having been opened (the
        # psycopg2.connect call is commented out in __init__ above)
        if self.conn is not None:
            self.conn.close()
def get_omni_identifier(self,node):
#Let's start with just the 'best' identifier
identifier = node.id
prefix = Text.get_curie(node.id)
if prefix not in self.prefixes:
#logger.debug("What kinda tomfoolery is this?")
#logger.debug(f"{node.id} {node.type}")
#logger.debug(f"{node.synonyms}")
return None
return identifier
def get_shared_pmids (self, node1, node2):
id1 = self.get_omni_identifier(node1)
id2 = self.get_omni_identifier(node2)
if id1 is None or id2 is None:
return []
done = False
ntries = 0
pmids = self.postgres_get_shared_pmids( id1,id2 )
if pmids is None:
logger.error("OmniCorp gave up")
return None
return [ f'PMID:{p}' for p in pmids ]
def postgres_get_shared_pmids(self, id1, id2):
prefix1 = Text.get_curie(id1)
prefix2 = Text.get_curie(id2)
start = datetime.datetime.now()
cur = self.conn.cursor()
statement = f'''SELECT a.pubmedid
FROM omnicorp.{prefix1} a
JOIN omnicorp.{prefix2} b ON a.pubmedid = b.pubmedid
WHERE a.curie = %s
AND b.curie = %s '''
cur.execute(statement, (id1, id2))
pmids = [ x[0] for x in cur.fetchall() ]
cur.close()
end = datetime.datetime.now()
self.total_pair_call += (end-start)
logger.debug(f'Found {len(pmids)} shared ids in {end-start}. Total {self.total_pair_call}')
self.npair += 1
if self.npair % 100 == 0:
logger.info(f'NCalls: {self.npair} Total time: {self.total_pair_call} Avg Time: {self.total_pair_call/self.npair}')
return pmids
def count_pmids(self, node):
identifier = self.get_omni_identifier(node)
if identifier is None:
return 0
prefix = Text.get_curie(identifier)
start = datetime.datetime.now()
cur = self.conn.cursor()
statement = f'SELECT COUNT(pubmedid) from omnicorp.{prefix} WHERE curie = %s'
cur.execute(statement, (identifier,))
n = cur.fetchall()[0][0]
cur.close()
end = datetime.datetime.now()
self.total_single_call += (end-start)
logger.debug(f'Found {n} pmids in {end-start}. Total {self.total_single_call}')
self.nsingle += 1
if self.nsingle % 100 == 0:
logger.info(f'NCalls: {self.nsingle} Total time: {self.total_single_call} Avg Time: {self.total_single_call/self.nsingle}')
return n
'''
def query_omnicorp (self, query):
""" Execute and return the result of a SPARQL query. """
return self.triplestore.execute_query (query)
def sparql_get_all_shared_pmids (self, identifier_list):
text = """
PREFIX dct: <http://purl.org/dc/terms/>
SELECT DISTINCT ?pubmed ?term1 ?term2
WHERE {
hint:Query hint:analytic true .
VALUES ?term1 $id_list_a
VALUES ?term2 $id_list_b
?pubmed dct:references ?term1 .
?pubmed dct:references ?term2 .
FILTER(STR(?term1) < STR(?term2))
}
"""
start = datetime.datetime.now()
results = self.triplestore.query_template(
inputs = { 'id_list_a': identifier_list, 'id_list_b': identifier_list },
outputs = [ 'term1','term2','pubmed' ],
template_text = text,
post = True
)
end = datetime.datetime.now()
logger.debug(f'Completed in: {end-start}')
return results
def sparql_count_pmids (self, identifier):
text = """
PREFIX dct: <http://purl.org/dc/terms/>
SELECT (COUNT(DISTINCT ?pubmed) as ?count)
WHERE {
hint:Query hint:analytic true .
?pubmed dct:references <$identifier> .
}
"""
results = self.triplestore.query_template(
inputs = { 'identifier': identifier },
outputs = [ 'count' ],
template_text = text,
)
return results
def sparql_get_shared_pmids (self, identifier_a, identifier_b):
text = """
PREFIX dct: <http://purl.org/dc/terms/>
SELECT DISTINCT ?pubmed
WHERE {
hint:Query hint:analytic true .
?pubmed dct:references <$id_a> .
?pubmed dct:references <$id_b> .
}
"""
results = self.triplestore.query_template(
inputs = { 'id_a': identifier_a, 'id_b': identifier_b },
outputs = [ 'pubmed' ],
template_text = text,
post = True
)
return results
def get_all_shared_pmids (self, nodes):
oiddict = {self.get_omni_identifier(n):n for n in nodes}
oids = [f'<{x}>' for x in filter(lambda n: n is not None, oiddict.keys())]
oidsstring = '{ ' + ' '.join(oids) + '}'
results = self.sparql_get_all_shared_pmids(oidsstring)
pubmeds = defaultdict( list )
for r in results:
k = (oiddict[r['term1']],oiddict[r['term2']])
pubmeds[k].append(f"PMID:{r['pubmed'].split('/')[-1]}")
for i,node_i in enumerate(nodes):
for node_j in nodes[:i]:
k_ij = (node_i, node_j)
k_ji = (node_j, node_i)
if k_ij not in pubmeds and k_ji not in pubmeds:
pubmeds[k_ij] = []
return pubmeds
def call_with_retries(self,fnc,args):
done = False
ntries = 0
maxtries = 100
rest_time = 10 #seconds
start = datetime.datetime.now()
while not done and ntries < maxtries:
try:
result = fnc(*args)
done = True
except:
logger.warn("OmniCorp error, retrying")
time.sleep(rest_time)
ntries += 1
if not done:
return None
else:
end = datetime.datetime.now()
logger.debug(f'Total call ntries: {ntries}, time: {end-start}')
return result
def count_pmids(self, node):
identifier = self.get_omni_identifier(node)
if identifier is None:
return 0
res = self.call_with_retries(self.sparql_count_pmids, [identifier])
if res is None:
return None
else:
logger.debug(f"Returned {res[0]['count']}")
return res[0]['count']
def get_shared_pmids (self, node1, node2):
id1 = self.get_omni_identifier(node1)
id2 = self.get_omni_identifier(node2)
if id1 is None or id2 is None:
return []
done = False
ntries = 0
pmids = self.call_with_retries(self.sparql_get_shared_pmids, [id1,id2])
if pmids is None:
logger.error("OmniCorp gave up")
return None
return [ p['pubmed'] for p in pmids ]
'''
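# --- Hypothetical standalone sketch (added; not part of the original module). ---
# postgres_get_shared_pmids() above joins the two per-prefix omnicorp tables
# on pubmedid. A minimal psycopg2 sketch of the same pattern, assuming the
# omnicorp.<PREFIX>(curie, pubmedid) layout implied by that query and an
# externally supplied connection:
def _shared_pmid_query_sketch(conn, curie1, curie2):
    prefix1 = curie1.split(':')[0]
    prefix2 = curie2.split(':')[0]
    statement = f'''SELECT a.pubmedid
                    FROM omnicorp.{prefix1} a
                    JOIN omnicorp.{prefix2} b ON a.pubmedid = b.pubmedid
                    WHERE a.curie = %s AND b.curie = %s'''
    with conn.cursor() as cur:
        cur.execute(statement, (curie1, curie2))
        return [row[0] for row in cur.fetchall()]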
| 35.701299
| 136
| 0.580575
|
3259bfaa09c2bc68028764c87a63e0adbf6a4a77
| 31,629
|
py
|
Python
|
eit/fem.py
|
ivo53/EIT-MPhys-Project
|
e907fdb99c2fb0bb2a33686385cfd98f89ec8cb2
|
[
"BSD-3-Clause"
] | 2
|
2020-09-07T01:24:57.000Z
|
2020-12-12T16:41:59.000Z
|
eit/fem.py
|
ivo53/EIT-MPhys-Project
|
e907fdb99c2fb0bb2a33686385cfd98f89ec8cb2
|
[
"BSD-3-Clause"
] | 1
|
2021-09-04T04:21:19.000Z
|
2021-09-11T11:42:00.000Z
|
eit/fem.py
|
ivo53/EIT-MPhys-Project
|
e907fdb99c2fb0bb2a33686385cfd98f89ec8cb2
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# pylint: disable=invalid-name, no-member, too-many-locals
# pylint: disable=too-many-instance-attributes
""" 2D/3D FEM routines """
# Copyright (c) Benyuan Liu. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
#from __future__ import division, absolute_import, print_function
from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt
import cupy as cp
from cupyx.scipy import sparse as sp
from scipy.optimize import minimize
from .utils import eit_scan_lines
from time import time
cp.cuda.Device(0).use()
class Forward(object):
""" FEM forward computing code """
#@profile
def __init__(self, mesh, el_pos, ne, z=None):
"""
A good FEM forward solver should only depend on
mesh structure and the position of electrodes
NOTE: the nodes are assumed continuous numbered.
Parameters
----------
mesh : dict
mesh structure
el_pos : NDArray
numbering of electrodes positions
"""
# save all arrays on GPU using CuPy
self.pts = cp.array(mesh['node'])
self.tri = cp.array(mesh['element'])
self.tri_perm = cp.array(mesh['perm'])
self.el_pos = cp.array(el_pos)
# reference electrodes [ref node should not be on electrodes]
ref_el = 0
while ref_el in self.el_pos:
ref_el = ref_el + 1
self.ref = ref_el
# infer dimensions from mesh
self.n_pts, self.n_dim = self.pts.shape
self.n_tri, self.n_vertices = self.tri.shape
self.ne = ne
self.n_per_el = int(self.el_pos.size / self.ne)
self.twoFromElectrode, self.nodeisElectrode, self.isValid = self.findTrianglesOnElectrodes()
if z is None:
self.z = 50. * cp.ones(self.ne)
else:
self.z = z
#@profile
def solve_eit(self, volt_mat=None, new_ind=None, ex_mat=None, step=1, perm=None, parser=None):
"""
EIT simulation, generate perturbation matrix and forward v
Parameters
----------
ex_mat : NDArray
numLines x n_el array, stimulation matrix
step : int
the configuration of measurement electrodes (default: adjacent)
perm : NDArray
Mx1 array, initial x0. must be the same size with self.tri_perm
parser : str
if parser is 'fmmu', within each stimulation pattern, diff_pairs
or boundary measurements are re-indexed and started
from the positive stimulus electrode
if parser is 'std', subtract_row start from the 1st electrode
Returns
-------
jac : NDArray
number of measures x n_E complex array, the Jacobian
v : NDArray
number of measures x 1 array, simulated boundary measures
b_matrix : NDArray
back-projection mappings (smear matrix)
"""
# deduce number of electrodes from number of electrodes and array of positions
if ex_mat is None:
ex_mat = cp.array(eit_scan_lines(self.ne, int(self.ne/2)))
else:
ex_mat = cp.array(ex_mat)
# initialize/extract the step (takes both integer for constant and array for variable steps)
if type(step) is int:
step_arr = step * cp.ones(ex_mat.shape[0])
elif type(step) is np.ndarray:
if np.shape(step)[0] >= ex_mat.shape[0]:
step_arr = cp.array(step)
else:
raise ValueError('Array is not long enough!')
elif (volt_mat is not None) and (new_ind is not None):
pass
else:
raise TypeError('Type of step is not int or ndarray!')
# initialize permitivity
if perm is None:
perm0 = self.tri_perm
elif cp.isscalar(perm):
perm0 = cp.ones(self.n_tri, dtype=float)
else:
assert perm.shape == (self.n_tri,)
perm0 = cp.array(perm)
# calculate stiffness matrix on each element
ke = self.calculate_ke()
#t_2 = pt()
# calculate global stiffness matrix
Ag = self.assemble_sparse(ke, self.tri, perm0, self.n_pts, ref=self.ref)
#t_1 = pt()
# calculate its inverse
r_matrix = cp.linalg.inv(Ag)
# extract values for electrodes
r_el = r_matrix[self.n_pts:]
#r_el = r_el.reshape(self.ne, self.n_per_el, r_el.shape[1])
#r_el = cp.mean(r_el, axis=1)
#t0 = pt()
'''
b = np.zeros((self.n_pts, ex_mat.shape[0]))
r_el_np = cp.asnumpy(r_el)
ex_mat_np = cp.asnumpy(ex_mat)
i = 0
for ex_line in ex_mat_np:
b_el = self.optimise_currents(r_el_np, ex_line)
b[ex_mat_np[i, 0] * self.n_per_el : (ex_mat_np[i, 0] + 1) * self.n_per_el, i] = b_el[:self.n_per_el]
b[ex_mat_np[i, 1] * self.n_per_el : (ex_mat_np[i, 1] + 1) * self.n_per_el, i] = b_el[self.n_per_el:]
i += 1
b = cp.array(b)'''
# set boundary conditions
b = self._natural_boundary(ex_mat)
# calculate f matrix (potential at nodes)
f = cp.einsum('ij, jh', r_matrix, b)
#t1 = pt()
# calculate Jacobian
jac_i = self.findJac(ex_mat, perm0, ke, f, r_el)
#t2 = pt()
f_el = f[:, self.n_pts:]
#print(f_el[5])
#f_el = f_el.reshape(f_el.shape[0], self.ne, self.n_per_el)
#f_el = np.mean(f_el, axis=2)
# generate all voltage measurements with given step
if volt_mat is None or new_ind is None:
volt_mat, new_ind = self.voltMeter(ex_mat, step_arr)
elif len(volt_mat) == len(new_ind):
volt_mat = cp.array(volt_mat, dtype='i1')
new_ind = cp.array(new_ind, dtype='i1')
else:
raise ValueError('volt_mat and new_ind must be arrays (or lists/tuples) shape (N, 2) and (N) respectively. N is number of measurements.')
#t3 = pt()
# find differences in voltage and in Jacobian at the measuring electrodes, since absolute values are not needed for conductivity reconstruction
V = self.substractRow(f_el, volt_mat, new_ind)
J = self.substractRow(jac_i, volt_mat, new_ind)
#t4 = pt()
# find smearing matrix from f (needed for backprojection)
B = self.smearing(f, f_el, volt_mat, new_ind)
#t5 = pt()
# optional: check time performance
'''
print('kg takes:', t_1-t_2)
print('inv takes:', t0-t_1)
print('dot product takes:', t1-t0)
print('Solve takes:', t2-t1)
print('voltmeter takes:', t3-t2)
print('subtract_row takes:', t4-t3)
print('Smearing takes:', t5-t4)
'''
#print("New FEM voltages:\n", f)
# return result as a tuple
pde_result = namedtuple("pde_result", ['jac', 'v', 'b_matrix'])
p = pde_result(jac=cp.asnumpy(J),
v=cp.asnumpy(V),
b_matrix=cp.asnumpy(B))
#print(J.shape)
return p
def _natural_boundary(self, ex_mat):
"""
Notes
-----
Generate the Neumann boundary condition. In utils.py,
you should note that ex_line is local indexed from 0...15,
which need to be converted to global node number using el_pos.
"""
drv_a_global_arr = ex_mat[:, 0]
drv_b_global_arr = ex_mat[:, 1]
row = cp.arange(ex_mat.shape[0])
b = cp.zeros((self.ne, ex_mat.shape[0]))
b[drv_a_global_arr, row] = 1e4
b[drv_b_global_arr, row] = -1e4
b_final = cp.zeros(( self.n_pts + self.ne, ex_mat.shape[0]))
b_final[self.n_pts:, :] = b[:]
return b_final
def findJac(self, ex_mat, perm0, ke, f, r_el):
'''
Calculates Jacobian for all measurements
takes:
ex_mat - array shape (n_source/sinks, 2) - excitation matrix with source and sink for each measurement
perm0 - array shape (n_triangles) - initial permittivity on each triangle
ke - array shape (n_triangles, n_vertices, n_vertices) - stiffness on each element matrix
f - array shape (n_nodes) - voltage on each node of mesh
r_el - inverse of global stiffness matrix on electrodes
returns:
jac - array shape ( n_measurements, n_electrodes, n_triangles) - Jacobian for all measurements
'''
# initialise array for Jacobian
jac = cp.zeros((ex_mat.shape[0], self.ne, self.n_tri), dtype=perm0.dtype)
# calculating jacobian
jac[:] = cp.einsum('ijk, jkp, ljp->lij', r_el[:, self.tri], ke, f[:, self.tri], optimize='optimal')
#jac = cp.zeros((ex_mat.shape[0], self.ne, self.n_tri), dtype=perm0.dtype)
#jac_all_el_pts = jac_all_el_pts.reshape((ex_mat.shape[0], self.ne, self.n_per_el, self.n_tri))
#jac[:] = (1. / self.n_per_el) * np.sum(jac_all_el_pts, axis=2)
return jac
def substractRow(self, f_el, volt_mat, new_ind):
'''
        For each measurement pair, takes the difference of f_el between the two measuring electrodes (value at the first minus value at the second).
takes:
f_el - 1d array
volt_mat - array shape (n_measurements, 2) - gives all volt. measurements
new_ind - array shape (n_measurements) - helps with finding the relevant source-sink pair for each volt. measurement
returns:
v_diff - array shape (n_measurements) - difference in voltages or whatever f_el is
'''
# get first and second measuring electrode
i = volt_mat[:, 0].astype(int)
j = volt_mat[:, 1].astype(int)
# perform subtraction
v_diff = f_el[new_ind, i] - f_el[new_ind, j]
#print(v_diff)
return v_diff
def smearing(self, f, f_el, volt_mat, new_ind):
'''
Produces B matrix by comparing voltages
takes:
f - array shape (n_nodes)
f_el - array shape (n_electrodes)
volt_mat - array shape (n_measurements, 2)
new_ind - array shape (n_measurements)
returns:
b-matrix - array shape (n_measurements, n_nodes)
'''
i = cp.arange(len(volt_mat))
f_volt0 = f_el[new_ind, volt_mat[:, 0].astype(int)]
f_volt1 = f_el[new_ind, volt_mat[:, 1].astype(int)]
min_fel = cp.minimum(f_volt0, f_volt1)
max_fel = cp.maximum(f_volt0, f_volt1)
b_matrix = cp.empty((len(volt_mat), self.n_pts+self.ne))
b_matrix[:] = (min_fel[:, None] < f[new_ind]) & (f[new_ind] <= max_fel[:, None])
return b_matrix
#@profile
def voltMeter(self, ex_mat, step_arr=None, parser=None):
'''
Returns all measurements with this step_arr and ex_mat
takes:
ex_mat - array shape (n_source/sinks, 2) - excitation matrix with source and sink for each measurement
step_arr - array shape (n_source/sinks) - step between measuring electrodes for each source/sink pair
parser - string
returns:
pair_mat - array shape (n_measurements, 2) - matrix with all possible meas. electrode combinations
ind_new - array shape (n_measurements) - helper array
'''
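        # For example, with ne = 4 electrodes, step 1 and excitation pair
        # (0, 1), the candidate measurement pairs (M + step, M) are
        # (1,0), (2,1), (3,2), (0,3); the filtering below removes any pair
        # touching the source or sink electrode, leaving only (3,2).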
if step_arr is None:
step_arr = 1 + cp.arange((ex_mat.shape[0])) % (self.ne)
drv_a = ex_mat[:, 0]
drv_b = ex_mat[:, 1]
i0 = drv_a if parser == 'fmmu' else 0
A = cp.arange(i0, i0 + self.ne)
#M = cp.dot(cp.ones(ex_mat.shape[0])[:,None], A[None, :]) % self.ne
#N = (M + step_arr[:, None]) % self.ne
M = cp.arange(ex_mat.shape[0] * self.ne) % self.ne
N = (M.reshape((ex_mat.shape[0], self.ne)) + step_arr[:, None]) % self.ne
pair_mat = cp.stack((N.ravel(), M), axis=-1)
#ind_new = cp.arange(pair_mat.shape[0]) % ex_mat.shape[0]
ind_new = cp.arange(ex_mat.shape[0])
ind_new = cp.tile(ind_new, (self.ne, 1)).T.ravel()
#print('before indtest', ind_new[20:70])
nz2 = cp.where(pair_mat == drv_a[ind_new, None])
nz3 = cp.where(pair_mat == drv_b[ind_new, None])
#print(ind_new)
ind_ = cp.arange(pair_mat.shape[0])
ind_fin = cp.sum(ind_[:, None] == nz2[0][None], axis=1)
ind_fin2 = cp.sum(ind_[:, None] == nz3[0][None], axis=1)
ind_test = cp.less((ind_fin + ind_fin2), 0.5 * cp.ones(len(ind_fin)))
pair_mat = pair_mat[ind_test, :]
ind_new = ind_new[ind_test]
sort_index = cp.argsort(ind_new)
#print('after indtest', ind_new[20:70])
#meas = cp.concatenate((ex_mat[ind_new], pair_mat), axis=1)
#print(meas[20:70])
return pair_mat, ind_new
def optimise_currents(self, r_el, ex_line):
def charge_cons_src(b):
return np.sum(b[0:self.n_per_el]) - 1
def charge_cons_snk(b):
return np.sum(b[self.n_per_el:]) + 1
#the optimise function needs to be defined in the class so we can access the r_matrix
#redefine function to take only the values of currents for sources/sinks
def voltage_opt(b):
b_try = np.zeros(self.n_pts)
#set values of the currents based on the input to the function
b_try [ int(ex_line[0] * self.n_per_el) : int(( ex_line[0] + 1 ) * self.n_per_el) ] = b [0:int(self.n_per_el)]
b_try [ ex_line[1] * self.n_per_el : ( ex_line[1] + 1 ) * self.n_per_el ] = b [self.n_per_el:]
f_el = np.dot(r_el[0:self.ne * self.n_per_el], b_try)
f_el = f_el.reshape(self.ne, self.n_per_el)
mean_vector = np.mean(f_el, axis=1).reshape(self.ne, 1)
f_el = np.sum(np.abs(f_el - mean_vector))
return f_el
bound_src = [0., 1.]
bound_snk = [-1., 0.]
bounds = np.empty((self.n_per_el * 2, 2))
bounds[0:self.n_per_el] = bound_src
bounds[self.n_per_el:] = bound_snk
b0 = np.empty(self.n_per_el * 2)
b0[0:self.n_per_el] = 1. / self.n_per_el
b0[self.n_per_el:] = -1. / self.n_per_el
constraint1 = {'type' : 'eq', 'fun' : charge_cons_src}
constraint2 = {'type' : 'eq', 'fun' : charge_cons_snk}
constraint = [constraint1, constraint2]
solution = minimize(voltage_opt, b0, method = 'SLSQP', bounds = bounds, constraints = constraint)
#print(solution)
return solution.x
#@profile
def assemble_sparse(self, ke, tri, perm, n_pts, ref=0):
'''
function that assembles the global stiffness matrix from all element stiffness matrices
takes:
ke - stiffness on each element matrix - array shape (n_triangles, n_vertices, n_vertices)
tri - array with all indices (in pts array) of triangle vertices - shape (num_triangles, 3)
perm - array with permittivity in each element - array shape (num_triangles,)
n_pts - number of nodes - int
ref - electrode on which reference value is placed
returns:
K - global stiffness matrix - (n_pts, n_pts)
'''
n_tri, n_vertices = tri.shape
row = cp.tile(tri, (1, n_vertices))
i = cp.array([0, 3, 6, 1, 4, 7, 2, 5, 8])
row = row[:, i].ravel()
col = cp.tile(tri, (n_vertices)).reshape((tri.shape[0] * tri.shape[1] * n_vertices))
admittanceMatrixC2 = self.admittanceMatrixC2()
data = cp.multiply(ke[:], perm[:, None, None])
indexElectrode = cp.sort(self.tri[self.twoFromElectrode][self.isValid], axis=1)[:, 0] // self.n_per_el
data[self.twoFromElectrode][self.isValid] = (data[self.twoFromElectrode][self.isValid] + ((1/self.z[indexElectrode]))[:, None, None] * admittanceMatrixC2)
data = data.ravel()
ind = cp.argsort(row)
row = row[ind]
col = col[ind]
data = data[ind]
unique, counts = cp.unique(row, return_counts=True)
index_pointer = cp.zeros(n_pts + 1)
sum_count = cp.cumsum(counts)
index_pointer[unique[:]+1] = sum_count[:]
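        # index_pointer is the CSR indptr array: slot r+1 holds the cumulative
        # number of stored entries up to and including row r. This relies on
        # every node appearing in at least one triangle; admittanceMatrixE()
        # below patches empty rows explicitly.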
K = sp.csr_matrix((data, col, index_pointer), shape=(n_pts, n_pts), dtype=perm.dtype)
K = K.toarray()
A = cp.empty((self.n_pts + self.ne, self.n_pts + self.ne), dtype='f8')
if 0 <= self.ref < n_pts:
K[self.ref, :] = 0.
K[:, self.ref] = 0.
K[self.ref, self.ref] = 1.
A[:self.n_pts, :self.n_pts] = K[:]
admittanceMatrixE = self.admittanceMatrixE()
A[self.n_pts:, :self.n_pts] = admittanceMatrixE.T
A[:self.n_pts, self.n_pts:] = admittanceMatrixE
A[self.n_pts:, self.n_pts:] = self.admittanceMatrixD()
return A
def calculate_ke(self):
'''
function that calculates the element stiffness matrix on each element
takes:
pts - array that contains the coordinates of all nodes in the mesh - shape (n_nodes, 2)
tri - array with all indices (in pts array) of triangle vertices - shape (num_triangles, 3)
returns:
ke_array - an array of stiffness matrices for all elements (n_triangles, 3, 3)
'''
n_tri, n_vertices = self.tri.shape
ke_array = cp.zeros((n_tri, n_vertices, n_vertices))
coord = self.pts[self.tri[:,:]]
ke_array[:] = self.triangle_ke(coord)
return ke_array
def triangle_ke(self, coord):
'''
function that calculates ke
takes:
coord - coordinates of each triangle's nodes - shape (n_triangles, 3, 2)
returns:
ke_array - an array of stiffness matrices for all elements (n_triangles, 3, 3)
'''
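        # This is the standard linear (P1) triangle stiffness matrix: s holds
        # the three edge vectors of each triangle, area is half the absolute
        # 2x2 determinant of two of them, and the result is
        # ke[i, j] = (s_i . s_j) / (4 * area).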
s = cp.array(coord[:, [2, 0, 1]] - coord[:, [1, 2, 0]]) # shape (n_tri, 3, 2)
ke_matrix = cp.empty((len(coord), 3, 3))
area = cp.abs(0.5 * self.det2x2(s[:, 0], s[:, 1]))
ke_matrix[:] = cp.einsum('ijk,kli->ijl', s, s.T) / (4. * area[:, None, None])
return ke_matrix
def det2x2(self, s1, s2):
'''
Function that calculates the determinants of two arrays
'''
return s1[:, 0]*s2[:, 1] - s1[:, 1]*s2[:, 0]
def shapeFunctionParameters(self):
'''
return arrays of parameters for all shape functions in all triangles on electrodes - shape ((n_el * n_per_el - 1), 3, 3)
'''
#print(self.tri[twoFromElectrode][isValid])
pointsTri = self.pts[self.tri[self.twoFromElectrode][self.isValid]] # shape ((n_el * n_per_el - 1), 3, 2)
params = cp.empty((pointsTri.shape[0], 3, 3))
params[:, :, 0] = cp.multiply(pointsTri[:, [1, 2, 0], 0], pointsTri[:, [2, 0, 1], 1]) - cp.multiply(pointsTri[:, [2, 0, 1], 0], pointsTri[:, [1, 2, 0], 1])
params[:, :, 1] = pointsTri[:, [1, 2, 0], 1] - pointsTri[:, [2, 0, 1], 1]
params[:, :, 2] = - (pointsTri[:, [1, 2, 0], 0] - pointsTri[:, [2, 0, 1], 0])
return params
def findTrianglesOnElectrodes(self):
twoFromElectrode = (cp.sum(self.tri < self.ne * self.n_per_el, axis = 1) == 2)
nodeisElectrode = self.tri[twoFromElectrode][self.tri[twoFromElectrode] < self.ne * self.n_per_el].reshape(self.tri[twoFromElectrode].shape[0], 2)
isValid = ( nodeisElectrode[:, 0]//self.n_per_el - nodeisElectrode[:, 1]//self.n_per_el ) == 0
return twoFromElectrode,nodeisElectrode, isValid
def admittanceMatrixC2(self):
'''
compute matrix to calculate integral of two shape functions
over the length of the electrode (assuming they are non-zero) - shape ((n_el * n_per_el - 1), 3, 3, 3)
'''
shapeParams = self.shapeFunctionParameters()
whereIsZero = (cp.absolute(shapeParams) - 1e-12 < 0)
indexZero = cp.where(whereIsZero)
isConst = indexZero[2] == 2 # 1 for const x, 0 for const y
zeroShapeFunc = cp.array(indexZero[1])
indicesShapeFunctions = cp.outer(cp.ones(shapeParams.shape[0]), cp.arange(3))
indicesShapeFunctions[:, ~zeroShapeFunc] = 0
#print(indexZero)
outerOfShapeFunc = cp.einsum('ijk, ipq -> ijpkq', shapeParams, shapeParams)
#print(outerOfShapeFunc[0,0,0])
integratingMatrix = cp.empty((outerOfShapeFunc.shape[0], outerOfShapeFunc.shape[3], outerOfShapeFunc.shape[4]))
'''
for i in range(20):
#print(self.pts[nodeisElectrode[isValid], :][i])
print(nodeisElectrode[isValid][i])
print(self.tri[twoFromElectrode][isValid][i])
#print(nodeisElectrode[isValid])'''
sortedElNodeIndices = cp.sort(self.nodeisElectrode[self.isValid], axis=1)
#print(sortedElNodeIndices)
firstOrderY = cp.empty((outerOfShapeFunc.shape[0]))
secondOrderY = cp.empty((outerOfShapeFunc.shape[0]))
thirdOrderY = cp.empty((outerOfShapeFunc.shape[0]))
constX = cp.ones((outerOfShapeFunc.shape[0], 3))
firstOrderY[:] = self.pts[sortedElNodeIndices, :][:, 1, 1] - self.pts[sortedElNodeIndices, :][:, 0, 1] # y2 - y1
secondOrderY[:] = 0.5 * (cp.power(self.pts[sortedElNodeIndices, :][:, 1, 1], 2) - cp.power(self.pts[sortedElNodeIndices, :][:, 0, 1], 2)) # 1/2 (y2^2 - y1^2)
thirdOrderY[:] = 1./3. * (cp.power(self.pts[sortedElNodeIndices, :][:, 1, 1], 3) - cp.power(self.pts[sortedElNodeIndices, :][:, 0, 1], 3)) # 1/3 (y2^3 - y1^3)
constX[:, 1] = self.pts[sortedElNodeIndices, :][:, 1, 0]
constX = cp.einsum('ij, ik -> ijk', constX, constX)
integratingMatrix[:, 0, 0] = firstOrderY[:]
integratingMatrix[:, 0, 1] = firstOrderY[:]
integratingMatrix[:, 1, 0] = firstOrderY[:]
integratingMatrix[:, 0, 2] = secondOrderY[:]
integratingMatrix[:, 2, 0] = secondOrderY[:]
integratingMatrix[:, 1, 1] = firstOrderY[:]
integratingMatrix[:, 1, 2] = secondOrderY[:]
integratingMatrix[:, 2, 1] = secondOrderY[:]
integratingMatrix[:, 2, 2] = thirdOrderY[:]
integratingMatrix[:] = integratingMatrix * isConst[:, None, None]
#print(integratingMatrix)
#intm = cp.array(integratingMatrix)
#print(constX)
firstOrderX = cp.empty((outerOfShapeFunc.shape[0]))
secondOrderX = cp.empty((outerOfShapeFunc.shape[0]))
thirdOrderX = cp.empty((outerOfShapeFunc.shape[0]))
constY = cp.ones((outerOfShapeFunc.shape[0], 3))
firstOrderX[:] = self.pts[sortedElNodeIndices, :][:, 1, 0] - self.pts[sortedElNodeIndices, :][:, 0, 0] # x2 - x1
secondOrderX[:] = 0.5 * (cp.power(self.pts[sortedElNodeIndices, :][:, 1, 0], 2) - cp.power(self.pts[sortedElNodeIndices, :][:, 0, 0], 2)) # 1/2 (x2^2 - x1^2)
thirdOrderX[:] = 1./3. * (cp.power(self.pts[sortedElNodeIndices, :][:, 1, 0], 3) - cp.power(self.pts[sortedElNodeIndices, :][:, 0, 0], 3)) # 1/3 (x2^3 - x1^3)
constY[:, 2] = self.pts[sortedElNodeIndices, :][:, 1, 1]
constY = cp.einsum('ij, ik -> ijk', constY, constY)
#print(constY)
indicesConstX = cp.where(isConst)[0]
indicesConstY = cp.where(~isConst)[0]
#print(indicesConstY)
integratingMatrix[indicesConstY, 0, 0] = firstOrderX[indicesConstY]
integratingMatrix[indicesConstY, 0, 1] = secondOrderX[indicesConstY]
integratingMatrix[indicesConstY, 1, 0] = secondOrderX[indicesConstY]
integratingMatrix[indicesConstY, 0, 2] = firstOrderX[indicesConstY]
integratingMatrix[indicesConstY, 2, 0] = firstOrderX[indicesConstY]
integratingMatrix[indicesConstY, 1, 1] = thirdOrderX[indicesConstY]
integratingMatrix[indicesConstY, 1, 2] = secondOrderX[indicesConstY]
integratingMatrix[indicesConstY, 2, 1] = secondOrderX[indicesConstY]
integratingMatrix[indicesConstY, 2, 2] = firstOrderX[indicesConstY]
'''
for i in range(40):
print(intm[i])
print(integratingMatrix[i])
'''
integratingMatrix[indicesConstX] = cp.multiply(integratingMatrix[indicesConstX], constX[indicesConstX])
integratingMatrix[indicesConstY] = cp.multiply(integratingMatrix[indicesConstY], constY[indicesConstY])
admittanceMatrix = cp.einsum('ijklm, ilm -> ijk', outerOfShapeFunc, integratingMatrix)
admittanceMatrix[:] = cp.abs(admittanceMatrix)
admittanceMatrix[admittanceMatrix < 1e-18] = 0
#admittanceMatrix2 = cp.sum(cp.multiply(outerOfShapeFunc, integratingMatrix[:, None, None, :, :]), axis = [3,4])
#print(admittanceMatrix[:50,:50])
#number_of_equal = cp.sum(cp.equal(cp.round_(admittanceMatrix, 16), cp.round_(admittanceMatrix2, 16)))
#print(number_of_equal)
#print(number_of_equal == admittanceMatrix.shape[0] * admittanceMatrix.shape[1] * admittanceMatrix.shape[2])
return admittanceMatrix
def admittanceMatrixE(self):
shapeParams = self.shapeFunctionParameters()
whereIsZero = (cp.absolute(shapeParams) - 1e-12 < 0)
indexZero = cp.where(whereIsZero)
isConst = indexZero[2] == 2 # 1 for const x, 0 for const y
indicesConstX = cp.where(isConst)[0]
indicesConstY = cp.where(~isConst)[0]
sortedElNodeIndices = cp.sort(self.nodeisElectrode[self.isValid], axis=1)
admittanceMatrixE = cp.zeros((self.n_pts, self.ne))
shapeMatrix = cp.zeros((shapeParams.shape[0], shapeParams.shape[1], 2))
integratingMatrix = cp.zeros((shapeParams.shape[0], 2))
shapeMatrix[indicesConstY, :, 0] = shapeParams[indicesConstY, :, 0] + shapeParams[indicesConstY, :, 2] * self.pts[sortedElNodeIndices, :][indicesConstY, 1, 1][:, None]
shapeMatrix[indicesConstY, :, 1] = shapeParams[indicesConstY, :, 1]
shapeMatrix[indicesConstX, :, 0] = shapeParams[indicesConstX, :, 0] + shapeParams[indicesConstX, :, 1] * self.pts[sortedElNodeIndices, :][indicesConstX, 1, 0][:, None]
shapeMatrix[indicesConstX, :, 1] = shapeParams[indicesConstX, :, 2]
integratingMatrix[indicesConstY, 0] = self.pts[sortedElNodeIndices, :][indicesConstY, 1, 0] - self.pts[sortedElNodeIndices, :][indicesConstY, 0, 0]
integratingMatrix[indicesConstY, 1] = 0.5 * (cp.power(self.pts[sortedElNodeIndices, :][indicesConstY, 1, 0], 2) - cp.power(self.pts[sortedElNodeIndices, :][indicesConstY, 0, 0], 2))
integratingMatrix[indicesConstX, 0] = self.pts[sortedElNodeIndices, :][indicesConstX, 1, 1] - self.pts[sortedElNodeIndices, :][indicesConstX, 0, 1]
integratingMatrix[indicesConstX, 1] = 0.5 * (cp.power(self.pts[sortedElNodeIndices, :][indicesConstX, 1, 1], 2) - cp.power(self.pts[sortedElNodeIndices, :][indicesConstX, 0, 1], 2))
#print(integratingMatrix.shape)
integrals = cp.einsum('ijk, ik -> ij', shapeMatrix, integratingMatrix)
integrals[:] = cp.abs(integrals)
#integr = cp.sum(cp.multiply(shapeMatrix, integratingMatrix[:, None]), axis=2)
#print(cp.sum(cp.round_(integrals, 16) == cp.round_(integr, 16)))
indexElectrode = sortedElNodeIndices[:, 0] // self.n_per_el
#print(indexElectrode)
integrals = - integrals / self.z[indexElectrode][:, None, None]
integrals = integrals.ravel()
indexElectrode = cp.tile(indexElectrode, (self.n_per_el, 1)).T.ravel()
#print(self.tri[twoFromElectrode][isValid])
indexNode = self.tri[self.twoFromElectrode][self.isValid].ravel()
#admittanceMatrixE [self.tri[twoFromElectrode][isValid].ravel(), indexElectrode] += integrals.ravel()
indSort = cp.argsort(indexNode)
indexNode = indexNode[indSort]
indexElectrode = indexElectrode[indSort]
integrals = integrals[indSort]
unique, counts = cp.unique(indexNode, return_counts=True)
#print("number of unique entries", unique.shape)
#print("counts \n", counts)
index_pointer = cp.zeros(self.n_pts + 1)
sum_count = cp.cumsum(counts)
#print(sum_count)
index_pointer[unique[:]+1] = sum_count[:]
#print(index_pointer)
nonzeroes = cp.nonzero(index_pointer)[0]
#print(nonzeroes)
mask = cp.zeros(index_pointer.shape[0], dtype='b1')
mask[nonzeroes] = True
mask[0] = True
zeroes = cp.where(~mask)[0]
#time_loop = time()
while (index_pointer[1:]==0).any():
index_pointer[zeroes] = index_pointer[zeroes - 1]
#for z in zeroes:
# index_pointer[z] = index_pointer[z-1]
'''for i in range(index_pointer.shape[0]):
if i == 0:
continue
elif index_pointer[i] == 0:
index_pointer[i] = index_pointer[i-1]'''
#print('time for loop ',time()-time_loop)
#index_pointer2 = cp.arange(self.n_pts + 1)
#print('indexEl', indexElectrode)
#print(index_pointer)
admittanceMatrixE = sp.csr_matrix((integrals, indexElectrode, index_pointer), shape=(self.n_pts, self.ne), dtype=integrals.dtype)
        adm = admittanceMatrixE.toarray()
#print(integrals)
#print(indexNode)
#print(indexElectrode)
#a = (sortedElNodeIndices[0,0])
#print(adm[4])
# print(adm[:,1])
#print('sum zeroes ',cp.sum(adm>0))
        return adm
def admittanceMatrixD(self):
all_el_nodes_coords = self.pts[:(self.ne * self.n_per_el)].reshape((self.ne, self.n_per_el, 2))
lengths = cp.linalg.norm((all_el_nodes_coords[:, 0] - all_el_nodes_coords[:, (self.n_per_el - 1)]), axis=1)
admittanceMatrixD = cp.diag(lengths/self.z)
return admittanceMatrixD
def _k_tetrahedron(xy):
"""
given a point-matrix of an element, solving for Kij analytically
using barycentric coordinates (simplex coordinates)
Parameters
----------
xy : NDArray
        (x, y, z) of nodes 1, 2, 3, 4 given in counterclockwise manner,
see notes.
Returns
-------
ke_matrix : NDArray
local stiffness matrix
Notes
-----
A tetrahedron is described using [0, 1, 2, 3] (local node index) or
[171, 27, 9, 53] (global index). Counterclockwise (CCW) is defined
such that the barycentric coordinate of face (1->2->3) is positive.
"""
s = xy[[2, 3, 0, 1]] - xy[[1, 2, 3, 0]]
# volume of the tetrahedron
# TODO: remove abs, user must make sure all tetrahedrons are CCW.
vt = np.abs(1./6 * la.det(s[[0, 1, 2]]))
# calculate area (vector) of triangle faces
# re-normalize using alternative (+,-) signs
ij_pairs = [[0, 1], [1, 2], [2, 3], [3, 0]]
signs = [1, -1, 1, -1]
a = [sign*np.cross(s[i], s[j]) for (i, j), sign in zip(ij_pairs, signs)]
a = np.array(a)
# local (e for element) stiffness matrix
ke_matrix = np.dot(a, a.transpose()) / (36. * vt)
return ke_matrix
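# --- Hedged usage sketch (not part of the original module). It only illustrates how
# _k_tetrahedron above could be exercised on the reference unit tetrahedron; it assumes
# numpy is imported as `np` (and numpy.linalg as `la`) at module level, as the function
# body requires, and that _k_tetrahedron is callable from module scope.
def _example_k_tetrahedron():
    import numpy as np
    # vertices of the unit tetrahedron, volume 1/6
    xy = np.array([[0., 0., 0.],
                   [1., 0., 0.],
                   [0., 1., 0.],
                   [0., 0., 1.]])
    ke = _k_tetrahedron(xy)
    # ke is a symmetric 4x4 local stiffness matrix; each row sums to ~0 because the
    # gradients of the four barycentric shape functions sum to zero
    return ke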
| 44.673729 | 190 | 0.59085 |
07ca034f42ed9914133896adf55819b82f730358 | 3,447 | py | Python | tools/adb.py | lingyfh/r8 | 03f8577d014124c37760c0d6439a8dcbeb0742a1 | [ "BSD-3-Clause", "Apache-2.0", "MIT" ] | 3 | 2021-07-03T15:26:05.000Z | 2021-09-01T09:50:44.000Z | tools/adb.py | lingyfh/r8 | 03f8577d014124c37760c0d6439a8dcbeb0742a1 | [ "BSD-3-Clause", "Apache-2.0", "MIT" ] | null | null | null | tools/adb.py | lingyfh/r8 | 03f8577d014124c37760c0d6439a8dcbeb0742a1 | [ "BSD-3-Clause", "Apache-2.0", "MIT" ] | 1 | 2021-09-24T10:09:20.000Z | 2021-09-24T10:09:20.000Z |
#!/usr/bin/env python
# Copyright (c) 2020, the R8 project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import subprocess
import time
import utils
def install_apk_on_emulator(apk, emulator_id, quiet=False):
cmd = ['adb', '-s', emulator_id, 'install', '-r', '-d', apk]
if quiet:
subprocess.check_output(cmd)
else:
subprocess.check_call(cmd)
def uninstall_apk_on_emulator(app_id, emulator_id):
process = subprocess.Popen(
['adb', '-s', emulator_id, 'uninstall', app_id],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
stdout = stdout.decode('UTF-8')
stderr = stderr.decode('UTF-8')
if stdout.strip() == 'Success':
# Successfully uninstalled
return
if 'Unknown package: {}'.format(app_id) in stderr:
# Application not installed
return
# Check if the app is listed in packages
  packages = subprocess.check_output(['adb', 'shell', 'pm', 'list', 'packages']).decode('UTF-8')
  if 'package:' + app_id not in packages:
return
raise Exception(
'Unexpected result from `adb uninstall {}\nStdout: {}\nStderr: {}'.format(
app_id, stdout, stderr))
def wait_for_emulator(emulator_id):
stdout = subprocess.check_output(['adb', 'devices']).decode('UTF-8')
if '{}\tdevice'.format(emulator_id) in stdout:
return True
print('Emulator \'{}\' not connected; waiting for connection'.format(
emulator_id))
time_waited = 0
while True:
time.sleep(10)
time_waited += 10
stdout = subprocess.check_output(['adb', 'devices']).decode('UTF-8')
if '{}\tdevice'.format(emulator_id) not in stdout:
print('... still waiting for connection')
if time_waited >= 5 * 60:
return False
else:
return True
def run_monkey(app_id, emulator_id, apk, monkey_events, quiet, enable_logging):
if not wait_for_emulator(emulator_id):
return False
install_apk_on_emulator(apk, emulator_id, quiet)
# Intentionally using a constant seed such that the monkey generates the same
# event sequence for each shrinker.
random_seed = 42
cmd = ['adb', '-s', emulator_id, 'shell', 'monkey', '-p', app_id,
'-s', str(random_seed), str(monkey_events)]
try:
stdout = utils.RunCmd(cmd, quiet=quiet, logging=enable_logging)
succeeded = ('Events injected: {}'.format(monkey_events) in stdout)
except subprocess.CalledProcessError as e:
succeeded = False
uninstall_apk_on_emulator(app_id, emulator_id)
return succeeded
def run_instrumented(app_id, test_id, emulator_id, apk, test_apk, quiet,
enable_logging,
test_runner='androidx.test.runner.AndroidJUnitRunner'):
if not wait_for_emulator(emulator_id):
return None
install_apk_on_emulator(apk, emulator_id, quiet)
install_apk_on_emulator(test_apk, emulator_id, quiet)
cmd = ['adb', '-s', emulator_id, 'shell', 'am', 'instrument', '-w',
'{}/{}'.format(test_id, test_runner)]
try:
stdout = utils.RunCmd(cmd, quiet=quiet, logging=enable_logging)
    # The runner will print OK (X tests) if completed successfully
succeeded = any("OK (" in s for s in stdout)
except subprocess.CalledProcessError as e:
succeeded = False
uninstall_apk_on_emulator(test_id, emulator_id)
uninstall_apk_on_emulator(app_id, emulator_id)
return succeeded
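# Hedged usage sketch (not part of the original tools/adb.py): one way the helpers above
# might be combined to run a monkey test. The emulator id, package id and APK path are
# made-up placeholders, not values taken from this repository.
def _example_monkey_run():
  emulator_id = 'emulator-5554'   # hypothetical device id
  app_id = 'com.example.app'      # hypothetical package name
  apk = 'out/app-release.apk'     # hypothetical APK path
  succeeded = run_monkey(app_id, emulator_id, apk,
                         monkey_events=250, quiet=True, enable_logging=False)
  print('monkey run succeeded: {}'.format(succeeded))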
| 30.776786 | 80 | 0.693937 |
0b37439270d732847abdfbf043fc10cd217545d3 | 303 | py | Python | src/backend/app/utils/jsonutils.py | douglasdaly/dougliz-wedding | da673baf3a7387c3818960c0ffa12ab7c830a89b | [ "MIT" ] | 5 | 2020-02-02T02:39:23.000Z | 2021-08-17T15:50:57.000Z | src/backend/app/utils/jsonutils.py | douglasdaly/dougliz-wedding | da673baf3a7387c3818960c0ffa12ab7c830a89b | [ "MIT" ] | 4 | 2021-03-31T19:39:57.000Z | 2022-01-22T09:29:10.000Z | src/backend/app/utils/jsonutils.py | douglasdaly/dougliz-wedding | da673baf3a7387c3818960c0ffa12ab7c830a89b | [ "MIT" ] | 1 | 2020-04-12T17:11:43.000Z | 2020-04-12T17:11:43.000Z |
# -*- coding: utf-8 -*-
"""
JSON utilities.
"""
import json
from uuid import UUID
class JSONEncoderUUID(json.JSONEncoder):
"""
JSON Encoder with UUID support.
"""
def default(self, obj):
if isinstance(obj, UUID):
return obj.hex
return super().default(obj)
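# Hedged usage sketch (not part of the original module): serializing a payload that
# contains a UUID value by passing the encoder above via `cls`. The payload fields are
# illustrative only.
def _example_dump():
    from uuid import uuid4
    payload = {"id": uuid4(), "label": "example"}
    # json and JSONEncoderUUID are imported/defined above in this module
    return json.dumps(payload, cls=JSONEncoderUUID)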
| 16.833333 | 40 | 0.59736 |
610228375392606f289a9d6cc2fc1b6be0d0ae40 | 3,572 | py | Python | custom_components/fpl/config_flow.py | dorgan/hass-fpl | 304b4d4f5f6186202fee1d94fb988a298f8f0777 | [ "MIT" ] | null | null | null | custom_components/fpl/config_flow.py | dorgan/hass-fpl | 304b4d4f5f6186202fee1d94fb988a298f8f0777 | [ "MIT" ] | null | null | null | custom_components/fpl/config_flow.py | dorgan/hass-fpl | 304b4d4f5f6186202fee1d94fb988a298f8f0777 | [ "MIT" ] | null | null | null |
from collections import OrderedDict
import voluptuous as vol
from .fplapi import FplApi
from homeassistant import config_entries
import aiohttp
from .const import DOMAIN, CONF_USERNAME, CONF_PASSWORD, CONF_NAME
from .fplapi import (
LOGIN_RESULT_OK,
LOGIN_RESULT_INVALIDUSER,
LOGIN_RESULT_INVALIDPASSWORD,
)
from homeassistant.core import callback
@callback
def configured_instances(hass):
"""Return a set of configured SimpliSafe instances."""
entites = []
for entry in hass.config_entries.async_entries(DOMAIN):
entites.append(f"{entry.data.get(CONF_USERNAME)}")
return set(entites)
class FplFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize."""
self._errors = {}
async def async_step_user(
self, user_input={}
): # pylint: disable=dangerous-default-value
"""Handle a flow initialized by the user."""
self._errors = {}
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
if self.hass.data.get(DOMAIN):
return self.async_abort(reason="single_instance_allowed")
if user_input is not None:
username = user_input[CONF_USERNAME]
password = user_input[CONF_PASSWORD]
if username not in configured_instances(self.hass):
api = FplApi(username, password, None)
result = await api.login()
if result == LOGIN_RESULT_OK:
fplData = await api.get_data()
accounts = fplData["accounts"]
user_input["accounts"] = accounts
return self.async_create_entry(title="", data=user_input)
if result == LOGIN_RESULT_INVALIDUSER:
self._errors[CONF_USERNAME] = "invalid_username"
if result == LOGIN_RESULT_INVALIDPASSWORD:
self._errors[CONF_PASSWORD] = "invalid_password"
                if result is None:
self._errors["base"] = "auth"
else:
self._errors[CONF_NAME] = "name_exists"
return await self._show_config_form(user_input)
return await self._show_config_form(user_input)
async def _show_config_form(self, user_input):
"""Show the configuration form to edit location data."""
# Defaults
username = ""
password = ""
if user_input is not None:
if CONF_USERNAME in user_input:
username = user_input[CONF_USERNAME]
if CONF_PASSWORD in user_input:
password = user_input[CONF_PASSWORD]
data_schema = OrderedDict()
data_schema[vol.Required(CONF_USERNAME, default=username)] = str
data_schema[vol.Required(CONF_PASSWORD, default=password)] = str
return self.async_show_form(
step_id="user", data_schema=vol.Schema(data_schema), errors=self._errors
)
async def async_step_import(self, user_input): # pylint: disable=unused-argument
"""Import a config entry.
Special type of import, we're not actually going to store any data.
Instead, we're going to rely on the values that are in config file.
"""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
return self.async_create_entry(title="configuration.yaml", data={})
| 32.472727 | 85 | 0.640258 |
2a717c2d0f014320e0c6db667e1b75eef413486c | 282 | py | Python | invenio_stats/contrib/file_download/__init__.py | Glignos/invenio-stats | dd17ed37932d78432a4637498c1e54131ae66a9a | [ "MIT" ] | null | null | null | invenio_stats/contrib/file_download/__init__.py | Glignos/invenio-stats | dd17ed37932d78432a4637498c1e54131ae66a9a | [ "MIT" ] | null | null | null | invenio_stats/contrib/file_download/__init__.py | Glignos/invenio-stats | dd17ed37932d78432a4637498c1e54131ae66a9a | [ "MIT" ] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""File downloads Elasticsearch index templates."""
| 28.2 | 72 | 0.712766 |
1ffda41af80d6a2b20206f773189fa59adacb773 | 960 | py | Python | text generator/scrapy/indeed/pipelines.py | BaiganKing/allay-ds | a04f1eba7880e9f88b7f3e94f4213b3d755c92ea | [ "MIT" ] | 1 | 2020-04-08T22:26:39.000Z | 2020-04-08T22:26:39.000Z | text generator/scrapy/indeed/pipelines.py | BaiganKing/allay-ds | a04f1eba7880e9f88b7f3e94f4213b3d755c92ea | [ "MIT" ] | 7 | 2020-04-23T20:43:37.000Z | 2020-11-13T17:14:24.000Z | text generator/scrapy/indeed/pipelines.py | BloomTech-Labs/allay-ds | ee81a9ff0eb28a19f1c634ce72f5752c36ffc786 | [ "MIT" ] | 3 | 2020-04-29T15:32:22.000Z | 2020-08-13T15:40:41.000Z |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from scrapy.exporters import CsvItemExporter
class ValidateItemPipeline(object):
def process_item(self, item, spider):
if not all(item.values()):
raise DropItem("Missing values!")
else:
return item
class WriteItemPipeline(object):
def __init__(self):
self.filename = 'indeed.csv'
def open_spider(self, spider):
self.csvfile = open(self.filename, 'wb')
self.exporter = CsvItemExporter(self.csvfile)
self.exporter.start_exporting()
def close_spider(self, spider):
self.exporter.finish_exporting()
self.csvfile.close()
def process_item(self, item, spider):
self.exporter.export_item(item)
return item
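# Hedged sketch (not part of the original pipelines.py): the ITEM_PIPELINES entry that
# would normally live in the project's settings.py so these two pipelines actually run,
# with validation ordered before export. The dotted module path assumes the Scrapy
# project package is named `indeed`, as the repo layout suggests; adjust as needed.
ITEM_PIPELINES = {
    'indeed.pipelines.ValidateItemPipeline': 300,
    'indeed.pipelines.WriteItemPipeline': 800,
}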
| 27.428571 | 65 | 0.675 |
232a418260d4319f55376ac35f5f394d36a3ea13 | 55,927 | py | Python | entity_event/models.py | robdmc/django-entity-event | 8426734faba5e22169b3c329aa9e088b9300dd35 | [ "MIT" ] | null | null | null | entity_event/models.py | robdmc/django-entity-event | 8426734faba5e22169b3c329aa9e088b9300dd35 | [ "MIT" ] | null | null | null | entity_event/models.py | robdmc/django-entity-event | 8426734faba5e22169b3c329aa9e088b9300dd35 | [ "MIT" ] | null | null | null |
from collections import defaultdict
from datetime import datetime
from operator import or_
from six.moves import reduce
from cached_property import cached_property
from django.contrib.postgres.fields import JSONField
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models, transaction
from django.db.models import Q
from django.db.models.query import QuerySet
from django.template.loader import render_to_string
from django.template import Context, Template
from django.utils.encoding import python_2_unicode_compatible
from entity.models import Entity, EntityRelationship
from entity_event.context_serializer import DefaultContextSerializer
@python_2_unicode_compatible
class Medium(models.Model):
"""
A ``Medium`` is an object in the database that defines the method
by which users will view events. The actual objects in the
database are fairly simple, only requiring a ``name``,
``display_name`` and ``description``. Mediums can be created with
``Medium.objects.create``, using the following parameters:
:type name: str
:param name: A short, unique name for the medium.
:type display_name: str
:param display_name: A short, human readable name for the medium.
Does not need to be unique.
:type description: str
:param description: A human readable description of the
medium.
Encoding a ``Medium`` object in the database serves two
purposes. First, it is referenced when subscriptions are
created. Second the ``Medium`` objects provide an entry point to
query for events and have all the subscription logic and filtering
taken care of for you.
Any time a new way to display events to a user is created, a
corresponding ``Medium`` should be created. Some examples could
include a medium for sending email notifications, a medium for
individual newsfeeds, or a medium for a site wide notification
center.
Once a medium object is created, and corresponding subscriptions
are created, there are three methods on the medium object that can
be used to query for events. They are ``events``,
``entity_events`` and ``events_targets``. The differences between
these methods are described in their corresponding documentation.
A medium can use a ``RenderingStyle`` to use a configured style of rendering
with the medium. Any associated ``ContextRenderer`` models defined with
that rendering style will be used to render events in the ``render`` method
of the medium. This is an optional part of Entity Event's built-in
rendering system. If a rendering style is not set up for a particular source or
source group, it will try to use the default rendering style specified
in settings.
    A medium can also provide ``additional_context`` that will always be passed
to the templates of its rendered events. This allows for medium-specific rendering
styles to be used. For example, perhaps a medium wishes to display a short description
of an event but does not wish to display the names of the event actors since those
names are already displayed in other places on the page. In this case, the medium
can always pass additional context to suppress rendering of names.
"""
# A name and display name for the medium along with a description for any
# application display
name = models.CharField(max_length=64, unique=True)
display_name = models.CharField(max_length=64)
description = models.TextField()
time_created = models.DateTimeField(auto_now_add=True)
# The rendering style determines the primary way the medium will try to render events.
# If a context loader has been defined for this rendering style along with the appropriate
# source, the renderer will be used. If a context renderer has not been set up with this
# rendering style, it will try to use the default style configured in settings.
rendering_style = models.ForeignKey('entity_event.RenderingStyle', null=True, on_delete=models.CASCADE)
# These values are passed in as additional context to whatever event is being rendered.
additional_context = JSONField(null=True, default=None, encoder=DjangoJSONEncoder)
def __str__(self):
"""
Readable representation of ``Medium`` objects.
"""
return self.display_name
@transaction.atomic
def events(self, **event_filters):
"""
Return subscribed events, with basic filters.
This method of getting events is useful when you want to
display events for your medium, independent of what entities
were involved in those events. For example, this method can be
used to display a list of site-wide events that happened in the
past 24 hours:
.. code-block:: python
TEMPLATE = '''
<html><body>
<h1> Yoursite's Events </h1>
<ul>
{% for event in events %}
<li> {{ event.context.event_text }} </li>
{% endfor %}
</ul>
</body></html>
'''
def site_feed(request):
site_feed_medium = Medium.objects.get(name='site_feed')
start_time = datetime.utcnow() - timedelta(days=1)
context = {}
context['events'] = site_feed_medium.events(start_time=start_time)
return HttpResponse(TEMPLATE.render(context))
While the `events` method does not filter events based on what
entities are involved, filtering based on the properties of the events
themselves is supported, through the following arguments, all
of which are optional.
:type start_time: datetime.datetime (optional)
:param start_time: Only return events that occurred after the
given time. If no time is given for this argument, no
filtering is done.
:type end_time: datetime.datetime (optional)
:param end_time: Only return events that occurred before the
given time. If no time is given for this argument, no
filtering is done
:type seen: Boolean (optional)
        :param seen: This flag controls whether events that have been
marked as seen are included. By default, both events that
have and have not been marked as seen are included. If
``True`` is given for this parameter, only events that
have been marked as seen will be included. If ``False`` is
given, only events that have not been marked as seen will
be included.
:type include_expired: Boolean (optional)
        :param include_expired: By default, events that have an
expiration time, which has passed, are not included in the
results. Passing in ``True`` to this argument causes
expired events to be returned as well.
:type actor: Entity (optional)
:param actor: Only include events with the given entity as an
actor.
:type mark_seen: Boolean (optional)
:param mark_seen: Create a side effect in the database that
marks all the returned events as having been seen by this
medium.
:rtype: EventQuerySet
:returns: A queryset of events.
"""
events = self.get_filtered_events(**event_filters)
subscriptions = Subscription.objects.cache_related().filter(
medium=self
)
subscription_q_objects = [
Q(
eventactor__entity__in=self.followed_by(sub.subscribed_entities()),
source=sub.source
)
for sub in subscriptions if sub.only_following
]
subscription_q_objects.append(
Q(source__in=[sub.source for sub in subscriptions if not sub.only_following]))
events = events.cache_related().filter(reduce(or_, subscription_q_objects))
return events
@transaction.atomic
def entity_events(self, entity, **event_filters):
"""
Return subscribed events for a given entity.
This method of getting events is useful when you want to see
only the events relevant to a single entity. The events
returned are events that the given entity is subscribed to,
either directly as an individual entity, or because they are
part of a group subscription. As an example, the
`entity_events` method can be used to implement a newsfeed for
        an individual entity:
.. code-block:: python
TEMPLATE = '''
<html><body>
<h1> {entity}'s Events </h1>
<ul>
{% for event in events %}
<li> {{ event.context.event_text }} </li>
{% endfor %}
</ul>
</body></html>
'''
def newsfeed(request):
newsfeed_medium = Medium.objects.get(name='newsfeed')
entity = Entity.get_for_obj(request.user)
context = {}
context['entity'] = entity
                context['events'] = newsfeed_medium.entity_events(entity, seen=False, mark_seen=True)
return HttpResponse(TEMPLATE.render(context))
The only required argument for this method is the entity to
get events for. Filtering based on the properties of the
events themselves is supported, through the rest of the
following arguments, which are optional.
        :type entity: Entity
:param entity: The entity to get events for.
:type start_time: datetime.datetime (optional)
:param start_time: Only return events that occurred after the
given time. If no time is given for this argument, no
filtering is done.
:type end_time: datetime.datetime (optional)
:param end_time: Only return events that occurred before the
given time. If no time is given for this argument, no
filtering is done
:type seen: Boolean (optional)
        :param seen: This flag controls whether events that have been
marked as seen are included. By default, both events that
have and have not been marked as seen are included. If
``True`` is given for this parameter, only events that
have been marked as seen will be included. If ``False`` is
given, only events that have not been marked as seen will
be included.
:type include_expired: Boolean (optional)
        :param include_expired: By default, events that have an
expiration time, which has passed, are not included in the
results. Passing in ``True`` to this argument causes
expired events to be returned as well.
:type actor: Entity (optional)
:param actor: Only include events with the given entity as an
actor.
:type mark_seen: Boolean (optional)
:param mark_seen: Create a side effect in the database that
marks all the returned events as having been seen by this
medium.
:rtype: EventQuerySet
:returns: A queryset of events.
"""
events = self.get_filtered_events(**event_filters)
subscriptions = Subscription.objects.filter(medium=self)
subscriptions = self.subset_subscriptions(subscriptions, entity)
subscription_q_objects = [
Q(
eventactor__entity__in=self.followed_by(entity),
source=sub.source
)
for sub in subscriptions if sub.only_following
]
subscription_q_objects.append(
Q(source__in=[sub.source for sub in subscriptions if not sub.only_following])
)
return [
event for event in events.filter(reduce(or_, subscription_q_objects))
if self.filter_source_targets_by_unsubscription(event.source_id, [entity])
]
@transaction.atomic
def events_targets(self, entity_kind=None, **event_filters):
"""
Return all events for this medium, with who each event is for.
This method is useful for individually notifying every entity concerned with a collection of events, while
        still respecting subscriptions and unsubscriptions. For example, ``events_targets`` can be used to send email
notifications, by retrieving all unseen events (and marking them as now having been seen), and then processing
the emails. In code, this could look like:
.. code-block:: python
email = Medium.objects.get(name='email')
new_emails = email.events_targets(seen=False, mark_seen=True)
for event, targets in new_emails:
django.core.mail.send_mail(
subject = event.context["subject"]
message = event.context["message"]
recipient_list = [t.entity_meta["email"] for t in targets]
)
This ``events_targets`` method attempts to make bulk processing of push-style notifications straightforward.
This sort of processing should normally occur in a separate thread from any request/response cycle.
Filtering based on the properties of the events themselves is supported, through the rest of the following
arguments, which are optional.
:type entity_kind: EntityKind
:param entity_kind: Only include targets of the given kind in each targets list.
:type start_time: datetime.datetime (optional)
:param start_time: Only return events that occurred after the given time. If no time is given for this argument,
no filtering is done.
:type end_time: datetime.datetime (optional)
:param end_time: Only return events that occurred before the given time. If no time is given for this argument,
no filtering is done
:type seen: Boolean (optional)
        :param seen: This flag controls whether events that have been marked as seen are included. By default, both events
that have and have not been marked as seen are included. If ``True`` is given for this parameter, only
events that have been marked as seen will be included. If ``False`` is given, only events that have not
been marked as seen will be included.
:type include_expired: Boolean (optional)
        :param include_expired: By default, events that have an expiration time, which has passed, are not included in
the results. Passing in ``True`` to this argument causes expired events to be returned as well.
:type actor: Entity (optional)
:param actor: Only include events with the given entity as an actor.
:type mark_seen: Boolean (optional)
:param mark_seen: Create a side effect in the database that marks all the returned events as having been seen
by this medium.
:rtype: List of tuples
:returns: A list of tuples in the form ``(event, targets)`` where ``targets`` is a list of entities.
"""
events = self.get_filtered_events(**event_filters)
subscriptions = Subscription.objects.filter(medium=self)
event_pairs = []
for event in events:
targets = []
for sub in subscriptions:
if event.source != sub.source:
continue
subscribed = sub.subscribed_entities()
if sub.only_following:
potential_targets = self.followers_of(
event.eventactor_set.values_list('entity__id', flat=True)
)
subscription_targets = list(Entity.objects.filter(
Q(id__in=subscribed), Q(id__in=potential_targets)))
else:
subscription_targets = list(subscribed)
targets.extend(subscription_targets)
targets = self.filter_source_targets_by_unsubscription(event.source_id, targets)
if entity_kind:
targets = [t for t in targets if t.entity_kind == entity_kind]
if targets:
event_pairs.append((event, targets))
return event_pairs
def subset_subscriptions(self, subscriptions, entity=None):
"""
Return only subscriptions the given entity is a part of.
An entity is "part of a subscription" if either:
1. The subscription is for that entity, with no
sub-entity-kind. That is, it is not a group subscription.
2. The subscription is for a super-entity of the given entity,
and the subscription's sub-entity-kind is the same as that of
the entity's.
:type subscriptions: QuerySet
:param subscriptions: A QuerySet of subscriptions to subset.
:type entity: (optional) Entity
:param entity: Subset subscriptions to only those relevant for
this entity.
:rtype: QuerySet
:returns: A queryset of filtered subscriptions.
"""
if entity is None:
return subscriptions
super_entities = EntityRelationship.objects.filter(
sub_entity=entity).values_list('super_entity')
subscriptions = subscriptions.filter(
Q(entity=entity, sub_entity_kind=None) |
Q(entity__in=super_entities, sub_entity_kind=entity.entity_kind)
)
return subscriptions
@cached_property
def unsubscriptions(self):
"""
Returns the unsubscribed entity IDs for each source as a dict,
keyed on source_id.
:rtype: Dictionary
:returns: A dictionary of the form ``{source_id: entities}``
where ``entities`` is a list of entities unsubscribed from
that source for this medium.
"""
unsubscriptions = defaultdict(list)
for unsub in Unsubscription.objects.filter(medium=self).values('entity', 'source'):
unsubscriptions[unsub['source']].append(unsub['entity'])
return unsubscriptions
def filter_source_targets_by_unsubscription(self, source_id, targets):
"""
Given a source id and targets, filter the targets by
unsubscriptions. Return the filtered list of targets.
"""
unsubscriptions = self.unsubscriptions
return [t for t in targets if t.id not in unsubscriptions[source_id]]
def get_filtered_events_queries(self, start_time, end_time, seen, include_expired, actor):
"""
Return Q objects to filter events table to relevant events.
The filters that are applied are those passed in from the
method that is querying the events table: One of ``events``,
``entity_events`` or ``events_targets``. The arguments have
the behavior documented in those methods.
:rtype: List of Q objects
:returns: A list of Q objects, which can be used as arguments
to ``Event.objects.filter``.
"""
now = datetime.utcnow()
filters = []
if start_time is not None:
filters.append(Q(time__gte=start_time))
if end_time is not None:
filters.append(Q(time__lte=end_time))
if not include_expired:
filters.append(Q(time_expires__gte=now))
# Check explicitly for True and False as opposed to None
# - `seen==False` gets unseen notifications
# - `seen is None` does no seen/unseen filtering
if seen is True:
filters.append(Q(eventseen__medium=self))
elif seen is False:
unseen_ids = _unseen_event_ids(medium=self)
filters.append(Q(id__in=unseen_ids))
# Filter by actor
if actor is not None:
filters.append(Q(eventactor__entity=actor))
return filters
def get_filtered_events(
self, start_time=None, end_time=None, seen=None, mark_seen=False, include_expired=False, actor=None):
"""
Retrieves events, filters by event level filters, and marks them as
seen if necessary.
:rtype: EventQuerySet
:returns: All events which match the given filters.
"""
filtered_events_queries = self.get_filtered_events_queries(start_time, end_time, seen, include_expired, actor)
events = Event.objects.filter(*filtered_events_queries)
if seen is False and mark_seen:
# Evaluate the event qset here and create a new queryset that is no longer filtered by
# if the events are marked as seen. We do this because we want to mark the events
            # as seen in the next line of code. If we didn't evaluate the qset here first, it would
            # result in not returning unseen events since they are marked as seen.
events = Event.objects.filter(id__in=list(e.id for e in events))
events.mark_seen(self)
return events
def followed_by(self, entities):
"""
Define what entities are followed by the entities passed to this
method.
This method can be overridden by a class that concretely
inherits ``Medium``, to define custom semantics for the
``only_following`` flag on relevant ``Subscription``
objects. Overriding this method, and ``followers_of`` will be
sufficient to define that behavior. This method is not useful
to call directly, but is used by the methods that filter
events and targets.
This implementation attempts to provide a sane default. In
this implementation, the entities followed by the ``entities``
argument are the entities themselves, and their super entities.
That is, individual entities follow themselves, and the groups
they are a part of. This works as a default implementation,
but, for example, an alternate medium may wish to define the
opposite behavior, where an individual entity follows
themselves and all of their sub-entities.
Return a queryset of the entities that the given entities are
following. This needs to be the inverse of ``followers_of``.
:type entities: Entity or EntityQuerySet
:param entities: The Entity, or QuerySet of Entities of interest.
:rtype: EntityQuerySet
:returns: A QuerySet of all the entities followed by any of
those given.
"""
if isinstance(entities, Entity):
entities = Entity.objects.filter(id=entities.id)
super_entities = EntityRelationship.objects.filter(
sub_entity__in=entities).values_list('super_entity')
followed_by = Entity.objects.filter(
Q(id__in=entities) | Q(id__in=super_entities))
return followed_by
def followers_of(self, entities):
"""
Define what entities are followers of the entities passed to this
method.
This method can be overridden by a class that concretely
inherits ``Medium``, to define custom semantics for the
``only_following`` flag on relevant ``Subscription``
objects. Overriding this method, and ``followed_by`` will be
sufficient to define that behavior. This method is not useful
to call directly, but is used by the methods that filter
events and targets.
This implementation attempts to provide a sane default. In
this implementation, the followers of the entities passed in
are defined to be the entities themselves, and their
sub-entities.
That is, the followers of individual entities are themselves,
and if the entity has sub-entities, those sub-entities. This
works as a default implementation, but, for example, an
alternate medium may wish to define the opposite behavior,
        where the followers of an individual entity are themselves
and all of their super-entities.
Return a queryset of the entities that follow the given
entities. This needs to be the inverse of ``followed_by``.
:type entities: Entity or EntityQuerySet
:param entities: The Entity, or QuerySet of Entities of interest.
:rtype: EntityQuerySet
:returns: A QuerySet of all the entities who are followers of
any of those given.
"""
if isinstance(entities, Entity):
entities = Entity.objects.filter(id=entities.id)
sub_entities = EntityRelationship.objects.filter(
super_entity__in=entities).values_list('sub_entity')
followers_of = Entity.objects.filter(
Q(id__in=entities) | Q(id__in=sub_entities))
return followers_of
def render(self, events):
"""
Renders a list of events for this medium. The events first have their contexts loaded.
Afterwards, the rendered events are returned as a dictionary keyed on the event itself.
The key points to a tuple of (txt, html) renderings of the event.
:type events: list
:param events: A list or queryset of Event models.
:rtype: dict
:returns: A dictionary of rendered text and html tuples keyed on the provided events.
"""
from entity_event import context_loader
context_loader.load_contexts_and_renderers(events, [self])
return {e: e.render(self) for e in events}
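# Hedged usage sketch (not part of the original module): rendering a medium's unseen
# events and unpacking the (text, html) tuples keyed on each event. The medium name
# is illustrative only.
def _example_render_unseen():
    email_medium = Medium.objects.get(name='email')
    rendered = email_medium.render(email_medium.events(seen=False))
    return {event.uuid: {'text': txt, 'html': html}
            for event, (txt, html) in rendered.items()}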
@python_2_unicode_compatible
class Source(models.Model):
"""
A ``Source`` is an object in the database that represents where
events come from. These objects only require a few fields,
``name``, ``display_name`` ``description``, and ``group``.
Source objects categorize events
based on where they came from, or what type of information they
contain. Each source should be fairly fine grained, with broader
categorizations possible through ``SourceGroup`` objects. Sources
can be created with ``Source.objects.create`` using the following
parameters:
:type name: str
:param name: A short, unique name for the source.
:type display_name: str
:param display_name: A short, human readable name for the source.
Does not need to be unique.
:type description: str
:param description: A human readable description of the source.
:type group: SourceGroup
:param group: A SourceGroup object. A broad grouping of where the
events originate.
    Storing source objects in the database serves two purposes. The
first is to provide an object that Subscriptions can reference,
allowing different categories of events to be subscribed to over
different mediums. The second is to allow source instances to
store a reference to a function which can populate event contexts
with additional information that is relevant to the source. This
allows ``Event`` objects to be created with minimal data
duplication.
Once sources are created, they will primarily be used to
categorize events, as each ``Event`` object requires a reference
to a source. Additionally they will be referenced by
``Subscription`` objects to route events of the given source to be
handled by a given medium.
"""
name = models.CharField(max_length=64, unique=True)
display_name = models.CharField(max_length=64)
description = models.TextField()
group = models.ForeignKey('entity_event.SourceGroup', on_delete=models.CASCADE)
def __str__(self):
"""
Readable representation of ``Source`` objects.
"""
return self.display_name
@python_2_unicode_compatible
class SourceGroup(models.Model):
"""
A ``SourceGroup`` object is a high level categorization of
events. Since ``Source`` objects are meant to be very fine
grained, they are collected into ``SourceGroup`` objects. There is
no additional behavior associated with the source groups other
than further categorization. Source groups can be created with
``SourceGroup.objects.create``, which takes the following
arguments:
:type name: str
:param name: A short, unique name for the source group.
:type display_name: str
:param display_name: A short, human readable name for the source
group. Does not need to be unique.
:type description: str
:param description: A human readable description of the source
group.
"""
name = models.CharField(max_length=64, unique=True)
display_name = models.CharField(max_length=64)
description = models.TextField()
def __str__(self):
"""
Readable representation of ``SourceGroup`` objects.
"""
return self.display_name
@python_2_unicode_compatible
class Unsubscription(models.Model):
"""
Because django-entity-event allows for whole groups to be
subscribed to events at once, unsubscribing an entity is not as
simple as removing their subscription object. Instead, the
Unsubscription table provides a simple way to ensure that an
entity does not see events if they don't want to.
Unsubscriptions are created for a single entity at a time, where
they are unsubscribed for events from a source on a medium. This
is stored as an ``Unsubscription`` object in the database, which
can be created using ``Unsubscription.objects.create`` using the
following arguments:
:type entity: Entity
:param entity: The entity to unsubscribe.
:type medium: Medium
:param medium: The ``Medium`` object representing where they don't
want to see the events.
:type source: Source
:param source: The ``Source`` object representing what category
of event they no longer want to see.
Once an ``Unsubscription`` object is created, all of the logic to
    ensure that they do not see events from the given source by the
given medium is handled by the methods used to query for events
via the ``Medium`` object. That is, once the object is created, no
more work is needed to unsubscribe them.
"""
entity = models.ForeignKey('entity.Entity', on_delete=models.CASCADE)
medium = models.ForeignKey('entity_event.Medium', on_delete=models.CASCADE)
source = models.ForeignKey('entity_event.Source', on_delete=models.CASCADE)
def __str__(self):
"""
Readable representation of ``Unsubscription`` objects.
"""
s = '{entity} from {source} by {medium}'
entity = self.entity.__str__()
source = self.source.__str__()
medium = self.medium.__str__()
return s.format(entity=entity, source=source, medium=medium)
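# Hedged usage sketch (not part of the original module): opting a single entity out of
# "photos" events delivered over the "newsfeed" medium. The source and medium names
# mirror the illustrative names used elsewhere in this module.
def _example_unsubscribe(entity):
    return Unsubscription.objects.create(
        entity=entity,
        medium=Medium.objects.get(name='newsfeed'),
        source=Source.objects.get(name='photos'),
    )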
class SubscriptionQuerySet(QuerySet):
"""
A custom QuerySet for Subscriptions.
"""
def cache_related(self):
"""
Cache any related objects that we may use
:return:
"""
return self.select_related('medium', 'source', 'entity', 'sub_entity_kind')
@python_2_unicode_compatible
class Subscription(models.Model):
"""
Which types of events are available to which mediums is controlled through ``Subscription`` objects. By creating a
``Subscription`` object in the database, you are storing that events from a given ``Source`` object should be
available to a given ``Medium`` object.
Each ``Subscription`` object can be one of two levels, either an individual subscription or a group subscription.
Additionally, each ``Subscription`` object can be one of two types of subscription, either a global subscription,
or an "only following" subscription. ``Subscription`` objects are created using ``Subscription.objects.create``
which takes the following arguments:
:type medium: Medium
:param medium: The ``Medium`` object to make events available to.
:type source: Source
:param source: The ``Source`` object that represents the category of events to make available.
:type entity: Entity
:param entity: The entity to subscribe in the case of an individual subscription, or in the case of a group
subscription, the super-entity of the group.
:type sub_entity_kind: (optional) EntityKind
:param sub_entity_kind: When creating a group subscription, this is a foreign key to the ``EntityKind`` of the
sub-entities to subscribe. In the case of an individual subscription, this should be ``None``.
:type only_following: Boolean
:param only_following: If ``True``, events will be available to entities through the medium only if the entities
are following the actors of the event. If ``False``, the events will all be available to all the entities
through the medium.
When a ``Medium`` object is used to query for events, only the
events that have a subscription for their source to that medium
will ever be returned. This is an extremely useful property that
allows complex subscription logic to be handled simply by storing
subscription objects in the database.
Storing subscriptions is made simpler by the ability to subscribe
groups of entities with a single subscription object. Groups of
entities of a given kind can be subscribed by subscribing their
super-entity and providing the ``sub_entity_kind`` argument.
Subscriptions further are specified to be either an "only following"
subscription or not. This specification controls what
events will be returned when ``Medium.entity_events`` is called,
and controls what targets are returned when
``Medium.events_targets`` is called.
For example, if events are created for a new photo being uploaded
(from a single source called, say "photos"), and we want to provide
individuals with a notification in their newsfeed (through a
medium called "newsfeed"), we want to be able to display only the
events where the individual is tagged in the photo. By setting
``only_following`` to true the following code would only return
events where the individual was included in the ``EventActor`` s,
rather than returning all "photos" events:
.. code-block:: python
user_entity = Entity.objects.get_for_obj(user)
newsfeed_medium = Medium.objects.get(name='newsfeed')
        newsfeed_medium.entity_events(user_entity)
The behavior of what constitutes "following" is controlled by the
Medium class. A default implementation of following is provided
and documented in the ``Medium.followers_of`` and
``Medium.followed_by`` methods, but could be extended by
subclasses of Medium.
"""
medium = models.ForeignKey('entity_event.Medium', on_delete=models.CASCADE)
source = models.ForeignKey('entity_event.Source', on_delete=models.CASCADE)
entity = models.ForeignKey('entity.Entity', related_name='+', on_delete=models.CASCADE)
sub_entity_kind = models.ForeignKey(
'entity.EntityKind', null=True, related_name='+', default=None, on_delete=models.CASCADE
)
only_following = models.BooleanField(default=True)
objects = SubscriptionQuerySet.as_manager()
def __str__(self):
"""
Readable representation of ``Subscription`` objects.
"""
s = '{entity} to {source} by {medium}'
entity = self.entity.__str__()
source = self.source.__str__()
medium = self.medium.__str__()
return s.format(entity=entity, source=source, medium=medium)
def subscribed_entities(self):
"""
Return a queryset of all subscribed entities.
This will be a single entity in the case of an individual subscription, otherwise it will be all the entities
in the group subscription.
:rtype: EntityQuerySet
:returns: A QuerySet of all the entities that are a part of this subscription.
"""
if self.sub_entity_kind is not None:
sub_entities = self.entity.sub_relationships.filter(
sub_entity__entity_kind=self.sub_entity_kind).values_list('sub_entity')
entities = Entity.objects.filter(id__in=sub_entities)
else:
entities = Entity.all_objects.filter(id=self.entity.id)
return entities
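# Hedged usage sketch (not part of the original module): a group subscription that routes
# "photos" events to the "newsfeed" medium for every sub-entity of `team_entity` whose
# kind is `user_kind`, restricted to events the subscriber is following. All names are
# illustrative.
def _example_group_subscription(team_entity, user_kind):
    return Subscription.objects.create(
        medium=Medium.objects.get(name='newsfeed'),
        source=Source.objects.get(name='photos'),
        entity=team_entity,
        sub_entity_kind=user_kind,
        only_following=True,
    )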
class EventQuerySet(QuerySet):
"""
A custom QuerySet for Events.
"""
def cache_related(self):
"""
Cache any related objects that we may use
:return:
"""
return self.select_related(
'source'
).prefetch_related(
'source__group'
)
def mark_seen(self, medium):
"""
Creates EventSeen objects for the provided medium for every event
in the queryset.
Creating these EventSeen objects ensures they will not be
returned when passing ``seen=False`` to any of the medium
event retrieval functions, ``events``, ``entity_events``, or
``events_targets``.
"""
EventSeen.objects.bulk_create([
EventSeen(event=event, medium=medium) for event in self
])
def load_contexts_and_renderers(self, medium):
"""
Loads context data into the event ``context`` variable. This method
destroys the queryset and returns a list of events.
"""
from entity_event import context_loader
return context_loader.load_contexts_and_renderers(self, [medium])
class EventManager(models.Manager):
"""
A custom Manager for Events.
"""
def get_queryset(self):
"""
Return the EventQuerySet.
"""
return EventQuerySet(self.model)
def cache_related(self):
"""
Return a queryset with prefetched values
:return:
"""
return self.get_queryset().cache_related()
def mark_seen(self, medium):
"""
Creates EventSeen objects for the provided medium for every event
in the queryset.
Creating these EventSeen objects ensures they will not be
returned when passing ``seen=False`` to any of the medium
event retrieval functions, ``events``, ``entity_events``, or
``events_targets``.
"""
return self.get_queryset().mark_seen(medium)
def load_contexts_and_renderers(self, medium):
"""
Loads context data into the event ``context`` variable. This method
destroys the queryset and returns a list of events.
"""
return self.get_queryset().load_contexts_and_renderers(medium)
@transaction.atomic
def create_event(self, actors=None, ignore_duplicates=False, **kwargs):
"""
Create events with actors.
This method can be used in place of ``Event.objects.create``
to create events, and the appropriate actors. It takes all the
same keywords as ``Event.objects.create`` for the event
creation, but additionally takes a list of actors, and can be
told to not attempt to create an event if a duplicate event
exists.
:type source: Source
:param source: A ``Source`` object representing where the
event came from.
:type context: dict
:param context: A dictionary containing relevant
information about the event, to be serialized into
JSON. It is possible to load additional context
dynamically when events are fetched. See the
documentation on the ``ContextRenderer`` model.
:type uuid: str
:param uuid: A unique string for the event. Requiring a
``uuid`` allows code that creates events to ensure they do
            not create duplicate events. This id could be, for example,
            some hash of the ``context``, or, if the creator is
            unconcerned with creating duplicate events, a call to
            Python's ``uuid1()`` in the ``uuid`` module.
:type time_expires: datetime (optional)
:param time_expires: If given, the default methods for
querying events will not return this event after this time
has passed.
:type actors: (optional) List of entities or list of entity ids.
:param actors: An ``EventActor`` object will be created for
each entity in the list. This allows for subscriptions
which are only following certain entities to behave
appropriately.
:type ignore_duplicates: (optional) Boolean
:param ignore_duplicates: If ``True``, a check will be made to
ensure that an event with the give ``uuid`` does not exist
before attempting to create the event. Setting this to
``True`` allows the creator of events to gracefully ensure
no duplicates are attempted to be created. There is a uniqueness constraint on uuid
so it will raise an exception if duplicates are allowed and submitted.
:rtype: Event
:returns: The created event. Alternatively if a duplicate
event already exists and ``ignore_duplicates`` is
``True``, it will return ``None``.
"""
kwargs['actors'] = actors
kwargs['ignore_duplicates'] = ignore_duplicates
events = self.create_events([kwargs])
if events:
return events[0]
return None
def create_events(self, kwargs_list):
"""
Create events in bulk to save on queries. Each element in the kwargs list should be a dict with the same set
of arguments you would normally pass to create_event
:param kwargs_list: list of kwargs dicts
:return: list of Event
"""
# Build map of uuid to event info
uuid_map = {
kwargs.get('uuid', ''): {
'actors': kwargs.pop('actors', []),
'ignore_duplicates': kwargs.pop('ignore_duplicates', False),
'event_kwargs': kwargs
}
for kwargs in kwargs_list
}
# Check for uuids
uuid_set = set(Event.objects.filter(uuid__in=uuid_map.keys()).values_list('uuid', flat=True))
# Set a flag for whether each uuid exists
for uuid, event_dict in uuid_map.items():
event_dict['exists'] = uuid in uuid_set
# Build list of events to bulk create
events_to_create = []
for uuid, event_dict in uuid_map.items():
# If the event doesn't already exist or the event does exist but we are allowing duplicates
if not event_dict['exists'] or not event_dict['ignore_duplicates']:
events_to_create.append(Event(**event_dict['event_kwargs']))
# Bulk create the events
created_events = Event.objects.bulk_create(events_to_create)
# Build list of EventActor objects to bulk create
event_actors_to_create = []
for created_event in created_events:
event_dict = uuid_map[created_event.uuid]
if event_dict['actors'] is not None:
for actor in event_dict['actors']:
actor_id = actor.id if hasattr(actor, 'id') else actor
event_actors_to_create.append(EventActor(entity_id=actor_id, event=created_event))
EventActor.objects.bulk_create(event_actors_to_create)
return created_events
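# Hedged usage sketch (not part of the original module): creating one event with two
# actors through the manager above. The source, context and uuid values are illustrative
# only; with ignore_duplicates=True a repeat call with the same uuid returns None.
def _example_create_event(photos_source, alice_entity, bob_entity):
    return Event.objects.create_event(
        source=photos_source,
        context={'photo_id': 1234, 'message': 'Alice tagged Bob in a photo'},
        uuid='photo-upload-1234',
        actors=[alice_entity, bob_entity],
        ignore_duplicates=True,
    )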
@python_2_unicode_compatible
class Event(models.Model):
"""
``Event`` objects store information about events. By storing
events, from a given source, with some context, they are made
available to any ``Medium`` object with an appropriate
subscription. Events can be created with
``Event.objects.create_event``, documented above.
When creating an event, the information about what occurred is
stored in a JSON blob in the ``context`` field. This context can
be any type of information that could be useful for displaying
events on a given Medium. It is entirely the role of the
application developer to ensure that there is agreement between
what information is stored in ``Event.context`` and what
information the code the processes and displays events on each
medium expects.
Events will usually be created by code that also created, or knows
about the ``Source`` object that is required to create the event.
To prevent storing unnecessary data in the context, this code can
define a context loader function when creating this source, which
can be used to dynamically fetch more data based on whatever
limited amount of data makes sense to store in the context. This
is further documented in the ``Source`` documentation.
"""
source = models.ForeignKey('entity_event.Source', on_delete=models.CASCADE)
context = JSONField(encoder=DjangoJSONEncoder)
time = models.DateTimeField(auto_now_add=True, db_index=True)
time_expires = models.DateTimeField(default=datetime.max, db_index=True)
uuid = models.CharField(max_length=512, unique=True)
objects = EventManager()
def __init__(self, *args, **kwargs):
super(Event, self).__init__(*args, **kwargs)
# A dictionary that is populated with renderers after the contexts have been
# properly loaded. When renderers are available, the 'render' method may be
# called with a medium and optional observer
self._context_renderers = {}
def _merge_medium_additional_context_with_context(self, medium):
"""
If the medium has additional context properties, merge those together here in the
main context before rendering.
"""
if medium.additional_context:
context = self.context.copy()
context.update(medium.additional_context)
return context
else:
return self.context
def render(self, medium, observing_entity=None):
"""
Returns the rendered event as a tuple of text and html content. This information
is filled out with respect to which medium is rendering the event, what context
renderers are available with the prefetched context, and which optional entity
may be observing the rendered event.
"""
if medium not in self._context_renderers:
            raise RuntimeError('Context and renderer for medium {0} has not been or cannot be fetched'.format(medium))
else:
context = self._merge_medium_additional_context_with_context(medium)
return self._context_renderers[medium].render_context_to_text_html_templates(context)
def get_serialized_context(self, medium):
"""
Returns the serialized context of the event for a specific medium
:param medium:
:return:
"""
if medium not in self._context_renderers:
            raise RuntimeError('Context and renderer for medium {0} has not been or cannot be fetched'.format(medium))
else:
context = self._merge_medium_additional_context_with_context(medium)
return self._context_renderers[medium].get_serialized_context(context)
def __str__(self):
"""
Readable representation of ``Event`` objects.
"""
s = '{source} event at {time}'
source = self.source.__str__()
time = self.time.strftime('%Y-%m-%d::%H:%M:%S')
return s.format(source=source, time=time)
class AdminEvent(Event):
"""
A proxy model used to provide a separate interface for event
creation through the django-admin interface.
"""
class Meta:
proxy = True
@python_2_unicode_compatible
class EventActor(models.Model):
"""
``EventActor`` objects encode what entities were involved in an
event. They provide the information necessary to create "only
following" subscriptions which route events only to the entities
that are involved in the event.
``EventActor`` objects should not be created directly, but should
be created as part of the creation of ``Event`` objects, using
``Event.objects.create_event``.
"""
event = models.ForeignKey('entity_event.Event', on_delete=models.CASCADE)
entity = models.ForeignKey('entity.Entity', on_delete=models.CASCADE)
def __str__(self):
"""
Readable representation of ``EventActor`` objects.
"""
s = 'Event {eventid} - {entity}'
eventid = self.event.id
entity = self.entity.__str__()
return s.format(eventid=eventid, entity=entity)
@python_2_unicode_compatible
class EventSeen(models.Model):
"""
``EventSeen`` objects store information about where and when an
event was seen. They store the medium that the event was seen on,
and what time it was seen. This information is used by the event
querying methods on ``Medium`` objects to filter events by whether
or not they have been seen on that medium.
``EventSeen`` objects should not be created directly, but should
be created by using the ``EventQuerySet.mark_seen`` method,
available on the QuerySets returned by the event querying methods.
"""
event = models.ForeignKey('entity_event.Event', on_delete=models.CASCADE)
medium = models.ForeignKey('entity_event.Medium', on_delete=models.CASCADE)
time_seen = models.DateTimeField(default=datetime.utcnow)
class Meta:
unique_together = ('event', 'medium')
def __str__(self):
"""
Readable representation of ``EventSeen`` objects.
"""
s = 'Seen on {medium} at {time}'
medium = self.medium.__str__()
time = self.time_seen.strftime('%Y-%m-%d::%H:%M:%S')
return s.format(medium=medium, time=time)
def _unseen_event_ids(medium):
"""
Return all events that have not been seen on this medium.
"""
query = '''
SELECT event.id
FROM entity_event_event AS event
LEFT OUTER JOIN (SELECT *
FROM entity_event_eventseen AS seen
WHERE seen.medium_id=%s) AS eventseen
ON event.id = eventseen.event_id
WHERE eventseen.medium_id IS NULL
'''
unseen_events = Event.objects.raw(query, params=[medium.id])
ids = [e.id for e in unseen_events]
return ids
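# A minimal sketch (not used elsewhere in this module; the helper name is hypothetical):
# the raw SQL above is essentially an anti-join, which can also be expressed with the
# ORM, assuming the default related query name from EventSeen.event (``eventseen``).
def _unseen_event_ids_orm_sketch(medium):
    return list(
        Event.objects.exclude(eventseen__medium=medium).values_list('id', flat=True)
    )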
@python_2_unicode_compatible
class RenderingStyle(models.Model):
"""
Defines a rendering style. This is used to group together mediums that have
similar rendering styles and allows context renderers to be used across
mediums.
"""
name = models.CharField(max_length=64, unique=True)
display_name = models.CharField(max_length=64, default='')
def __str__(self):
return '{0} {1}'.format(self.display_name, self.name)
@python_2_unicode_compatible
class ContextRenderer(models.Model):
"""
``ContextRenderer`` objects store information about how
a source or source group is rendered with a particular rendering style, along with
information for loading the render context in a database-efficient
manner.
Of the four template fields: `text_template_path`, 'html_template_path',
`text_template`, and `html_template`, at least one must be
non-empty. Both a text and html template may be provided, either
through a path to the template, or a raw template object.
    If both a path and a raw template are provided, the template given in the
    path will be used and the raw template will be ignored.
This object is linked to a `RenderingStyle` object. This is how the
context renderer is associated with various `Medium` objects. It also
provides the `source` that uses the renderer. If a `source_group` is specified,
all sources under that group use this context renderer for the rendering style.
The `context_hints` provide the ability to fetch model IDs of an event context that
are stored in the database. For example, if an event context has a `user` key that
points to the PK of a Django `User` model, the context hints for it would be specified
as follows:
.. code-block:: python
{
'user': {
'app_name': 'auth',
'model_name': 'User',
}
}
With these hints, the 'user' field in the event context will be treated as a PK in the
    database and fetched appropriately. If one wishes to perform any prefetch or select_related
calls, the following options can be added:
.. code-block:: python
{
'user': {
'app_name': 'auth',
'model_name': 'User',
'select_related': ['foreign_key_field', 'one_to_one_field'],
'prefetch_related': ['reverse_foreign_key_field', 'many_to_many_field'],
}
}
    Note that as many keys as needed can be defined, provided they have corresponding keys
    in the event context for the particular source or source group. Also note that the keys
    in the event context can be embedded anywhere in the context and can also point to a
    list of PKs. For example:
.. code-block:: python
{
'my_context': {
'user': [1, 3, 5, 10],
'other_context_info': 'other_info_string',
},
'more_context': {
'hello': 'world',
}
}
In the above case, `User` objects with the PKs 1, 3, 5, and 10 will be fetched and loaded into
the event context whenever rendering is performed.
"""
name = models.CharField(max_length=64, unique=True)
# The various templates that can be used for rendering
text_template_path = models.CharField(max_length=256, default='')
html_template_path = models.CharField(max_length=256, default='')
text_template = models.TextField(default='')
html_template = models.TextField(default='')
# The source or source group of the event. It can only be one or the other
source = models.ForeignKey('entity_event.Source', null=True, on_delete=models.CASCADE)
source_group = models.ForeignKey('entity_event.SourceGroup', null=True, on_delete=models.CASCADE)
# The rendering style. Used to associated it with a medium
rendering_style = models.ForeignKey('entity_event.RenderingStyle', on_delete=models.CASCADE)
# Contains hints on how to fetch the context from the database
context_hints = JSONField(null=True, default=None, encoder=DjangoJSONEncoder)
class Meta:
unique_together = ('source', 'rendering_style')
def get_sources(self):
return [self.source] if self.source_id else self.source_group.source_set.all()
def __str__(self):
return self.name
def get_serialized_context(self, context):
"""
Serializes the context using the serializer class.
"""
return DefaultContextSerializer(context).data
def render_text_or_html_template(self, context, is_text=True):
"""
Renders a text or html template based on either the template path or the
stored template.
"""
template_path = getattr(self, '{0}_template_path'.format('text' if is_text else 'html'))
template = getattr(self, '{0}_template'.format('text' if is_text else 'html'))
if template_path:
return render_to_string(template_path, context)
elif template:
return Template(template).render(Context(context))
else:
return ''
def render_context_to_text_html_templates(self, context):
"""
Render the templates with the provided context.
Args:
A loaded context.
Returns:
A tuple of (rendered_text, rendered_html). Either, but not both
may be an empty string.
"""
# Process text template:
return (
self.render_text_or_html_template(context, is_text=True).strip(),
self.render_text_or_html_template(context, is_text=False).strip(),
)
| 41.305022 | 120 | 0.666118 |
8b7e0f2a1f8d7363c4a7045709aa260449c86b2e | 4,816 | py | Python | mysite/myapp/forms.py | MarkArren/PhotoSocial | bb401f465a464e7cf6a7fac184cef0d40e0a9525 | ["MIT"] | null | null | null | mysite/myapp/forms.py | MarkArren/PhotoSocial | bb401f465a464e7cf6a7fac184cef0d40e0a9525 | ["MIT"] | null | null | null | mysite/myapp/forms.py | MarkArren/PhotoSocial | bb401f465a464e7cf6a7fac184cef0d40e0a9525 | ["MIT"] | null | null | null |
from django import forms
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.core.validators import EmailValidator
from . import models
from .models import ProfileModel
from io import BytesIO
from PIL import Image, ExifTags
from django.core.files import File
def compressImage(image):
maxWidth = 440
# Open image and get bytes
imageTmp = Image.open(image).convert('RGB')
imageIO = BytesIO()
try:
# Rotate image if 'Orientation' included in metadata
# From https://stackoverflow.com/questions/13872331/rotating-an-image-with-orientation-specified-in-exif-using-python-without-pil-in
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation]=='Orientation':
break
exif=dict(imageTmp.getexif().items())
if exif[orientation] == 3:
imageTmp=imageTmp.rotate(180, expand=True)
elif exif[orientation] == 6:
imageTmp=imageTmp.rotate(270, expand=True)
elif exif[orientation] == 8:
imageTmp=imageTmp.rotate(90, expand=True)
except (AttributeError, KeyError, IndexError):
pass
# Get image attributes
width, height = imageTmp.size
newWidth = width
newHeight = height
    # Check if image needs to be cropped
crop = False
if width/height > 1.7:
# Image is too wide so cut width
ratio = height/9
newWidth = 16 * ratio
newHeight = height
crop = True
print("too wide")
elif width/height < 0.8:
# image is too tall so cut height
ratio = width / 8
newWidth = width
newHeight = 10 * ratio
crop = True
print("too tall")
if crop:
# Crop
left = (width - newWidth) / 2
top = (height - newHeight)/2
right = (width + newWidth)/2
bottom = (height + newHeight)/2
imageTmp = imageTmp.crop((left, top, right, bottom))
print("cropped")
# Resize image
ratio = maxWidth/newWidth
newWidth = newWidth * ratio
newHeight = newHeight * ratio
imageTmp = imageTmp.resize((int(newWidth), int(newHeight)))
print("resized")
# Convert to bytes, save and compress
imageTmp.save(imageIO, format='JPEG', optimize=True, quality=60)
return File(imageIO, name=image.name)
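# Illustrative trace of the arithmetic above (the 4000x1000 upload is an assumed
# example and this helper is not called anywhere): such an image has aspect
# 4000/1000 = 4.0 > 1.7, so it is centre-cropped to 16:9 and then scaled to maxWidth.
def _compress_image_dimensions_example():
    # Crop: ratio = 1000 / 9 ~= 111.1, newWidth = 16 * ratio ~= 1777.8, newHeight = 1000,
    # crop box ~ (left=1111.1, top=0, right=2888.9, bottom=1000).
    # Resize: scale = 440 / 1777.8 ~= 0.2475, giving roughly a 440 x 247 result.
    return (440, 247)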
class PostForm(forms.Form):
image = forms.ImageField(label="Upload Image", required=True)
caption = forms.CharField(label="Caption", max_length=512, required=False, widget=forms.TextInput(attrs={'placeholder': 'Caption'}))
location = forms.CharField(label="Location", max_length=50, required=False, widget=forms.TextInput(attrs={'placeholder': 'Location'}))
def save(self, request):
postInstance = models.PostModel()
postInstance.image = compressImage(self.cleaned_data["image"])
postInstance.caption = self.cleaned_data["caption"]
postInstance.location = self.cleaned_data["location"]
profile = models.ProfileModel.objects.filter(user=request.user.id)
postInstance.profile = profile[0]
postInstance.save()
return postInstance
# class PostForm(ModelForm):
# class meta:
# model = models.PostModel
# fields = ('image', 'caption', 'location')
def must_be_unique_email(value):
user = User.objects.filter(email=value)
if len(user) > 0:
raise forms.ValidationError("Email Already Exists")
return value
def must_be_unique_username(value):
user = User.objects.filter(username=value)
if len(user) > 0:
raise forms.ValidationError("Username Already Exists")
return value
class RegistrationForm(UserCreationForm):
# email = forms.EmailField(
# label="Email",
# required=True,
# validators=[EmailValidator]
# )
username = forms.CharField(label='Username',
required=True,
max_length=30
)
class Meta:
model = User
fields = ("username",
"password1", "password2")
def save(self, commit=True):
user = super(RegistrationForm, self).save(commit=False)
# user.email = self.cleaned_data["email"]
if commit:
user.save()
return user
# def __init__(self, *args, **kwargs):
# super(RegistrationForm, self).__init__(*args, **kwargs)
# self.fields['fullname'] = user.first_name + user.last_name
class ProfileForm(ModelForm):
class Meta:
model = ProfileModel
fields = ('profilePicture', 'fullname', 'email', 'bio')
class UserUpdateForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'email')
# class ProfileForm(forms.Form):
# profilePicture = forms.ImageField(label="Profile Picture", required=False)
# bio = forms.CharField(label="Bio", max_length=512, required=False)
# def save(self, request):
# profileInstance = models.PostModel()
# postInstance.user = request.user
# profileInstance.profilePicture = self.cleaned_data["profilePicture"]
# profileInstance.bio = self.cleaned_data["bio"]
# profileInstance.save()
# return profileInstance
| 28.163743 | 135 | 0.706811 |
96f5f81c1f04b7d64ddbd6fd461348c6986d9ff6 | 94,579 | py | Python | tensorflow/python/ops/array_ops.py | Matanley/tensorflow | be5bcb90566013b495c3f267423d7b7c5269b9ee | ["Apache-2.0"] | 1 | 2018-03-01T08:32:28.000Z | 2018-03-01T08:32:28.000Z | tensorflow/python/ops/array_ops.py | shenruixiang10086/tensorflow | acb1ef68f5aea3b6f7f1e14db588b74134719b5e | ["Apache-2.0"] | null | null | null | tensorflow/python/ops/array_ops.py | shenruixiang10086/tensorflow | acb1ef68f5aea3b6f7f1e14db588b74134719b5e | ["Apache-2.0"] | 2 | 2019-01-17T08:02:34.000Z | 2021-09-26T17:11:21.000Z |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Tests for this file live in python/kernel_tests/array_ops_test.py
"""Support for manipulating tensors.
See the @{$python/array_ops} guide.
@@string_to_number
@@to_double
@@to_float
@@to_bfloat16
@@to_int32
@@to_int64
@@cast
@@bitcast
@@saturate_cast
@@broadcast_dynamic_shape
@@broadcast_static_shape
@@shape
@@shape_n
@@size
@@rank
@@reshape
@@squeeze
@@expand_dims
@@unravel_index
@@meshgrid
@@slice
@@strided_slice
@@split
@@tile
@@pad
@@concat
@@stack
@@parallel_stack
@@unstack
@@reverse_sequence
@@reverse
@@reverse_v2
@@transpose
@@extract_image_patches
@@space_to_batch_nd
@@space_to_batch
@@required_space_to_batch_paddings
@@batch_to_space_nd
@@batch_to_space
@@space_to_depth
@@depth_to_space
@@gather
@@gather_nd
@@unique_with_counts
@@scatter_nd
@@dynamic_partition
@@dynamic_stitch
@@boolean_mask
@@one_hot
@@sequence_mask
@@dequantize
@@quantize
@@quantize_v2
@@quantized_concat
@@setdiff1d
@@guarantee_const
@@fake_quant_with_min_max_args
@@fake_quant_with_min_max_args_gradient
@@fake_quant_with_min_max_vars
@@fake_quant_with_min_max_vars_gradient
@@fake_quant_with_min_max_vars_per_channel
@@fake_quant_with_min_max_vars_per_channel_gradient
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
# Used for slicing to specify a new 1 size dimension
newaxis = None
tf_export("newaxis").export_constant(__name__, "newaxis")
# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_BaseSlice = slice
@tf_export("identity")
def identity(input, name=None): # pylint: disable=redefined-builtin
r"""Return a tensor with the same shape and contents as input.
Args:
input: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if context.in_graph_mode():
return gen_array_ops.identity(input, name=name)
else:
input = ops.convert_to_tensor(input)
in_device = input.device
# TODO(ashankar): Does 'identity' need to invoke execution callbacks?
context_device = context.context().device_name
if not context_device:
context_device = "/job:localhost/replica:0/task:0/device:CPU:0"
if context_device != in_device:
return input._copy() # pylint: disable=protected-access
return input
# pylint: disable=redefined-builtin,protected-access
@tf_export("expand_dims")
def expand_dims(input, axis=None, name=None, dim=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```python
# 't' is a tensor of shape [2]
tf.shape(tf.expand_dims(t, 0)) # [1, 2]
tf.shape(tf.expand_dims(t, 1)) # [2, 1]
tf.shape(tf.expand_dims(t, -1)) # [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5]
tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5]
tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
axis: 0-D (scalar). Specifies the dimension index at which to
expand the shape of `input`. Must be in the range
`[-rank(input) - 1, rank(input)]`.
name: The name of the output `Tensor`.
dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.
Returns:
A `Tensor` with the same data as `input`, but its shape has an additional
dimension of size 1 added.
Raises:
ValueError: if both `dim` and `axis` are specified.
"""
# TODO(aselle): Remove argument dim
if dim is not None:
if axis is not None:
raise ValueError("can't specify both 'dim' and 'axis'")
axis = dim
return gen_array_ops._expand_dims(input, axis, name)
# pylint: enable=redefined-builtin,protected-access
# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecation.deprecated(
"2016-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
return gen_array_ops._list_diff(x, y, out_idx, name)
listdiff.__doc__ = gen_array_ops._list_diff.__doc__ + "\n" + listdiff.__doc__
# pylint: enable=protected-access
# pylint: disable=undefined-variable,protected-access
@tf_export("setdiff1d")
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
return gen_array_ops._list_diff(x, y, index_dtype, name)
setdiff1d.__doc__ = gen_array_ops._list_diff.__doc__
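# Illustrative sketch of setdiff1d behaviour (assumed example values):
#   x = [1, 2, 3, 4, 5, 6], y = [1, 3, 5]
#   out, idx = tf.setdiff1d(x, y)  # out == [2, 4, 6], idx == [1, 3, 5]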
# pylint: enable=protected-access
@tf_export("broadcast_dynamic_shape")
def broadcast_dynamic_shape(shape_x, shape_y):
# pylint: disable=protected-access
"""Returns the broadcasted dynamic shape between `shape_x` and `shape_y`.
Args:
shape_x: A rank 1 integer `Tensor`, representing the shape of x.
shape_y: A rank 1 integer `Tensor`, representing the shape of y.
Returns:
A rank 1 integer `Tensor` representing the broadcasted shape.
"""
return gen_array_ops._broadcast_args(shape_x, shape_y)
# pylint: enable=protected-access
@tf_export("broadcast_static_shape")
def broadcast_static_shape(shape_x, shape_y):
"""Returns the broadcasted static shape between `shape_x` and `shape_y`.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
"""
return common_shapes.broadcast_shape(shape_x, shape_y)
@tf_export("shape")
def shape(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.shape(t) # [2, 2, 3]
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`.
"""
return shape_internal(input, name, optimize=True, out_type=out_type)
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the shape as a constant when possible.
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Shape", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_math_ops.cast(input.dense_shape, out_type)
else:
if context.in_graph_mode():
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.is_fully_defined():
return constant(input_shape.as_list(), out_type, name=name)
return gen_array_ops.shape(input, name=name, out_type=out_type)
@tf_export("shape_n")
def shape_n(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns shape of tensors.
Args:
input: A list of at least 1 `Tensor` object with the same type.
out_type: The specified output type of the operation
(`int32` or `int64`). Defaults to `tf.int32`(optional).
name: A name for the operation (optional).
Returns:
A list with the same length as `input` of `Tensor` objects with
type `out_type`.
"""
output = gen_array_ops.shape_n(input, out_type=out_type, name=name)
if context.in_graph_mode():
for i, input_tensor in enumerate(input):
input_tensor = ops.convert_to_tensor(input_tensor)
input_shape = input_tensor.get_shape()
if input_shape.is_fully_defined():
output[i] = constant(
input_shape.as_list(), dtype=out_type, name=name)
return output
@tf_export("size")
def size(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor.
Returns a 0-D `Tensor` representing the number of elements in `input`
of type `out_type`. Defaults to tf.int32.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.size(t) # 12
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified non-quantized numeric output type
of the operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
@compatibility(numpy)
Equivalent to np.size()
@end_compatibility
"""
return size_internal(input, name, optimize=True, out_type=out_type)
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin,protected-access
"""Returns the size of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the size as a constant when possible.
out_type: (Optional) The specified non-quantized numeric output type
of the operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
"""
if context.in_eager_mode() and not isinstance(
input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
size_ = 1
for dim in ops.convert_to_tensor(input)._shape_tuple(): # pylint: disable=protected-access
size_ *= dim
return size_
with ops.name_scope(name, "Size", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_math_ops._prod(
gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize:
if input_shape.is_fully_defined():
return constant(input_shape.num_elements(), out_type, name=name)
if input_shape.dims and any(dim == 0 for dim in input_shape.dims):
return constant(0, out_type, name=name)
return gen_array_ops.size(input, name=name, out_type=out_type)
@tf_export("rank")
def rank(input, name=None):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Returns a 0-D `int32` `Tensor` representing the rank of `input`.
For example:
```python
# shape of tensor 't' is [2, 2, 3]
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.rank(t) # 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The
rank of a tensor is the number of indices required to uniquely select each
element of the tensor. Rank is also known as "order", "degree", or "ndims."
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
@compatibility(numpy)
Equivalent to np.ndim
@end_compatibility
"""
return rank_internal(input, name, optimize=True)
def rank_internal(input, name=None, optimize=True):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the rank as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Rank", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_array_ops.size(input.dense_shape, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.ndims is not None:
return constant(input_shape.ndims, dtypes.int32, name=name)
return gen_array_ops.rank(input, name=name)
def _slice_helper(tensor, slice_spec, var=None):
"""Overload for Tensor.__getitem__.
This operation extracts the specified region from the tensor.
The notation is similar to NumPy with the restriction that
currently only support basic indexing. That means that
using a non-scalar tensor as input is not currently allowed.
Some useful examples:
```python
# strip leading and trailing 2 elements
foo = tf.constant([1,2,3,4,5,6])
print(foo[2:-2].eval()) # => [3,4]
# skip every row and reverse every column
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[::2,::-1].eval()) # => [[3,2,1], [9,8,7]]
# Use scalar tensors as indices on both dimensions
print(foo[tf.constant(0), tf.constant(2)].eval()) # => 3
# Insert another dimension
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],
[[7],[8],[9]]]
# Ellipses (3 equivalent operations)
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis, ...].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
```
Notes:
- `tf.newaxis` is `None` as in NumPy.
- An implicit ellipsis is placed at the end of the `slice_spec`
- NumPy advanced indexing is currently not supported.
Args:
tensor: An ops.Tensor object.
slice_spec: The arguments to Tensor.__getitem__.
var: In the case of variable slice assignment, the Variable
object to slice (i.e. tensor is the read-only view of this
variable).
Returns:
The appropriate slice of "tensor", based on "slice_spec".
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, or Ellipsis.
"""
if not isinstance(slice_spec, (list, tuple)):
slice_spec = [slice_spec]
begin, end, strides = [], [], []
index = 0
new_axis_mask, shrink_axis_mask = 0, 0
begin_mask, end_mask = 0, 0
ellipsis_mask = 0
for s in slice_spec:
if isinstance(s, _BaseSlice):
# python doesn't always use None when constructing ranges
# for example a[:] gives slice(None,sys.maxsize,None)
# whereas a[::1] gives slice(None,None,None)
if s.start is not None and s.start is not sys.maxsize:
begin.append(s.start)
else:
begin.append(0)
begin_mask |= (1 << index)
if s.stop is not None and s.stop != sys.maxsize:
end.append(s.stop)
else:
end.append(0)
end_mask |= (1 << index)
if s.step is not None:
strides.append(s.step)
else:
strides.append(1)
elif s is Ellipsis:
begin.append(0)
end.append(0)
strides.append(1)
ellipsis_mask |= (1 << index)
elif s is newaxis:
begin.append(0)
end.append(0)
strides.append(1)
new_axis_mask |= (1 << index)
else:
begin.append(s)
end.append(s + 1)
strides.append(1)
shrink_axis_mask |= (1 << index)
index += 1
# stack possibly involves no tensors, so we must use op_scope correct graph.
with ops.name_scope(None, "strided_slice",
[tensor] + begin + end + strides) as name:
if begin:
packed_begin, packed_end, packed_strides = (stack(begin), stack(end),
stack(strides))
if (packed_begin.dtype == dtypes.int64 or
packed_end.dtype == dtypes.int64 or
packed_strides.dtype == dtypes.int64):
if packed_begin.dtype != dtypes.int64:
packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)
if packed_end.dtype != dtypes.int64:
packed_end = gen_math_ops.cast(packed_end, dtypes.int64)
if packed_strides.dtype != dtypes.int64:
packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)
else:
var_empty = constant([], dtype=dtypes.int32)
packed_begin = packed_end = packed_strides = var_empty
return strided_slice(
tensor,
packed_begin,
packed_end,
packed_strides,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
var=var,
name=name)
# pylint: disable=undefined-variable,protected-access,redefined-outer-name
@tf_export("slice")
def slice(input_, begin, size, name=None):
# pylint: disable=redefined-builtin
"""Extracts a slice from a tensor.
This operation extracts a slice of size `size` from a tensor `input` starting
at the location specified by `begin`. The slice `size` is represented as a
tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
of `input` that you want to slice. The starting location (`begin`) for the
slice is represented as an offset in each dimension of `input`. In other
words, `begin[i]` is the offset into the 'i'th dimension of `input` that you
want to slice from.
Note that @{tf.Tensor.__getitem__} is typically a more pythonic way to
perform slices, as it allows you to write `foo[3:7, :-2]` instead of
`tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.
`begin` is zero-based; `size` is one-based. If `size[i]` is -1,
all remaining elements in dimension i are included in the
slice. In other words, this is equivalent to setting:
`size[i] = input.dim_size(i) - begin[i]`
This operation requires that:
`0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]]
tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]],
# [[5, 5, 5]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
size: An `int32` or `int64` `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
return gen_array_ops._slice(input_, begin, size, name=name)
# pylint: disable=invalid-name
@tf_export("strided_slice")
def strided_slice(input_,
begin,
end,
strides=None,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
var=None,
name=None):
"""Extracts a strided slice of a tensor (generalized python array indexing).
**Instead of calling this op directly most users will want to use the
NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which
is supported via @{tf.Tensor.__getitem__} and @{tf.Variable.__getitem__}.**
The interface of this op is a low-level encoding of the slicing syntax.
Roughly speaking, this op extracts a slice of size `(end-begin)/stride`
from the given `input_` tensor. Starting at the location specified by `begin`
the slice continues by adding `stride` to the index until all dimensions are
not less than `end`.
Note that a stride can be negative, which causes a reverse slice.
Given a Python slice `input[spec0, spec1, ..., specn]`,
this function will be called as follows.
`begin`, `end`, and `strides` will be vectors of length n.
n in general is not equal to the rank of the `input_` tensor.
In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,
`new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to
the ith spec.
If the ith bit of `begin_mask` is set, `begin[i]` is ignored and
the fullest possible range in that dimension is used instead.
`end_mask` works analogously, except with the end range.
`foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
`foo[::-1]` reverses a tensor with shape 8.
If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions
as needed will be inserted between other dimensions. Only one
non-zero bit is allowed in `ellipsis_mask`.
For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
equivalent to `foo[3:5,:,:,4:5]` and
`foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.
If the ith bit of `new_axis_mask` is set, then `begin`,
`end`, and `stride` are ignored and a new length 1 dimension is
added at this point in the output tensor.
For example,
`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
If the ith bit of `shrink_axis_mask` is set, it implies that the ith
specification shrinks the dimensionality by 1. `begin[i]`, `end[i]` and
`strides[i]` must imply a slice of size 1 in the dimension. For example in
Python one might do `foo[:, 3, :]` which would result in
`shrink_axis_mask` equal to 2.
NOTE: `begin` and `end` are zero-indexed.
`strides` entries must be non-zero.
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]]
tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4],
# [3, 3, 3]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
end: An `int32` or `int64` `Tensor`.
strides: An `int32` or `int64` `Tensor`.
begin_mask: An `int32` mask.
end_mask: An `int32` mask.
ellipsis_mask: An `int32` mask.
new_axis_mask: An `int32` mask.
shrink_axis_mask: An `int32` mask.
var: The variable corresponding to `input_` or None
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
if strides is None:
strides = ones_like(begin)
op = gen_array_ops.strided_slice(
input=input_,
begin=begin,
end=end,
strides=strides,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
parent_name = name
def assign(val, name=None):
"""Closure that holds all the arguments to create an assignment."""
if var is None:
raise ValueError("Sliced assignment is only supported for variables")
if name is None:
name = parent_name + "_assign"
return var._strided_slice_assign(
begin=begin,
end=end,
strides=strides,
value=val,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
if context.in_graph_mode():
# TODO(apassos) In eager mode assignment will be done by overriding
# __setitem__ instead.
op.assign = assign
return op
def _SliceHelperVar(var, slice_spec):
"""Creates a slice helper object given a variable.
This allows creating a sub-tensor from part of the current contents
of a variable. See ${tf.Tensor$`Tensor.__getitem__`}
for detailed examples of slicing.
This function in addition also allows assignment to a sliced range.
This is similar to `__setitem__` functionality in Python. However,
the syntax is different so that the user can capture the assignment
operation for grouping or passing to `sess.run()`.
For example,
```python
import tensorflow as tf
A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(A[:2, :2])) # => [[1,2], [4,5]]
op = A[:2,:2].assign(22. * tf.ones((2, 2)))
print(sess.run(op)) # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
```
Note that assignments currently do not support NumPy broadcasting
semantics.
Args:
var: An `ops.Variable` object.
slice_spec: The arguments to `Tensor.__getitem__`.
Returns:
The appropriate slice of "tensor", based on "slice_spec".
As an operator. The operator also has a `assign()` method
that can be used to generate an assignment operator.
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, or Ellipsis.
"""
return _slice_helper(var._AsTensor(), slice_spec, var)
ops.Tensor._override_operator("__getitem__", _slice_helper)
@tf_export("parallel_stack")
def parallel_stack(values, name="parallel_stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.
Requires that the shape of inputs be known at graph construction time.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the first dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
tensor will have the shape `(N, A, B, C)`.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.parallel_stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]]
```
The difference between `stack` and `parallel_stack` is that `stack` requires
all the inputs be computed before the operation will begin but doesn't require
that the input shapes be known during graph construction.
  `parallel_stack` will copy pieces of the input into the output as they become
  available; in some situations this can provide a performance benefit.
Unlike `stack`, `parallel_stack` does NOT support backpropagation.
This is the opposite of unstack. The numpy equivalent is
tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])
Args:
values: A list of `Tensor` objects with the same shape and type.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
"""
with ops.name_scope(name):
value_t = ops.convert_to_tensor(values[0])
value_shape = ops.convert_to_tensor(value_t).get_shape()
output_shape = tensor_shape.TensorShape([len(values)])
output_shape = output_shape.concatenate(value_shape)
# expand_dims converts concat to stack.
return gen_array_ops._parallel_concat(
[expand_dims(value, 0) for value in values], shape=output_shape)
@tf_export("stack")
def stack(values, axis=0, name="stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the `axis` dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`;
if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]] (Pack along first dim.)
tf.stack([x, y, z], axis=1) # [[1, 2, 3], [4, 5, 6]]
```
This is the opposite of unstack. The numpy equivalent is
```python
tf.stack([x, y, z]) = np.stack([x, y, z])
```
Args:
values: A list of `Tensor` objects with the same shape and type.
axis: An `int`. The axis to stack along. Defaults to the first dimension.
Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
Raises:
ValueError: If `axis` is out of the range [-(R+1), R+1).
"""
if axis == 0:
try:
# If the input is a constant list, it can be converted to a constant op
return ops.convert_to_tensor(values, name=name)
except (TypeError, ValueError):
pass # Input list contains non-constant tensors
value_shape = ops.convert_to_tensor(values[0], name=name).get_shape()
if value_shape.ndims is not None:
expanded_num_dims = value_shape.ndims + 1
if axis < -expanded_num_dims or axis >= expanded_num_dims:
raise ValueError("axis = %d not in [%d, %d)" % (axis, -expanded_num_dims,
expanded_num_dims))
return gen_array_ops._pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
"""Converts the given list or tuple to a tensor by packing.
Args:
list_or_tuple: A (possibly nested) list or tuple containing a tensor.
dtype: The element type of the returned tensor.
name: A name for the returned tensor.
Returns:
A `tf.Tensor` with value equivalent to `list_or_tuple`.
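  For example (an illustrative sketch; the inputs are assumed):
  ```python
  # [constant_op.constant(1.0), 2.0] is packed into a rank-1 float32 tensor [1.0, 2.0].
  _autopacking_helper([constant_op.constant(1.0), 2.0], dtypes.float32, "packed")
  ```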
"""
must_pack = False
converted_elems = []
with ops.name_scope(name) as scope:
for i, elem in enumerate(list_or_tuple):
if ops.is_dense_tensor_like(elem):
if dtype is not None and elem.dtype.base_dtype != dtype:
raise TypeError("Cannot convert a list containing a tensor of dtype "
"%s to %s (Tensor is: %r)" % (elem.dtype, dtype,
elem))
converted_elems.append(elem)
must_pack = True
elif isinstance(elem, (list, tuple)):
converted_elem = _autopacking_helper(elem, dtype, str(i))
if ops.is_dense_tensor_like(converted_elem):
must_pack = True
converted_elems.append(converted_elem)
else:
converted_elems.append(elem)
if must_pack:
elems_as_tensors = []
for i, elem in enumerate(converted_elems):
if ops.is_dense_tensor_like(elem):
elems_as_tensors.append(elem)
else:
# NOTE(mrry): This is inefficient, but it enables us to
# handle the case where the list arguments are other
# convertible-to-tensor types, such as numpy arrays.
elems_as_tensors.append(
constant_op.constant(elem, dtype=dtype, name=str(i)))
return gen_array_ops._pack(elems_as_tensors, name=scope)
else:
return converted_elems
def _get_dtype_from_nested_lists(list_or_tuple):
"""Returns the dtype of any tensor-like object in `list_or_tuple`, if found.
Args:
list_or_tuple: A list or tuple representing an object that can be
converted to a `tf.Tensor`.
Returns:
The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
such object exists.
"""
for elem in list_or_tuple:
if ops.is_dense_tensor_like(elem):
return elem.dtype.base_dtype
elif isinstance(elem, (list, tuple)):
maybe_dtype = _get_dtype_from_nested_lists(elem)
if maybe_dtype is not None:
return maybe_dtype
return None
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
"""Tensor conversion function that automatically packs arguments."""
if as_ref:
return NotImplemented
inferred_dtype = _get_dtype_from_nested_lists(v)
if inferred_dtype is None:
# We did not find any tensor-like objects in the nested lists, so defer to
# other conversion functions.
return NotImplemented
if dtype is not None and dtype != inferred_dtype:
return NotImplemented
return _autopacking_helper(v, inferred_dtype, name or "packed")
# pylint: enable=invalid-name
# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
ops.register_tensor_conversion_function((list, tuple),
_autopacking_conversion_function, 99)
@tf_export("unstack")
def unstack(value, num=None, axis=0, name="unstack"):
"""Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
If `num` is not specified (the default), it is inferred from `value`'s shape.
If `value.shape[axis]` is not known, `ValueError` is raised.
For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in `output` is the slice
`value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
(Note that the dimension unpacked along is gone, unlike `split`).
If `axis == 1` then the i'th tensor in `output` is the slice
`value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
Etc.
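  For example (an illustrative sketch; the example tensor is assumed):
  ```python
  x = tf.constant([[1, 2, 3], [4, 5, 6]])  # shape (2, 3)
  tf.unstack(x)          # two tensors: [1, 2, 3] and [4, 5, 6], each of shape (3,)
  tf.unstack(x, axis=1)  # three tensors: [1, 4], [2, 5], [3, 6], each of shape (2,)
  ```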
This is the opposite of stack. The numpy equivalent is
tf.unstack(x, n) = np.unstack(x)
Args:
value: A rank `R > 0` `Tensor` to be unstacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred
if `None` (the default).
axis: An `int`. The axis to unstack along. Defaults to the first
dimension. Negative values wrap around, so the valid range is `[-R, R)`.
name: A name for the operation (optional).
Returns:
The list of `Tensor` objects unstacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
ValueError: If `axis` is out of the range [-R, R).
"""
if num is None:
value = ops.convert_to_tensor(value)
value_shape = value.get_shape()
if value_shape.ndims is not None:
if axis < -value_shape.ndims or axis >= value_shape.ndims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -value_shape.ndims, value_shape.ndims))
num = value_shape[axis].value
if num is None:
raise ValueError("Cannot infer num from shape %s" % value_shape)
return gen_array_ops._unpack(value, num=num, axis=axis, name=name)
@tf_export("concat")
def concat(values, axis, name="concat"):
"""Concatenates tensors along one dimension.
Concatenates the list of tensors `values` along dimension `axis`. If
`values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Raxis, ...Dn]
where
Raxis = sum(Daxis(i))
That is, the data from the input tensors is joined along the `axis`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `axis` must be equal.
For example:
```python
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0) # [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 1) # [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat([t3, t4], 0)) # [4, 3]
tf.shape(tf.concat([t3, t4], 1)) # [2, 6]
```
As in Python, the `axis` could also be negative numbers. Negative `axis`
are interpreted as counting from the end of the rank, i.e.,
`axis + rank(values)`-th dimension.
For example:
```python
t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]
t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]
tf.concat([t1, t2], -1)
```
would produce:
```python
[[[ 1, 2, 7, 4],
[ 2, 3, 8, 4]],
[[ 4, 4, 2, 10],
[ 5, 3, 15, 11]]]
```
Note: If you are concatenating along a new axis consider using stack.
E.g.
```python
tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
```
can be rewritten as
```python
tf.stack(tensors, axis=axis)
```
Args:
values: A list of `Tensor` objects or a single `Tensor`.
axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be
in the range `[-rank(values), rank(values))`. As in Python, indexing
      for axis is 0-based. Positive axis in the range of
`[0, rank(values))` refers to `axis`-th dimension. And negative axis
refers to `axis + rank(values)`-th dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list, tuple)):
values = [values]
# TODO(mrry): Change to return values?
if len(values) == 1: # Degenerate case of one tensor.
# Make a throwaway call to convert_to_tensor to make sure
# that axis is of the correct type, and make sure that
# the returned tensor is a scalar.
# TODO(keveman): Implement a standalone type and shape checker.
with ops.name_scope(name) as scope:
ops.convert_to_tensor(
axis, name="concat_dim",
dtype=dtypes.int32).get_shape().assert_is_compatible_with(
tensor_shape.scalar())
return identity(values[0], name=scope)
return gen_array_ops._concat_v2(values=values, axis=axis, name=name)
@tf_export("boolean_mask")
def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
"""Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) # [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
The `axis` could be used with `mask` to indicate the axis to mask from.
In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
the first `axis + dim(mask)` dimensions of `tensor`'s shape.
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
name: A name for this operation (optional).
axis: A 0-D int Tensor representing the axis in `tensor` to mask from.
By default, axis is 0 which will mask from the first dimension. Otherwise
K + axis <= N.
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
"""
def _apply_mask_1d(reshaped_tensor, mask, axis=None):
"""Mask tensor along dimension 0 with a 1-D mask."""
indices = squeeze(where(mask), squeeze_dims=[1])
return gather(reshaped_tensor, indices, axis=axis)
with ops.name_scope(name, values=[tensor, mask]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
mask = ops.convert_to_tensor(mask, name="mask")
shape_mask = mask.get_shape()
ndims_mask = shape_mask.ndims
shape_tensor = tensor.get_shape()
if ndims_mask == 0:
raise ValueError("mask cannot be scalar.")
if ndims_mask is None:
raise ValueError(
"Number of mask dimensions must be specified, even if some dimensions"
" are None. E.g. shape=[None] is ok, but shape=None is not.")
axis = 0 if axis is None else axis
shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)
leading_size = gen_math_ops._prod(
shape(tensor)[axis:axis + ndims_mask], [0])
tensor = reshape(tensor,
concat([
shape(tensor)[:axis], [leading_size],
shape(tensor)[axis + ndims_mask:]
], 0))
first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()
tensor.set_shape(
tensor_shape.as_shape(shape_tensor[:axis]).concatenate([first_dim])
.concatenate(shape_tensor[axis + ndims_mask:]))
mask = reshape(mask, [-1])
return _apply_mask_1d(tensor, mask, axis)
@tf_export("sparse_mask")
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
contains a subset of the slices of `a`. Only the slices at indices not
specified in `mask_indices` are returned.
This is useful when you need to extract a subset of slices in an
`IndexedSlices` object.
For example:
```python
# `a` contains slices at indices [12, 26, 37, 45] from a large tensor
# with shape [1000, 10]
a.indices # [12, 26, 37, 45]
tf.shape(a.values) # [4, 10]
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask its first and last indices (which are at absolute
# indices 12, 45)
b = tf.sparse_mask(a, [12, 45])
b.indices # [26, 37]
tf.shape(b.values) # [2, 10]
```
Args:
a: An `IndexedSlices` instance.
mask_indices: Indices of elements to mask.
name: A name for the operation (optional).
Returns:
The masked `IndexedSlices` instance.
"""
with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
indices = a.indices
out_indices, to_gather = setdiff1d(indices, mask_indices)
out_values = gather(a.values, to_gather, name=name)
return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
@tf_export("unique")
def unique(x, out_idx=dtypes.int32, name=None):
# TODO(yongtang): switch to v2 once API deprecation
# period (3 weeks) pass.
# TODO(yongtang): The documentation should also
# be updated when switch to v2.
return gen_array_ops._unique(x, out_idx, name)
unique.__doc__ = gen_array_ops._unique.__doc__
@tf_export("unique_with_counts")
def unique_with_counts(x, out_idx=dtypes.int32, name=None):
# TODO(yongtang): switch to v2 once API deprecation
# period (3 weeks) pass.
# TODO(yongtang): The documentation should also
# be updated when switch to v2.
return gen_array_ops._unique_with_counts(x, out_idx, name)
unique_with_counts.__doc__ = gen_array_ops._unique_with_counts.__doc__
@tf_export("split")
def split(value, num_or_size_splits, axis=0, num=None, name="split"):
"""Splits a tensor into sub tensors.
If `num_or_size_splits` is an integer type, `num_split`, then splits `value`
along dimension `axis` into `num_split` smaller tensors.
Requires that `num_split` evenly divides `value.shape[axis]`.
If `num_or_size_splits` is not an integer type, it is presumed to be a Tensor
`size_splits`, then splits `value` into `len(size_splits)` pieces. The shape
of the `i`-th piece has the same size as the `value` except along dimension
`axis` where the size is `size_splits[i]`.
For example:
```python
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
tf.shape(split0) # [5, 4]
tf.shape(split1) # [5, 15]
tf.shape(split2) # [5, 11]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
tf.shape(split0) # [5, 10]
```
Args:
value: The `Tensor` to split.
num_or_size_splits: Either a 0-D integer `Tensor` indicating the number of
splits along split_dim or a 1-D integer `Tensor` containing
the sizes of each output tensor along split_dim. If a scalar then it must
evenly divide `value.shape[axis]`; otherwise the sum of sizes along the
split dimension must match that of the `value`.
axis: A 0-D `int32` `Tensor`. The dimension along which to split.
Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
num: Optional, used to specify the number of outputs when it cannot be
inferred from the shape of `size_splits`.
name: A name for the operation (optional).
Returns:
if `num_or_size_splits` is a scalar returns `num_or_size_splits` `Tensor`
objects; if `num_or_size_splits` is a 1-D Tensor returns
`num_or_size_splits.get_shape[0]` `Tensor` objects resulting from splitting
`value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
"""
size_splits = ops.convert_to_tensor(num_or_size_splits)
if size_splits._rank() == 0 and size_splits.dtype.is_integer:
return gen_array_ops._split(
axis=axis, num_split=num_or_size_splits, value=value, name=name)
if num is None:
num = size_splits._shape_tuple()[0]
if num is None:
raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
return gen_array_ops._split_v(
value=value,
size_splits=size_splits,
axis=axis,
num_split=num,
name=name)
@tf_export("transpose")
def transpose(a, perm=None, name="transpose", conjugate=False):
"""Transposes `a`. Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `matrix_transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.conj(tf.transpose(input)).
Returns:
A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
transpose_fn = (
gen_array_ops._conjugate_transpose
if (conjugate and a.dtype.is_complex) else gen_array_ops.transpose)
if perm is None:
rank = gen_array_ops.rank(a)
perm = (rank - 1) - gen_math_ops._range(0, rank, 1)
ret = transpose_fn(a, perm, name=name)
# NOTE(mrry): Setting the shape explicitly because
# reverse is not handled by the shape function.
if context.in_graph_mode():
input_shape = ret.op.inputs[0].get_shape().dims
if input_shape is not None:
ret.set_shape(input_shape[::-1])
else:
ret = transpose_fn(a, perm, name=name)
return ret
# pylint: disable=invalid-name
@tf_export("matrix_transpose", "linalg.transpose")
def matrix_transpose(a, name="matrix_transpose", conjugate=False):
"""Transposes last two dimensions of tensor `a`.
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.matrix_transpose(x) # [[1, 4],
# [2, 5],
# [3, 6]]
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.matrix_transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# Matrix with two batch dimensions.
# x.shape is [1, 2, 3, 4]
# tf.matrix_transpose(x) is shape [1, 2, 4, 3]
```
Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
This is done with minimal cost, and is preferable to using this function. E.g.
```python
# Good! Transpose is taken at minimal additional cost.
tf.matmul(matrix, b, transpose_b=True)
# Inefficient!
tf.matmul(matrix, tf.matrix_transpose(b))
```
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, `matrix_transposes` return a new tensor
with the items permuted.
@end_compatibility
Args:
a: A `Tensor` with `rank >= 2`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.conj(tf.matrix_transpose(input)).
Returns:
A transposed batch matrix `Tensor`.
Raises:
ValueError: If `a` is determined statically to have `rank < 2`.
"""
with ops.name_scope(name, values=[a]):
a = ops.convert_to_tensor(a, name="a")
# If we know the number of dimensions (statically), we can do two things:
# 1. Check that `a` is a (batch) matrix.
# 2. Use a python list for perm. This preserves static shape information
# and avoids extra computations.
a_shape = a.get_shape()
ndims = a_shape.ndims
if ndims is not None:
if ndims < 2:
raise ValueError(
"Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
"%s" % a_shape)
perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
else:
a_rank = rank(a)
perm = concat((gen_math_ops._range(0, a_rank - 2, 1),
[a_rank - 1, a_rank - 2]), 0)
return transpose(a, perm=perm, conjugate=conjugate)
# pylint: enable=invalid-name
@tf_export("zeros")
def zeros(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to zero.
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to zero.
For example:
```python
tf.zeros([3, 4], tf.int32) # [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
`int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "zeros", [shape]) as name:
if dtype == dtypes.bool:
zero = False
elif dtype == dtypes.string:
zero = ""
else:
zero = 0
if not isinstance(shape, ops.Tensor):
try:
# Go through tensor shapes to get int64-if-needed semantics
shape = constant_op._tensor_shape_tensor_conversion_function(
tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
# Happens when shape is a list with tensor elements
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
output = fill(shape, constant(zero, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
@tf_export("zeros_like")
def zeros_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to zero.
"""
with ops.name_scope(name, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
if context.in_eager_mode():
if dtype is not None and dtype != tensor.dtype:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
with ops.device(tensor.device):
return gen_array_ops._zeros_like(tensor, name=name)
# For now, variant types must be created via zeros_like; as we need to
# pass the input variant object to the proper zeros callback.
if (optimize and tensor.shape.is_fully_defined() and
tensor.dtype != dtypes.variant):
# We can produce a zeros tensor independent of the value of 'tensor',
# since the shape is known statically.
return zeros(tensor.shape, dtype=dtype or tensor.dtype, name=name)
if dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
else:
return gen_array_ops._zeros_like(tensor, name=name)
@tf_export("ones_like")
def ones_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to 1.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to 1. Optionally, you can
specify a new type (`dtype`) for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `uint8`, `int16`, `uint16`, int32`, `int64`,
`complex64`, `complex128` or `bool`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to 1.
"""
with ops.name_scope(name, "ones_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
ones_shape = shape_internal(tensor, optimize=optimize)
if dtype is None:
dtype = tensor.dtype
ret = ones(ones_shape, dtype=dtype, name=name)
if context.in_graph_mode():
ret.set_shape(tensor.get_shape())
return ret
@tf_export("ones")
def ones(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to 1.
This operation returns a tensor of type `dtype` with shape `shape` and all
elements set to 1.
For example:
```python
tf.ones([2, 3], tf.int32) # [[1, 1, 1], [1, 1, 1]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
`int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "ones", [shape]) as name:
one = True if dtype == dtypes.bool else 1
if not isinstance(shape, ops.Tensor):
try:
# Go through tensor shapes to get int64-if-needed semantics
shape = constant_op._tensor_shape_tensor_conversion_function(
tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
# Happens when shape is a list with tensor elements
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
output = fill(shape, constant(one, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
@tf_export("placeholder")
def placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a tensor that will be always fed.
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
`Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.
```
@compatibility{eager} Placeholders are not compatible with eager execution.
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if context.in_eager_mode():
raise RuntimeError("tf.placeholder() is not compatible with "
"eager execution.")
return gen_array_ops._placeholder(dtype=dtype, shape=shape, name=name)
# pylint: disable=redefined-outer-name
def _normalize_sparse_shape(shape, name):
"""Returns a tuple of (Tensor or None, rank or None)."""
if shape is None:
return (None, None)
rank = shape.get_shape()[0] if isinstance(shape, ops.Tensor) else len(shape)
if not isinstance(shape, ops.Tensor) and None in shape:
return (None, rank)
return (ops.convert_to_tensor(shape, dtype=dtypes.int64, name=name), rank)
@tf_export("sparse_placeholder")
def sparse_placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a sparse tensor that will be always fed.
**Important**: This sparse tensor will produce an error if evaluated.
Its value must be fed using the `feed_dict` optional argument to
`Session.run()`, `Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.sparse_placeholder(tf.float32)
y = tf.sparse_reduce_sum(x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
print(sess.run(y, feed_dict={
x: tf.SparseTensorValue(indices, values, shape)})) # Will succeed.
print(sess.run(y, feed_dict={
x: (indices, values, shape)})) # Will succeed.
sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
sp_value = sp.eval(session=sess)
print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.
```
@compatibility{eager} Placeholders are not compatible with eager execution.
Args:
dtype: The type of `values` elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a sparse tensor of any shape.
name: A name for prefixing the operations (optional).
Returns:
A `SparseTensor` that may be used as a handle for feeding a value, but not
evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if context.in_eager_mode():
raise RuntimeError("tf.placeholder() is not compatible with "
"eager execution.")
shape_name = (name + "/shape") if name is not None else None
shape, rank = _normalize_sparse_shape(shape, shape_name)
if shape is None:
shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
return sparse_tensor.SparseTensor(
values=placeholder(
dtype,
shape=[None],
name=(name + "/values") if name is not None else None),
indices=placeholder(
dtypes.int64, shape=[None, rank],
name=(name + "/indices") if name is not None else None),
dense_shape=shape)
# pylint: enable=redefined-outer-name
@tf_export("pad")
def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0): # pylint: disable=invalid-name
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
name: A name for the operation (optional).
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`.
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
# Convert lower/mixed case to upper for NumPy compatibility
# NumPy uses all lower-case modes.
mode = mode.upper()
if mode == "CONSTANT":
# TODO(rjryan): Once the forward compatibility period (3 weeks) have passed
# remove the "Pad" fallback here.
if constant_values != 0:
result = gen_array_ops._pad_v2(
tensor, paddings, constant_values, name=name)
else:
result = gen_array_ops._pad(tensor, paddings, name=name)
elif mode == "REFLECT":
result = gen_array_ops._mirror_pad(
tensor, paddings, mode="REFLECT", name=name)
elif mode == "SYMMETRIC":
result = gen_array_ops._mirror_pad(
tensor, paddings, mode="SYMMETRIC", name=name)
else:
raise ValueError("Unknown padding mode: %s" % mode)
# Restore shape information where possible.
if context.in_graph_mode():
paddings_constant = tensor_util.constant_value(
result.op.inputs[1], partial=True)
input_shape = result.op.inputs[0].shape
if (input_shape.ndims is not None and not result.shape.is_fully_defined()
and paddings_constant is not None):
new_shape = []
for padding, dim in zip(paddings_constant, input_shape.as_list()):
if padding is None or dim is None or not all(padding):
new_shape.append(None)
else:
new_shape.append(sum(padding) + dim)
result.set_shape(new_shape)
return result
@tf_export("meshgrid")
def meshgrid(*args, **kwargs):
"""Broadcasts parameters for evaluation on an N-D grid.
Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
of N-D coordinate arrays for evaluating expressions on an N-D grid.
Notes:
`meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
When the `indexing` argument is set to 'xy' (the default), the broadcasting
instructions for the first two dimensions are swapped.
Examples:
Calling `X, Y = meshgrid(x, y)` with the tensors
```python
x = [1, 2, 3]
y = [4, 5, 6]
X, Y = tf.meshgrid(x, y)
# X = [[1, 2, 3],
# [1, 2, 3],
# [1, 2, 3]]
# Y = [[4, 4, 4],
# [5, 5, 5],
# [6, 6, 6]]
```
Args:
*args: `Tensor`s with rank 1.
**kwargs:
- indexing: Either 'xy' or 'ij' (optional, default: 'xy').
- name: A name for the operation (optional).
Returns:
outputs: A list of N `Tensor`s with rank N.
Raises:
TypeError: When no keyword arguments (kwargs) are passed.
ValueError: When indexing keyword argument is not one of `xy` or `ij`.
"""
indexing = kwargs.pop("indexing", "xy")
name = kwargs.pop("name", "meshgrid")
if kwargs:
key = list(kwargs.keys())[0]
raise TypeError("'{}' is an invalid keyword argument "
"for this function".format(key))
if indexing not in ("xy", "ij"):
raise ValueError("indexing parameter must be either 'xy' or 'ij'")
with ops.name_scope(name, "meshgrid", args) as name:
ndim = len(args)
s0 = (1,) * ndim
# Prepare reshape by inserting dimensions with size 1 where needed
output = []
for i, x in enumerate(args):
output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))
# Create parameters for broadcasting each tensor to the full size
shapes = [size(x) for x in args]
output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
if indexing == "xy" and ndim > 1:
output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
shapes[0], shapes[1] = shapes[1], shapes[0]
# TODO(nolivia): improve performance with a broadcast
mult_fact = ones(shapes, output_dtype)
return [x * mult_fact for x in output]
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name,redefined-outer-name
def _compute_size_of_strided_dim(shrink, spec, size):
"""Computes the size of a single strided slice dimension."""
unknown = None # Document what None means here.
use_full_range = None # Document other use of None.
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
stride = spec.step
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
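# Editor's worked example (illustration only, not part of the original TensorFlow
# source): for size = Dimension(10) and spec = slice(1, 8, 3) on a non-shrink axis,
# begin = 1, end = 8, stride = 3 and interval_length = 7, so the strided dimension
# has 7 // 3 + 1 = 3 entries (indices 1, 4 and 7). For spec = slice(8, 1, -3) the
# interval_length is -7 with stride -3, again giving 3 entries (indices 8, 5 and 2).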
def _TileGradShape(op):
"""Shape function for the TileGrad op."""
multiples_shape = op.inputs[1].get_shape().with_rank(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
# it is a vector of non-negative integers, and (ii) doing so allows
# us to handle partially-known multiples.
multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
input_shape.ndims)
if multiples.ndims is None:
return [tensor_shape.unknown_shape()]
else:
output_dims = []
for dim, multiple in zip(input_shape.dims, multiples.dims):
output_dims.append(dim // multiple)
return [tensor_shape.TensorShape(output_dims)]
@tf_export("edit_distance")
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
["a", "b"],
(2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
length of `truth.`
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(hypothesis, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
raise TypeError("Hypothesis must be a SparseTensor.")
if not isinstance(truth, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
raise TypeError("Truth must be a SparseTensor.")
return gen_array_ops._edit_distance(
hypothesis.indices,
hypothesis.values,
hypothesis.dense_shape,
truth.indices,
truth.values,
truth.dense_shape,
normalize=normalize,
name=name)
@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
def _FakeQuantWithMinMaxArgsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxArgs op."""
return fake_quant_with_min_max_args_gradient(
grad,
op.inputs[0],
min=op.get_attr("min"),
max=op.get_attr("max"),
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVars")
def _FakeQuantWithMinMaxVarsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVars op."""
return fake_quant_with_min_max_vars_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
return fake_quant_with_min_max_vars_per_channel_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@tf_export("required_space_to_batch_paddings")
def required_space_to_batch_paddings(input_shape,
block_shape,
base_paddings=None,
name=None):
"""Calculate padding required to make block_shape divide input_shape.
This function can be used to calculate a suitable paddings argument for use
with space_to_batch_nd and batch_to_space_nd.
Args:
input_shape: int32 Tensor of shape [N].
block_shape: int32 Tensor of shape [N].
base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum
amount of padding to use. All elements must be >= 0. If not specified,
defaults to 0.
name: string. Optional name prefix.
Returns:
(paddings, crops), where:
`paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
satisfying:
paddings[i, 0] = base_paddings[i, 0].
0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
(input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0
crops[i, 0] = 0
crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]
Raises: ValueError if called with incompatible shapes.
"""
with ops.name_scope(name, "required_space_to_batch_paddings",
[input_shape, block_shape]):
input_shape = ops.convert_to_tensor(
input_shape, dtype=dtypes.int32, name="input_shape")
block_shape = ops.convert_to_tensor(
block_shape, dtype=dtypes.int32, name="block_shape")
block_shape.get_shape().assert_is_fully_defined()
block_shape.get_shape().assert_has_rank(1)
num_block_dims = block_shape.get_shape()[0].value
if num_block_dims == 0:
return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
input_shape.get_shape().assert_is_compatible_with([num_block_dims])
if base_paddings is not None:
base_paddings = ops.convert_to_tensor(
base_paddings, dtype=dtypes.int32, name="base_paddings")
base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
else:
base_paddings = zeros([num_block_dims, 2], dtypes.int32)
const_block_shape = tensor_util.constant_value(block_shape)
const_input_shape = tensor_util.constant_value(input_shape)
const_base_paddings = tensor_util.constant_value(base_paddings)
if (const_block_shape is not None and const_input_shape is not None and
const_base_paddings is not None):
block_shape = const_block_shape
input_shape = const_input_shape
base_paddings = const_base_paddings
# Use same expression for both constant and non-constant case.
pad_start = base_paddings[:, 0]
orig_pad_end = base_paddings[:, 1]
full_input_shape = input_shape + pad_start + orig_pad_end
pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
pad_end = orig_pad_end + pad_end_extra
result_paddings = stack(
[[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
name="paddings")
result_crops = stack(
[[0, pad_end_extra[i]] for i in range(num_block_dims)], name="crops")
return result_paddings, result_crops
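# Editor's sketch (not part of the original TensorFlow source): a concrete use of
# required_space_to_batch_paddings, wrapped in a hypothetical helper so module import
# behavior is unchanged. With input_shape = [5], block_shape = [4] and
# base_paddings = [[1, 0]]: full_input_shape = 6 and pad_end_extra = (4 - 6 % 4) % 4 = 2,
# so paddings == [[1, 2]], crops == [[0, 2]] and (5 + 1 + 2) % 4 == 0 as required.
def _example_required_space_to_batch_paddings():
  return required_space_to_batch_paddings(
      input_shape=[5], block_shape=[4], base_paddings=[[1, 0]])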
@tf_export("space_to_batch")
def space_to_batch(input, paddings, block_size, name=None): # pylint: disable=redefined-builtin
result = space_to_batch_nd(
input,
paddings=paddings,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
space_to_batch.__doc__ = gen_array_ops._space_to_batch.__doc__
@tf_export("space_to_depth")
def space_to_depth(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
space_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export("depth_to_space")
def depth_to_space(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
depth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export("batch_to_space")
def batch_to_space(input, crops, block_size, name=None): # pylint: disable=redefined-builtin
result = batch_to_space_nd(
input,
crops=crops,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
batch_to_space.__doc__ = gen_array_ops._batch_to_space.__doc__
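# Editor's sketch (not part of the original TensorFlow source): space_to_batch followed
# by batch_to_space with the same block_size and zero paddings/crops recovers the input,
# which is the round trip the two wrappers above implement. Hypothetical helper only.
def _example_space_to_batch_round_trip():
  x = ones([1, 4, 4, 1])                                          # NHWC input
  y = space_to_batch(x, paddings=[[0, 0], [0, 0]], block_size=2)  # shape [4, 2, 2, 1]
  return batch_to_space(y, crops=[[0, 0], [0, 0]], block_size=2)  # shape [1, 4, 4, 1]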
@tf_export("one_hot")
def one_hot(indices,
depth,
on_value=None,
off_value=None,
axis=None,
dtype=None,
name=None):
"""Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`,
while all other locations take value `off_value`.
`on_value` and `off_value` must have matching data types. If `dtype` is also
provided, they must be the same data type as specified by `dtype`.
If `on_value` is not provided, it will default to the value `1` with type
`dtype`
If `off_value` is not provided, it will default to the value `0` with type
`dtype`
If the input `indices` is rank `N`, the output will have rank `N+1`. The
new axis is created at dimension `axis` (default: the new axis is appended
at the end).
If `indices` is a scalar the output shape will be a vector of length `depth`
If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`, the output
shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
If `dtype` is not provided, it will attempt to assume the data type of
`on_value` or `off_value`, if one or both are passed in. If none of
`on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the
value `tf.float32`.
Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.
For example:
```python
indices = [0, 1, 2]
depth = 3
tf.one_hot(indices, depth) # output: [3 x 3]
# [[1., 0., 0.],
# [0., 1., 0.],
# [0., 0., 1.]]
indices = [0, 2, -1, 1]
depth = 3
tf.one_hot(indices, depth,
on_value=5.0, off_value=0.0,
axis=-1) # output: [4 x 3]
# [[5.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 5.0], # one_hot(2)
# [0.0, 0.0, 0.0], # one_hot(-1)
# [0.0, 5.0, 0.0]] # one_hot(1)
indices = [[0, 2], [1, -1]]
depth = 3
tf.one_hot(indices, depth,
on_value=1.0, off_value=0.0,
axis=-1) # output: [2 x 2 x 3]
# [[[1.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 1.0]], # one_hot(2)
# [[0.0, 1.0, 0.0], # one_hot(1)
# [0.0, 0.0, 0.0]]] # one_hot(-1)
```
Args:
indices: A `Tensor` of indices.
depth: A scalar defining the depth of the one hot dimension.
on_value: A scalar defining the value to fill in output when `indices[j]
= i`. (default: 1)
off_value: A scalar defining the value to fill in output when `indices[j]
!= i`. (default: 0)
axis: The axis to fill (default: -1, a new inner-most axis).
dtype: The data type of the output tensor.
name: A name for the operation (optional).
Returns:
output: The one-hot tensor.
Raises:
TypeError: If dtype of either `on_value` or `off_value` don't match `dtype`
TypeError: If dtype of `on_value` and `off_value` don't match one another
"""
with ops.name_scope(name, "one_hot",
[indices, depth, on_value, off_value, axis,
dtype]) as name:
on_exists = on_value is not None
off_exists = off_value is not None
on_dtype = (ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists
else None)
off_dtype = (ops.convert_to_tensor(off_value).dtype.base_dtype if off_exists
else None)
if on_exists or off_exists:
if dtype is not None:
# Ensure provided on_value and/or off_value match dtype
if on_exists and on_dtype != dtype:
raise TypeError("dtype {0} of on_value does not match "
"dtype parameter {1}".format(on_dtype, dtype))
if off_exists and off_dtype != dtype:
raise TypeError("dtype {0} of off_value does not match "
"dtype parameter {1}".format(off_dtype, dtype))
else:
# dtype not provided: automatically assign it
dtype = on_dtype if on_exists else off_dtype
elif dtype is None:
# None of on_value, off_value, or dtype provided. Default dtype to float32
dtype = dtypes.float32
if not on_exists:
# on_value not provided: assign to value 1 of type dtype
on_value = ops.convert_to_tensor(1, dtype, name="on_value")
on_dtype = dtype
if not off_exists:
# off_value not provided: assign to value 0 of type dtype
off_value = ops.convert_to_tensor(0, dtype, name="off_value")
off_dtype = dtype
if on_dtype != off_dtype:
raise TypeError("dtype {0} of on_value does not match "
"dtype {1} of off_value".format(on_dtype, off_dtype))
return gen_array_ops._one_hot(indices, depth, on_value, off_value, axis,
name)
def _all_dimensions(x):
"""Returns a 1D-tensor listing all dimensions in x."""
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.get_shape().is_fully_defined()):
r = x.dense_shape.get_shape()[0].value # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(r), dtype=dtypes.int32)
# Otherwise, we rely on `range` and `rank` to do the right thing at runtime.
return gen_math_ops._range(0, rank(x), 1)
@tf_export("sequence_mask")
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
"""Returns a mask tensor representing the first N positions of each cell.
If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has
dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with
```
mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])
```
Examples:
```python
tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],
# [True, True, True, False, False],
# [True, True, False, False, False]]
tf.sequence_mask([[1, 3],[2,0]]) # [[[True, False, False],
# [True, True, True]],
# [[True, True, False],
# [False, False, False]]]
```
Args:
lengths: integer tensor, all its values <= maxlen.
maxlen: scalar integer tensor, size of last dimension of returned tensor.
Default is the maximum value in `lengths`.
dtype: output type of the resulting tensor.
name: name of the op.
Returns:
A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.
Raises:
ValueError: if `maxlen` is not a scalar.
"""
with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
lengths = ops.convert_to_tensor(lengths)
if maxlen is None:
maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))
else:
maxlen = ops.convert_to_tensor(maxlen)
if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:
raise ValueError("maxlen must be scalar for sequence_mask")
# The basic idea is to compare a range row vector of size maxlen:
# [0, 1, 2, 3, 4]
# to length as a matrix with 1 column: [[1], [3], [2]].
# Because of broadcasting on both arguments this comparison results
# in a matrix of size (len(lengths), maxlen)
row_vector = gen_math_ops._range(
constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))
# Since maxlen >= max(lengths), it is safe to use maxlen as a cast
# authoritative type. Whenever maxlen fits into tf.int32, so do the lengths.
matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)
result = row_vector < matrix
if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
return result
else:
return gen_math_ops.cast(result, dtype)
@tf_export("squeeze")
def squeeze(input, axis=None, name=None, squeeze_dims=None):
# pylint: disable=redefined-builtin
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t)) # [2, 3]
```
Or, to remove specific size 1 dimensions:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]
```
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`.
If specified, only squeezes the dimensions listed. The dimension
index starts at 0. It is an error to squeeze a dimension that is not 1.
Must be in the range `[-rank(input), rank(input))`.
name: A name for the operation (optional).
squeeze_dims: Deprecated keyword argument that is now axis.
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: When both `squeeze_dims` and `axis` are specified.
"""
if squeeze_dims is not None:
if axis is not None:
raise ValueError("Cannot specify both 'squeeze_dims' and 'axis'")
axis = squeeze_dims
if np.isscalar(axis):
axis = [axis]
return gen_array_ops._squeeze(input, axis, name)
@tf_export("where")
def where(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
If both `x` and `y` are None, then this operation returns the coordinates of
true elements of `condition`. The coordinates are returned in a 2-D tensor
where the first dimension (rows) represents the number of true elements, and
the second dimension (columns) represents the coordinates of the true
elements. Keep in mind, the shape of the output tensor can vary depending on
how many true values there are in input. Indices are output in row-major
order.
If both non-None, `x` and `y` must have the same shape.
The `condition` tensor must be a scalar if `x` and `y` are scalar.
If `x` and `y` are vectors of higher rank, then `condition` must be either a
vector with size matching the first dimension of `x`, or must have the same
shape as `x`.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be taken
from `x` (if true) or `y` (if false).
If `condition` is a vector and `x` and `y` are higher rank matrices, then it
chooses which row (outer dimension) to copy from `x` and `y`. If `condition`
has the same shape as `x` and `y`, then it chooses which element to copy from
`x` and `y`.
Args:
condition: A `Tensor` of type `bool`
x: A Tensor which may have the same shape as `condition`. If `condition` is
rank 1, `x` may have higher rank, but its first dimension must match the
size of `condition`.
y: A `tensor` with the same shape and type as `x`.
name: A name of the operation (optional)
Returns:
A `Tensor` with the same type and shape as `x`, `y` if they are non-None.
A `Tensor` with shape `(num_true, dim_size(condition))`.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
"""
if x is None and y is None:
with ops.name_scope(name, "Where", [condition]) as name:
condition = ops.convert_to_tensor(
condition, preferred_dtype=dtypes.bool, name="condition")
return gen_array_ops.where(condition=condition, name=name)
elif x is not None and y is not None:
return gen_math_ops._select(condition=condition, x=x, y=y, name=name)
else:
raise ValueError("x and y must both be non-None or both be None.")
@tf_export("reverse")
def reverse(tensor, axis, name=None):
return gen_array_ops.reverse_v2(tensor, axis, name)
reverse.__doc__ = gen_array_ops.reverse_v2.__doc__
# pylint: disable=redefined-builtin
@tf_export("reverse_sequence")
def reverse_sequence(input,
seq_lengths,
seq_axis=None,
batch_axis=None,
name=None,
seq_dim=None,
batch_dim=None):
seq_axis = deprecation.deprecated_argument_lookup("seq_axis", seq_axis,
"seq_dim", seq_dim)
batch_axis = deprecation.deprecated_argument_lookup("batch_axis", batch_axis,
"batch_dim", batch_dim)
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
# pylint: enable=redefined-builtin
reverse_sequence.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
"seq_dim", "seq_axis")
@tf_export("gather")
def gather(params, indices, validate_indices=None, name=None, axis=0):
# TODO(rjryan): Remove "Gather" creation in favor of GatherV2 once the forward
# compatibility 3 week period has passed.
if axis == 0:
return gen_array_ops.gather(
params, indices, validate_indices=validate_indices, name=name)
else:
return gen_array_ops.gather_v2(params, indices, axis, name=name)
gather.__doc__ = gen_array_ops.gather_v2.__doc__
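# Editor's sketch (not part of the original TensorFlow source): gathering rows
# (axis=0, the default) and columns (axis=1) with the wrapper above; hypothetical
# helper for illustration only.
def _example_gather():
  params = constant([[1, 2, 3], [4, 5, 6]])
  rows = gather(params, [1, 0])          # [[4, 5, 6], [1, 2, 3]]
  cols = gather(params, [2, 0], axis=1)  # [[3, 1], [6, 4]]
  return rows, cols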
# Define quantize_v2 here in order to make name the second-to-last attribute,
# because round_mode was added later.
@tf_export("quantize_v2")
@deprecation.deprecated(
"2017-10-25",
"`tf.quantize_v2` is deprecated, please use `tf.quantize` instead.")
def quantize_v2(input, # pylint: disable=redefined-builtin
min_range,
max_range,
T,
mode="MIN_COMBINED",
name=None,
round_mode="HALF_AWAY_FROM_ZERO"):
return gen_array_ops.quantize_v2(input,
min_range,
max_range,
T=T,
mode=mode,
name=name,
round_mode=round_mode)
quantize_v2.__doc__ = """Please use `tf.quantize` instead."""
# We want to expose tf.quantize instead of tf.quantize_v2; we can deprecate
# tf.quantize_v2 in next version of TensorFlow.
@tf_export("quantize")
def quantize(input, # pylint: disable=redefined-builtin
min_range,
max_range,
T,
mode="MIN_COMBINED",
round_mode="HALF_AWAY_FROM_ZERO",
name=None):
return gen_array_ops.quantize_v2(
input,
min_range,
max_range,
T,
mode=mode,
round_mode=round_mode,
name=name)
quantize.__doc__ = gen_array_ops.quantize_v2.__doc__
| avg_line_length: 34.230547 | max_line_length: 106 | alphanum_fraction: 0.649806 |
| hexsha: fc224e12f18d586eb073570280eafbdbf71eb48f | size: 12938 | ext: py | lang: Python |
| max_stars_repo_path: likelihood/tools.py | max_stars_repo_name: jzsmoreno/MLearning | max_stars_repo_head_hexsha: 45ff911f56b4dd76eb43bc7934df66dde0377cd4 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-10-10T01:50:35.000Z | max_stars_repo_stars_event_max_datetime: 2020-10-10T01:50:35.000Z |
| max_issues_repo_path: likelihood/tools.py | max_issues_repo_name: jzsmoreno/MLearning | max_issues_repo_head_hexsha: 45ff911f56b4dd76eb43bc7934df66dde0377cd4 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: likelihood/tools.py | max_forks_repo_name: jzsmoreno/MLearning | max_forks_repo_head_hexsha: 45ff911f56b4dd76eb43bc7934df66dde0377cd4 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
import numpy as np
import matplotlib.pyplot as plt
"""
Data Science from Scratch, Second Edition, by Joel Grus (O'Reilly).Copyright 2019 Joel Grus, 978-1-492-04113-9
"""
def minibatches(dataset, batch_size, shuffle = True):
"""Generates 'batch_size'-sized minibatches from the dataset
Parameters
----------
dataset : List
batch_size : int
shuffle : bool
"""
# start indexes 0, batch_size, 2 * batch_size, ...
batch_starts = [start for start in range(0, len(dataset), batch_size)]
if shuffle: np.random.shuffle(batch_starts) # shuffle the batches
for start in batch_starts:
end = start + batch_size
yield dataset[start:end]
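# Editor's sketch (not part of the original module): iterating over minibatches of a
# toy dataset; the helper below is hypothetical and only illustrates the generator above.
def _example_minibatches():
    data = list(range(10))
    return list(minibatches(data, batch_size=4, shuffle=False))
    # -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]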
def difference_quotient(f, x, h):
"""Calculates the difference quotient of 'f' evaluated at x and x + h
Parameters
----------
f(x) : Callable function
x : float
h : float
Returns
-------
'(f(x + h) - f(x)) / h'
"""
return (f(x + h) - f(x)) / h
def partial_difference_quotient(f, v, i, h):
"""Calculates the partial difference quotient of 'f'
Parameters
----------
f(x0,...,xi-th) : Callable function
    v : Vector or np.array
    i : int
        Index of the coordinate with respect to which the quotient is taken.
    h : float
Returns
-------
the i-th partial difference quotient of f at v
"""
w = [v_j + (h if j == i else 0) # add h to just the ith element of v
for j, v_j in enumerate(v)]
return (f(w) - f(v)) / h
def estimate_gradient(f, v, h = 0.0001):
"""Calculates the gradient of 'f' at v
Parameters
----------
f(x0,...,xi-th) : Callable function
v : Vector or np.array
h : float. By default it is set to 0.0001
"""
return [partial_difference_quotient(f, v, i, h)
for i in range(len(v))]
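# Editor's sketch (illustration only, not part of the original module): estimating the
# gradient of f(v) = v0**2 + 3*v1 at v = [1.0, 2.0]; the exact gradient is [2.0, 3.0]
# and the finite-difference estimate is close for the default h.
def _example_estimate_gradient():
    f = lambda v: v[0] ** 2 + 3 * v[1]
    return estimate_gradient(f, [1.0, 2.0])  # approximately [2.0001, 3.0]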
#-------------------------------------------------------------------------
def calculate_probability(x, points = 1, cond = True):
"""Calculates the probability of the data
Parameters
----------
x : np.array
An array containing the data.
    points : int
        Number of trailing points used to compute the probability. By default it is set to `1`.
    cond : bool
        If `True`, the point probabilities are multiplied; otherwise they are summed. By default it is set to `True`.
Returns
-------
p : np.array
An array containing the probability of the data.
"""
p = []
f = cdf(x)[0]
for i in range(len(x)):
p.append(f(x[i]))
p = np.array(p)
    if cond:
        prob = np.prod(p[-points:])
    else:
        prob = np.sum(p[-points:])
    if prob < 0 or prob > 1:
        print('\nThe probability of the data cannot be calculated.\n')
    else:
        print('The model has a probability of {:.2f}% of being correct'.format(prob*100))
return p
def cdf(x, poly = 9, inv = False, plot = False):
"""Calculates the cumulative distribution function of the data
Parameters
----------
x : np.array
An array containing the data.
    poly : int
        Degree of the polynomial used for the fit. By default it is set to `9`.
    inv : bool
        If `True`, fits the inverse (quantile) function instead. By default it is set to `False`.
    plot : bool
        If `True`, plots the data together with the fit. By default it is set to `False`.
    Returns
    -------
    f : np.poly1d
        Polynomial fit of the (inverse) cumulative distribution function.
    cdf_ : np.array
        An array containing the empirical cumulative distribution values.
    ox : np.array
        The sorted data.
    """
cdf_ = np.cumsum(x) / np.sum(x)
ox = np.sort(x)
I = np.ones(len(ox))
M = np.triu(I)
df = np.dot(ox, M)
df_ = df/np.max(df)
if inv:
fit = np.polyfit(df_, ox, poly)
f = np.poly1d(fit)
else:
fit = np.polyfit(ox, df_, poly)
f = np.poly1d(fit)
if plot:
if inv:
plt.plot(df_, ox, 'o', label = 'inv cdf')
plt.plot(df_, f(df_), 'r--', label = 'fit')
plt.title('Quantile Function')
plt.xlabel("Probability")
plt.ylabel("Value")
plt.legend()
plt.show()
else:
plt.plot(ox, cdf_, 'o', label = 'cdf')
plt.plot(ox, f(ox), 'r--', label = 'fit')
plt.title('Cumulative Distribution Function')
plt.xlabel("Value")
plt.ylabel("Probability")
plt.legend()
plt.show()
return f, cdf_, ox
class corr():
"""Calculates the autocorrelation of the data
Parameters
----------
x : np.array
An array containing the data.
y : np.array
An array containing the data.
Returns
-------
z : np.array
An array containing the correlation of x and y.
"""
def __init__(self, x, y):
self.x = x
self.y = y
self.result = np.correlate(x, y, mode='full')
self.z = self.result[self.result.size//2:]
self.z = self.z/float(np.abs(self.z).max())
def plot(self):
plt.plot(range(len(self.z)), self.z, label = 'Correlation')
plt.legend()
plt.show()
def __call__(self):
return self.z
class autocorr():
"""Calculates the autocorrelation of the data
Parameters
----------
x : np.array
An array containing the data.
Returns
-------
z : np.array
An array containing the autocorrelation of the data.
"""
def __init__(self, x):
self.x = x
self.result = np.correlate(x, x, mode='full')
self.z = self.result[self.result.size//2:]
self.z = self.z/float(np.abs(self.z).max())
def plot(self):
plt.plot(range(len(self.z)), self.z, label = 'Autocorrelation')
plt.legend()
plt.show()
def __call__(self):
return self.z
def fft_denoise(dataset, sigma = 0, mode = True):
"""Performs the noise removal using the Fast Fourier Transform
Parameters
----------
dataset : np.array
An array containing the noised data.
    sigma : float
        Number of standard deviations above the mean power spectral density used as
        the denoising threshold. By default it is set to `0`.
mode : bool
A boolean value. By default it is set to `True`.
Returns
-------
dataset : np.array
An array containing the denoised data.
"""
dataset_ = dataset.copy()
for i in range(dataset.shape[0]):
n = dataset.shape[1]
fhat = np.fft.fft(dataset[i, :], n)
freq = (1/n) * np.arange(n)
L = np.arange(1,np.floor(n/2),dtype='int')
PSD = fhat * np.conj(fhat) / n
indices = PSD > np.mean(PSD) + sigma * np.std(PSD)
PSDclean = PSD * indices # Zero out all others
fhat = indices * fhat
ffilt = np.fft.ifft(fhat) # Inverse FFT for filtered time signal
dataset_[i, :] = ffilt.real
# Calculate the period of the signal
        period = 1 / freq[L][np.argmax(np.abs(fhat[L]))]  # dominant frequency by magnitude
if mode:
print(f'The {i+1}-th row of the dataset has been denoised.')
print(f'The period is {round(period, 4)}')
return dataset_
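# Editor's sketch (illustration only, not part of the original module): denoising a noisy
# sine wave. The signal is reshaped to (1, n) because fft_denoise iterates over rows.
def _example_fft_denoise():
    t = np.linspace(0, 1, 128)
    noisy = (np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(128)).reshape(1, -1)
    return fft_denoise(noisy, sigma=0.5, mode=False)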
def feature_importance(dataset, values):
"""Calculates the importance of each feature
Parameters
----------
dataset : np.array
An array containing the scaled data.
values : np.ndarray
A set of values returned by the linear function.
Returns
-------
importance : np.array
An array containing the importance of each feature.
"""
importance = []
print('\nFeature importance:')
U, S, VT = np.linalg.svd(dataset, full_matrices=False)
w = (VT.T@np.linalg.inv(np.diag(S))@U.T).T@values
for i in range(dataset.shape[0]):
a = np.around(w[i], decimals=4)
importance.append(a)
print(f'The importance of the {i+1} feature is {a}')
return np.array(importance)
def cal_average(y, alpha = 1):
"""Calculates the moving average of the data
Parameters
----------
y : np.array
An array containing the data.
    alpha : float
        Fraction of the series length used as the moving-average window, between 0 and 1.
        By default it is set to `1`.
    Returns
    -------
    average : np.array
        The moving average of the data.
    """
n = int(alpha * len(y))
w = np.ones(n) / n
average = np.convolve(y, w, mode='same') / np.convolve(np.ones_like(y), w, mode='same')
return average
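# Editor's sketch (illustration only, not part of the original module): smoothing a noisy
# series; with alpha = 0.05 and 200 samples the moving-average window is 10 samples wide.
def _example_cal_average():
    y = np.sin(np.linspace(0, 4 * np.pi, 200)) + 0.2 * np.random.randn(200)
    return cal_average(y, alpha=0.05)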
def rescale(dataset, n=1):
"""Perform a standard rescaling of the data
Parameters
----------
dataset : np.array
An array containing the model data.
n : int
Is the degree of the polynomial to subtract
the slope. By default it is set to `1`.
    Returns
    -------
    data_scaled : np.array
        An array containing the scaled data.
    values : list
        A list `[mu, sigma, fitting]` with the per-row minima, the per-row
        (max - min) ranges and the fitted trend polynomials, as required by `scale`.
    """
mu = []
sigma = []
fitting = []
dataset_ = dataset.copy()
try:
xaxis = range(dataset.shape[1])
except:
error_type = 'IndexError'
msg = 'Trying to access an item at an invalid index.'
print(f'{error_type}: {msg}')
return None
for i in range(dataset.shape[0]):
        if n is not None:
            fit = np.polyfit(xaxis, dataset[i, :], n)
            f = np.poly1d(fit)
            poly = f(xaxis)
            fitting.append(f)
        else:
            poly = np.zeros(len(xaxis))
            fitting.append(np.poly1d([0.0]))  # zero polynomial, so scale() can still call it
        dataset_[i, :] -= poly
mu.append(np.min(dataset_[i, :]))
if np.max(dataset_[i, :]) != 0:
sigma.append(np.max(dataset_[i, :])-mu[i])
else:
sigma.append(1)
dataset_[i, :] = 2*((dataset_[i, :] - mu[i]) / sigma[i])-1
values = [mu, sigma, fitting]
return dataset_, values
def scale(dataset, values):
"""Performs the inverse operation to the rescale function
Parameters
----------
dataset : np.array
An array containing the scaled data.
values : np.ndarray
A set of values returned by the rescale function.
"""
dataset_ = dataset.copy()
for i in range(dataset.shape[0]):
dataset_[i, :] += 1
dataset_[i, :] /= 2
dataset_[i, :] = dataset_[i, :]*values[1][i]
dataset_[i, :] += values[0][i]
dataset_[i, :] += values[2][i](range(dataset_.shape[1]))
return dataset_
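# Editor's sketch (illustration only, not part of the original module): rescale/scale
# round trip. rescale removes a fitted degree-n trend and maps each row to [-1, 1];
# scale inverts both steps, so the original data is recovered up to numerical error.
def _example_rescale_round_trip():
    data = np.random.rand(3, 50) + np.linspace(0, 1, 50)
    scaled, values = rescale(data, n=1)
    return np.allclose(scale(scaled, values), data)  # True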
def generate_series(n, n_steps, incline = True):
"""Function that generates $n$ series of length $n_steps$
"""
freq1, freq2, offsets1, offsets2 = np.random.rand(4, n, 1)
if incline :
slope = np.random.rand(n, 1)
else:
slope = 0.0
offsets2 = 1
time = np.linspace(0, 1, n_steps)
series = 0.5 * np.sin((time - offsets1) * (freq1 * 10 + 10)) # wave 1
series += 0.2 * np.sin((time - offsets2) * (freq2 * 20 + 20)) # + wave 2
series += 0.7 * (np.random.rand(n, n_steps) - 0.5) # + noise
series += 5 * slope * time + 2 * (offsets2-offsets1) * time ** (1-offsets2)
return series.astype(np.float32)
def RMSE(y_true, y_pred):
"""Calculates the Root Mean Squared Error
Parameters
----------
y_true : np.array
An array containing the true values.
y_pred : np.array
An array containing the predicted values.
Returns
-------
RMSE : float
The Root Mean Squared Error.
"""
print(f'The RMSE is {np.sqrt(np.mean((y_true - y_pred)**2))}')
return np.sqrt(np.mean((y_true - y_pred)**2))
#-------------------------------------------------------------------------
if __name__ == '__main__':
# Generate data
x = np.random.rand(3, 100)
y = 0.1*x[0, :] + 0.4*x[1, :] + 0.5*x[2, :]
importance = feature_importance(x, y)
a = generate_series(1, 40, incline=False)
# Graph the data for visualization
plt.plot(range(len(a[0, :])), a[0, :], label = 'Original Data')
plt.legend()
plt.xlabel('Time periods')
plt.ylabel('$y(t)$')
plt.show()
a_denoise = fft_denoise(a)
plt.plot(range(len(a_denoise[0, :])), a_denoise[0, :], label = 'Denoise Data')
plt.legend()
plt.xlabel('Time periods')
plt.ylabel('$y(t)$')
plt.show()
# Calculate the autocorrelation of the data
z = autocorr(a[0, :])
z.plot()
#print(z())
N = 1000
mu = np.random.uniform(0, 10.0)
sigma = np.random.uniform(0.1, 1.0)
x = np.random.normal(mu, sigma, N)
f, cdf_, ox = cdf(x, plot = True)
invf, cdf_, ox = cdf(x, plot = True, inv = True)
| avg_line_length: 26.842324 | max_line_length: 112 | alphanum_fraction: 0.535941 |
| hexsha: e6ae75e3f1c892ee2f2ca00e2dfe19a9ff89d5b2 | size: 9931 | ext: py | lang: Python |
| max_stars_repo_path: train.py | max_stars_repo_name: HawkSun562/lw-pose_modify | max_stars_repo_head_hexsha: f8906704aa2ef2be1df2004b90cd4ec52718f589 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-08-24T07:00:47.000Z | max_stars_repo_stars_event_max_datetime: 2021-08-24T07:00:47.000Z |
| max_issues_repo_path: train.py | max_issues_repo_name: HawkSun562/lw-pose_modify | max_issues_repo_head_hexsha: f8906704aa2ef2be1df2004b90cd4ec52718f589 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: train.py | max_forks_repo_name: HawkSun562/lw-pose_modify | max_forks_repo_head_hexsha: f8906704aa2ef2be1df2004b90cd4ec52718f589 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
import argparse
import cv2
import os
import torch
from torch.nn import DataParallel
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms
from datasets.coco import CocoTrainDataset
from datasets.transformations import ConvertKeypoints, Scale, Rotate, CropPad, Flip
from modules.get_parameters import get_parameters_conv, get_parameters_bn, get_parameters_conv_depthwise
from models.with_mobilenet import PoseEstimationWithMobileNet
from modules.loss import l2_loss
from modules.load_state import load_state, load_from_mobilenet
from val import evaluate
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False) # To prevent freeze of DataLoader
def train(prepared_train_labels, train_images_folder, num_refinement_stages, base_lr, batch_size, batches_per_iter,
num_workers, checkpoint_path, weights_only, from_mobilenet, checkpoints_folder, log_after,
val_labels, val_images_folder, val_output_name, checkpoint_after, val_after):
gpu_available = torch.cuda.is_available()
if gpu_available:
print('[INFO] Training on GPU')
else:
print('[INFO] Training on CPU')
net = PoseEstimationWithMobileNet(num_refinement_stages)
stride = 8
sigma = 7
path_thickness = 1
dataset = CocoTrainDataset(prepared_train_labels, train_images_folder,
stride, sigma, path_thickness,
transform=transforms.Compose([
ConvertKeypoints(),
Scale(),
Rotate(pad=(128, 128, 128)),
CropPad(pad=(128, 128, 128)),
Flip()]))
train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
optimizer = optim.Adam([
{'params': get_parameters_conv(net.model, 'weight')},
{'params': get_parameters_conv_depthwise(net.model, 'weight'), 'weight_decay': 0},
{'params': get_parameters_bn(net.model, 'weight'), 'weight_decay': 0},
{'params': get_parameters_bn(net.model, 'bias'), 'lr': base_lr * 2, 'weight_decay': 0},
{'params': get_parameters_conv(net.cpm, 'weight'), 'lr': base_lr},
{'params': get_parameters_conv(net.cpm, 'bias'), 'lr': base_lr * 2, 'weight_decay': 0},
{'params': get_parameters_conv_depthwise(net.cpm, 'weight'), 'weight_decay': 0},
{'params': get_parameters_conv(net.initial_stage, 'weight'), 'lr': base_lr},
{'params': get_parameters_conv(net.initial_stage, 'bias'), 'lr': base_lr * 2, 'weight_decay': 0},
{'params': get_parameters_conv(net.refinement_stages, 'weight'), 'lr': base_lr * 4},
{'params': get_parameters_conv(net.refinement_stages, 'bias'), 'lr': base_lr * 8, 'weight_decay': 0},
{'params': get_parameters_bn(net.refinement_stages, 'weight'), 'weight_decay': 0},
{'params': get_parameters_bn(net.refinement_stages, 'bias'), 'lr': base_lr * 2, 'weight_decay': 0},
], lr=base_lr, weight_decay=5e-4)
num_iter = 0
current_epoch = 0
drop_after_epoch = [100, 200, 260]
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=drop_after_epoch, gamma=0.333)
if checkpoint_path:
checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage.cuda()) if gpu_available else torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
if from_mobilenet:
load_from_mobilenet(net, checkpoint)
else:
load_state(net, checkpoint)
if not weights_only:
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
num_iter = checkpoint['iter']
current_epoch = checkpoint['current_epoch']
net = DataParallel(net).cuda() if gpu_available else DataParallel(net).cpu()
net.train()
for epochId in range(current_epoch, 280):
total_losses = [0, 0] * (num_refinement_stages + 1) # heatmaps loss, paf loss per stage
batch_per_iter_idx = 0
for batch_data in train_loader:
if batch_per_iter_idx == 0:
optimizer.zero_grad()
images = batch_data['image'].cuda() if gpu_available else batch_data['image']
keypoint_masks = batch_data['keypoint_mask'].cuda() if gpu_available else batch_data['keypoint_mask']
paf_masks = batch_data['paf_mask'].cuda() if gpu_available else batch_data['paf_mask']
keypoint_maps = batch_data['keypoint_maps'].cuda() if gpu_available else batch_data['keypoint_maps']
paf_maps = batch_data['paf_maps'].cuda() if gpu_available else batch_data['paf_maps']
stages_output = net(images)
losses = []
for loss_idx in range(len(total_losses) // 2):
losses.append(l2_loss(stages_output[loss_idx * 2], keypoint_maps, keypoint_masks, images.shape[0]))
losses.append(l2_loss(stages_output[loss_idx * 2 + 1], paf_maps, paf_masks, images.shape[0]))
total_losses[loss_idx * 2] += losses[-2].item() / batches_per_iter
total_losses[loss_idx * 2 + 1] += losses[-1].item() / batches_per_iter
loss = losses[0]
for loss_idx in range(1, len(losses)):
loss += losses[loss_idx]
loss /= batches_per_iter
loss.backward()
batch_per_iter_idx += 1
if batch_per_iter_idx == batches_per_iter:
optimizer.step()
batch_per_iter_idx = 0
num_iter += 1
else:
continue
if num_iter % log_after == 0:
print('Iter: {}'.format(num_iter))
for loss_idx in range(len(total_losses) // 2):
print('\n'.join(['stage{}_pafs_loss: {}', 'stage{}_heatmaps_loss: {}']).format(
loss_idx + 1, total_losses[loss_idx * 2 + 1] / log_after,
loss_idx + 1, total_losses[loss_idx * 2] / log_after))
for loss_idx in range(len(total_losses)):
total_losses[loss_idx] = 0
if num_iter % checkpoint_after == 0:
snapshot_name = '{}/checkpoint_iter_{}.pth'.format(checkpoints_folder, num_iter)
torch.save({'state_dict': net.module.state_dict(),
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
'iter': num_iter,
'current_epoch': epochId},
snapshot_name)
if num_iter % val_after == 0:
print('Validation...')
evaluate(val_labels, val_output_name, val_images_folder, net)
net.train()
scheduler.step()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--prepared-train-labels', type=str, required=True,
help='path to the file with prepared annotations')
parser.add_argument('--train-images-folder', type=str, required=True, help='path to COCO train images folder')
parser.add_argument('--num-refinement-stages', type=int, default=1, help='number of refinement stages')
parser.add_argument('--base-lr', type=float, default=4e-5, help='initial learning rate')
parser.add_argument('--batch-size', type=int, default=80, help='batch size')
parser.add_argument('--batches-per-iter', type=int, default=1, help='number of batches to accumulate gradient from')
parser.add_argument('--num-workers', type=int, default=8, help='number of workers')
parser.add_argument('--checkpoint-path', type=str, required=True, help='path to the checkpoint to continue training from')
parser.add_argument('--from-mobilenet', action='store_true',
help='load weights from mobilenet feature extractor')
parser.add_argument('--weights-only', action='store_true',
help='just initialize layers with pre-trained weights and start training from the beginning')
parser.add_argument('--experiment-name', type=str, default='default',
help='experiment name to create folder for checkpoints')
parser.add_argument('--log-after', type=int, default=100, help='number of iterations to print train loss')
parser.add_argument('--val-labels', type=str, required=True, help='path to json with keypoints val labels')
parser.add_argument('--val-images-folder', type=str, required=True, help='path to COCO val images folder')
parser.add_argument('--val-output-name', type=str, default='detections.json',
help='name of output json file with detected keypoints')
parser.add_argument('--checkpoint-after', type=int, default=5000,
help='number of iterations to save checkpoint')
parser.add_argument('--val-after', type=int, default=5000,
help='number of iterations to run validation')
parser.add_argument('--gpu', type=str, default='0',
help='which gpu index you want to use')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
checkpoints_folder = '{}_checkpoints'.format(args.experiment_name)
if not os.path.exists(checkpoints_folder):
os.makedirs(checkpoints_folder)
train(args.prepared_train_labels, args.train_images_folder, args.num_refinement_stages, args.base_lr, args.batch_size,
args.batches_per_iter, args.num_workers, args.checkpoint_path, args.weights_only, args.from_mobilenet,
checkpoints_folder, args.log_after, args.val_labels, args.val_images_folder, args.val_output_name,
args.checkpoint_after, args.val_after)
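    # Usage sketch (added for illustration; the script name and all paths below are
    # hypothetical and assume a COCO-style layout under ./data):
    #
    #   python train.py \
    #       --prepared-train-labels data/prepared_train_annotation.pkl \
    #       --train-images-folder data/train2017/ \
    #       --checkpoint-path checkpoints/init.pth \
    #       --val-labels data/val_subset.json \
    #       --val-images-folder data/val2017/ \
    #       --gpu 0
    #
    # Note on --batches-per-iter N: the loop above accumulates gradients over N
    # batches before each optimizer.step(), so the effective batch size is
    # N * --batch-size.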
| 55.792135
| 191
| 0.642433
|
913ba5a6e3386099e14326b7aa8f12531ef8ff77
| 21,676
|
py
|
Python
|
python/tvm/script/scope_handler.py
|
h77h7/tvm-04.26
|
1bd8e6b921f392ae29b7672159326d94d40d6922
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 3
|
2021-04-30T04:09:44.000Z
|
2021-07-07T09:49:51.000Z
|
python/tvm/script/scope_handler.py
|
h77h7/tvm-04.26
|
1bd8e6b921f392ae29b7672159326d94d40d6922
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 1
|
2019-10-22T21:09:49.000Z
|
2019-10-22T21:09:49.000Z
|
python/tvm/script/scope_handler.py
|
h77h7/tvm-04.26
|
1bd8e6b921f392ae29b7672159326d94d40d6922
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 2
|
2019-10-22T08:48:21.000Z
|
2022-02-19T02:17:50.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM Script Parser Scope Handler Classes"""
# pylint: disable=redefined-builtin, unused-argument, invalid-name, relative-beyond-top-level
from typing import Tuple, Any, Callable, Optional, List, Union, Mapping
import synr
from synr import ast
import tvm.tir
from tvm.runtime import Object
from tvm.ir import Span, Range
from tvm.tir import Stmt, PrimExpr, IterVar, Var, Buffer, BufferRegion, ForKind
from .context_maintainer import ContextMaintainer
from .utils import (
get_param_list,
tvm_span_from_synr,
buffer_slice_to_region,
call_with_error_reporting,
)
from .registry import register
from .node import BufferSlice
class ScopeHandler:
"""Base class for all scope handlers"""
def __init__(self, func: Callable):
self.func: Callable = func
self.body: Optional[Stmt] = None
self.node: Optional[synr.ast.Node] = None
self.context: Optional[ContextMaintainer] = None
def signature(self) -> Tuple[str, Tuple[list, list, Any]]:
return "tir." + self.func.__name__, get_param_list(self.func)
def enter_scope(
self,
node: synr.ast.Node,
context: ContextMaintainer,
arg_list: List[Any],
span: synr.ast.Span,
):
pass
def exit_scope(
self,
node: synr.ast.Node,
context: ContextMaintainer,
arg_list: List[Any],
span: synr.ast.Span,
):
self.node = node
self.context = context
return call_with_error_reporting(
context.report_error, span, self.func, *arg_list, span=tvm_span_from_synr(span)
)
class WithScopeHandler(ScopeHandler):
"""Base class for all with scope handlers"""
def __init__(self, func, concise_scope, def_symbol):
super().__init__(func)
self.concise_scope = concise_scope
self.def_symbol = def_symbol
@staticmethod
def get_optional_var_names(node, context):
"""Get list of names from ast.With's optional_vars"""
assert isinstance(
node, ast.With
), f"WithScopeHandler expected ast.With but got {type(node)}"
if isinstance(node.lhs, list):
for var in node.lhs:
if not isinstance(var, ast.Var):
context.report_error(
f"Invalid optional var definition, expected Var but got {type(var)}",
node.span,
)
var_names = [var.id.name for var in node.lhs]
else:
context.report_error(
f"Invalid optional var definition, expected list of Var but got {type(node.lhs)}",
node.span,
)
return var_names
@register
class Allocate(WithScopeHandler):
""" With scope handler tir.allocate(extents, dtype, scope, condition) """
def __init__(self):
def allocate(extents, dtype, scope, condition=True, span=None):
condition = tvm.runtime.convert(condition)
scope = tvm.runtime.convert(scope)
body = tvm.tir.Allocate(
self.buffer_var, dtype, extents, condition, self.body, span=span
)
return tvm.tir.AttrStmt(self.buffer_var, "storage_scope", scope, body, span=span)
super().__init__(allocate, concise_scope=True, def_symbol=True)
self.buffer_var = None
def enter_scope(
self,
node: synr.ast.Node,
context: ContextMaintainer,
arg_list: List[Any],
span: synr.ast.Span,
):
# define buffer vars in symbol table
if isinstance(node, ast.With):
names = WithScopeHandler.get_optional_var_names(node, context)
if len(names) != 1:
context.report_error("Unexpected number of vars", node.span)
name = names[0]
elif isinstance(node, ast.Assign):
name = node.lhs.id.name
else:
raise Exception("Internal Bug")
def setup_buffer_var(extents, dtype, scope, condition=True, span: Span = None):
"""Setup buffer var for a given type."""
buffer_ptr_type = tvm.ir.PointerType(tvm.ir.PrimType(dtype))
self.buffer_var = tvm.tir.Var(name, buffer_ptr_type, span)
setup_buffer_var(*arg_list, span=tvm_span_from_synr(node.lhs.id.span))
context.update_symbol(name, self.buffer_var, node)
@register
class LaunchThread(WithScopeHandler):
""" With scope handler tir.launch_thread(env_var, extent) """
def __init__(self):
def launch_thread(env_var, extent, span):
extent = tvm.runtime.convert(extent, span=span)
return tvm.tir.AttrStmt(
IterVar(
None,
env_var,
getattr(IterVar, "ThreadIndex"),
self.context.func_var_env_dict[env_var],
span=span,
),
"thread_extent",
extent,
self.body,
span=span,
)
super().__init__(launch_thread, concise_scope=True, def_symbol=False)
@register
class Realize(WithScopeHandler):
""" With scope handler tir.realize(buffer_bounds, scope, condition) """
def __init__(self):
def realize(
            buffer_slice: BufferSlice, scope: str, condition: bool = True, span: Optional[Span] = None
):
assert self.context, "call 'exit_scope' before 'enter_scope'"
buffer: Buffer = buffer_slice.buffer
bounds: List[Range] = []
for s in buffer_slice.slices:
min: Union[PrimExpr, int] = s.start
extent: Union[PrimExpr, int] = 1 if s.stop is None else s.stop - s.start
if isinstance(extent, PrimExpr):
extent = self.context.analyzer.simplify(extent)
bounds.append(Range.from_min_extent(min, extent, span=s.span))
scope = tvm.runtime.convert(scope, span=span)
return tvm.tir.AttrStmt(
buffer,
"realize_scope",
scope,
tvm.tir.BufferRealize(buffer, bounds, condition, self.body, span=span),
span=span,
)
super().__init__(realize, concise_scope=True, def_symbol=False)
@register
class Attr(WithScopeHandler):
""" With scope handler tir.attr(attr_node, attr_key, value) """
def __init__(self):
def attr(attr_node, attr_key, value, span):
attr_node = tvm.runtime.convert(attr_node, span=span)
value = tvm.runtime.convert(value, span=span)
return tvm.tir.AttrStmt(attr_node, attr_key, value, self.body, span=span)
super().__init__(attr, concise_scope=True, def_symbol=False)
@register
class AssertHandler(WithScopeHandler):
""" With scope handler tir.Assert(condition, message) """
def __init__(self):
def Assert(condition, message, span):
return tvm.tir.AssertStmt(condition, tvm.runtime.convert(message), self.body, span=span)
super().__init__(Assert, concise_scope=True, def_symbol=False)
@register
class Let(WithScopeHandler):
""" With scope handler tir.let(var, value) """
def __init__(self):
def let(var, value, span):
return tvm.tir.LetStmt(var, value, self.body, span=span)
super().__init__(let, concise_scope=False, def_symbol=False)
@register
class Block(WithScopeHandler):
""" With scope handler tir.block(extents, name) as iter_vars"""
def __init__(self):
def block(axes=None, name_hint: str = "", span: Optional[Span] = None):
assert (
self.node and self.context and self.body
), "call 'exit_scope' before 'enter_scope'"
block_info = self.context.block_info_stack[-1]
if axes is None:
axes = []
if len(axes) != len(self.block_vars):
self.context.report_error(
"Inconsistent number of block vars, "
+ f"there are {len(axes)} axes but {len(self.block_vars)} block vars. "
+ "The number of block vars should match the number of axes.",
self.node.span,
)
block_iters: List[IterVar] = []
for i, axis in enumerate(axes):
axis = tvm.runtime.convert(axis)
if isinstance(axis, tvm.tir.PrimExpr):
block_var_dom = Range.from_min_extent(0, axis)
block_iters.append(IterVar(block_var_dom, self.block_vars[i], 0))
elif isinstance(axis, Range):
block_iters.append(IterVar(axis, self.block_vars[i], 0))
elif isinstance(axis, IterVar):
block_iters.append(IterVar(axis.dom, self.block_vars[i], axis.iter_type))
else:
self.context.report_error(
"Invalid argument of tir.block(), "
+ f"expected PrimExpr, Range or IterVar, but got {type(axis)}",
self.node.span,
)
# create block read/write regions
reads: List[BufferRegion] = (
[buffer_slice_to_region(read) for read in block_info.reads]
if block_info.reads
else []
)
writes: List[BufferRegion] = (
[buffer_slice_to_region(write) for write in block_info.writes]
if block_info.writes
else []
)
region_detect_mask: int = (block_info.reads is None) | (
(block_info.writes is None) << 1
)
annotations = {} if block_info.annotations is None else block_info.annotations
if region_detect_mask != 0:
annotations["tir.script_parsing_detect_access"] = region_detect_mask
inner = tvm.tir.Block(
block_iters,
reads,
writes,
name_hint,
self.body,
block_info.init,
block_info.alloc_buffers,
block_info.match_buffers,
annotations,
span,
)
# create block var iter binding
values: List[PrimExpr]
if not block_info.iter_bindings:
values = self.context.loop_stack[-2].copy()
if len(block_iters) == 0:
# It is an opaque block without any bindings
values = []
elif len(values) == 0:
values = [tvm.tir.const(float("nan"), dtype="float32")] * len(block_iters)
elif len(values) != len(block_iters):
self.context.report_error(
"Number of block iter var and outer loop nesting mismatch, "
+ f"{len(block_iters)} block iter vars but {len(values)} loops",
self.node.span,
)
else:
for block_var in self.block_vars:
if block_var not in block_info.iter_bindings:
self.context.report_error(
"Missing block iter var binding for " + block_var.name,
self.node.span,
)
values = [block_info.iter_bindings[block_var] for block_var in self.block_vars]
predicate = (
tvm.tir.const(True, "bool")
if block_info.predicate is None
else block_info.predicate
)
body = tvm.tir.BlockRealize(values, predicate, inner, span)
return body
super().__init__(func=block, concise_scope=False, def_symbol=True)
self.block_vars = None
def enter_scope(
self,
node: synr.ast.Node,
context: ContextMaintainer,
arg_list: List[Any],
span: synr.ast.Span,
):
# define block vars
assert isinstance(
node, ast.With
), f"BlockScopeHandler expected to work on ast.With but got {type(node)}"
var_names = WithScopeHandler.get_optional_var_names(node, context)
self.block_vars = [tvm.te.var(name) for name in var_names]
for block_var in self.block_vars:
context.update_symbol(block_var.name, block_var, node)
@register
class InitBlock(WithScopeHandler):
""" With scope handler tir.init()"""
def __init__(self):
def init(span: Span = None):
assert self.context, "call 'exit_scope' before 'enter_scope'"
if self.context.block_info_stack[-2].init is not None:
self.context.report_error("Duplicate init block declaration", span)
self.context.block_info_stack[-2].init = self.body
super().__init__(func=init, concise_scope=False, def_symbol=True)
class ForScopeHandler(ScopeHandler):
"""Base class for all for scope handlers"""
def __init__(self, func):
super().__init__(func)
self.loop_vars: Optional[List[Var]] = None
def enter_scope(
self,
node: synr.ast.Node,
context: ContextMaintainer,
arg_list: List[Any],
span: synr.ast.Span,
):
assert isinstance(node, ast.For), f"ForScopeHandler expected ast.For but got {type(node)}"
loop_var_names = list()
spans = list()
if isinstance(node.lhs, ast.Var):
loop_var_names.append(node.lhs.id.name)
spans.append(tvm_span_from_synr(node.lhs.id.span))
elif isinstance(node.lhs, list):
for elt in node.lhs:
if not isinstance(elt, ast.Var):
context.report_error(
f"Invalid loop var. Expected a var, but got {type(elt)}", elt.span
)
loop_var_names.append(elt.id.name)
spans.append(tvm_span_from_synr(elt.id.span))
else:
context.report_error(
f"Invalid loop var. Expected var or list of vars as lhs, but got {type(node.lhs)}",
span,
)
self.loop_vars = [
tvm.te.var(name, dtype="int32", span=span) for name, span in zip(loop_var_names, spans)
]
for loop_var in self.loop_vars:
context.update_symbol(loop_var.name, loop_var, node)
context.loop_stack[-1].append(loop_var)
def exit_scope(
self,
node: synr.ast.Node,
context: ContextMaintainer,
arg_list: List[Any],
span: synr.ast.Span,
):
assert self.loop_vars, "call 'exit_scope' before 'enter_scope'"
for _ in self.loop_vars:
context.loop_stack[-1].pop()
return super().exit_scope(node, context, arg_list, span)
def create_loop(
self,
begin: PrimExpr,
end: PrimExpr,
kind: ForKind,
thread_binding: Optional[str] = None,
annotations: Optional[Mapping[str, Object]] = None,
span: Optional[Span] = None,
) -> tvm.tir.For:
"""
Helper function for creating For in TVM Script parser.
Parameters
----------
begin : PrimExpr
The beginning value.
end : PrimExpr
            The ending value.
kind : ForKind
The type of the for.
thread_binding: Optional[str]
The thread this loop binds to.
annotations : Optional[Mapping[str, Object]]
Additional annotation hints.
span : Optional[Span]
The location of this for in the source code.
Returns
-------
for : For
The constructed For.
"""
assert (
self.loop_vars and self.context and self.node
), "call 'exit_scope' before 'enter_scope'"
if len(self.loop_vars) != 1:
self.context.report_error(
f"Expected exactly one loop var, but got {self.loop_vars}", self.node.span
)
extent = end if begin == 0 else self.context.analyzer.simplify(end - begin)
annos: Mapping[str, Object] = {}
if annotations is not None:
annos = {
key: tvm.tir.StringImm(val) if isinstance(val, str) else val
for key, val in annotations.items()
}
return tvm.tir.For(
self.loop_vars[0],
begin,
extent,
kind,
self.body,
thread_binding=thread_binding,
annotations=annos,
span=span,
)
@register
class Serial(ForScopeHandler):
""" For scope handler tir.serial(begin, end, annotations)"""
def __init__(self):
def serial(
begin: PrimExpr,
end: PrimExpr,
annotations: Optional[Mapping[str, Object]] = None,
span: Optional[Span] = None,
):
return self.create_loop(begin, end, ForKind.SERIAL, annotations=annotations, span=span)
super().__init__(serial)
@register
class Parallel(ForScopeHandler):
""" For scope handler tir.parallel(begin, end, annotations)"""
def __init__(self):
def parallel(
begin: PrimExpr,
end: PrimExpr,
annotations: Optional[Mapping[str, Object]] = None,
span: Optional[Span] = None,
):
return self.create_loop(
begin, end, ForKind.PARALLEL, annotations=annotations, span=span
)
super().__init__(parallel)
@register
class Vectorized(ForScopeHandler):
""" For scope handler tir.vectorized(begin, end, annotations)"""
def __init__(self):
def vectorized(
begin: PrimExpr,
end: PrimExpr,
annotations: Optional[Mapping[str, Object]] = None,
span: Optional[Span] = None,
):
return self.create_loop(
begin, end, ForKind.VECTORIZED, annotations=annotations, span=span
)
super().__init__(vectorized)
@register
class Unroll(ForScopeHandler):
""" For scope handler tir.unroll(begin, end, annotations)"""
def __init__(self):
def unroll(
begin: PrimExpr,
end: PrimExpr,
annotations: Optional[Mapping[str, Object]] = None,
span: Optional[Span] = None,
):
return self.create_loop(
begin, end, ForKind.UNROLLED, annotations=annotations, span=span
)
super().__init__(unroll)
@register
class ThreadBinding(ForScopeHandler):
""" For scope handler tir.thread_binding(begin, end, thread, annotations)"""
def __init__(self):
def thread_binding(
begin: PrimExpr,
end: PrimExpr,
thread: str,
annotations: Optional[Mapping[str, Object]] = None,
span: Optional[Span] = None,
):
thread_iter_var = IterVar(None, None, IterVar.ThreadIndex, thread, span=span)
return self.create_loop(
begin,
end,
ForKind.THREAD_BINDING,
thread_binding=thread_iter_var,
annotations=annotations,
span=span,
)
super().__init__(thread_binding)
@register
class RangeHandler(ForScopeHandler):
"""For scope handler range(begin, end, annotations)
    Note that tir.range behaves exactly the same as tir.serial.
"""
def __init__(self):
def for_range(
begin: PrimExpr,
end: PrimExpr,
annotations: Optional[Mapping[str, Object]] = None,
span: Optional[Span] = None,
):
return self.create_loop(begin, end, ForKind.SERIAL, annotations=annotations, span=span)
super().__init__(for_range)
def signature(self):
return "range", get_param_list(self.func)
@register
class Grid(ForScopeHandler):
""" For scope handler tir.grid(extents)"""
def __init__(self):
def grid(*extents: List[PrimExpr], span: Span):
assert (
self.node and self.context and self.loop_vars
), "call 'exit_scope' before 'enter_scope'"
if len(self.loop_vars) != len(extents):
self.context.report_error(
"Inconsistent number of loop vars and extents, "
+ f"got {len(self.loop_vars)} vs {len(extents)}",
self.node.span,
)
body = self.body
for loop_var, extent in zip(reversed(self.loop_vars), reversed(extents)):
body = tvm.tir.For(loop_var, 0, extent, ForKind.SERIAL, body, span=span)
return body
super().__init__(grid)
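# Illustration only (not part of the original module): a new with-scope handler
# would follow the same @register pattern as the classes above. The name
# tir.no_op below is hypothetical; the sketch just shows the required pieces
# (inner function, concise_scope, def_symbol) using tvm.tir.Evaluate.
#
# @register
# class NoOp(WithScopeHandler):
#     """ With scope handler tir.no_op() """
#     def __init__(self):
#         def no_op(span=None):
#             return tvm.tir.Evaluate(tvm.runtime.convert(0), span=span)
#         super().__init__(no_op, concise_scope=True, def_symbol=False)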
| 34.737179
| 100
| 0.577966
|
9a87e10612d3c46c9c370b2414314511217e8bfc
| 2,377
|
py
|
Python
|
ubicenter/plotly.py
|
MaxGhenis/ubicenter.py
|
6444e456f278216ec17c0bdd818212af7804c67a
|
[
"MIT"
] | null | null | null |
ubicenter/plotly.py
|
MaxGhenis/ubicenter.py
|
6444e456f278216ec17c0bdd818212af7804c67a
|
[
"MIT"
] | null | null | null |
ubicenter/plotly.py
|
MaxGhenis/ubicenter.py
|
6444e456f278216ec17c0bdd818212af7804c67a
|
[
"MIT"
] | null | null | null |
from plotly import graph_objects as go
from typing import Union
CONFIG = {"displayModeBar": False}
LOGO_URL = (
"https://raw.githubusercontent.com/UBICenter/ubicenter.org/master/"
"assets/images/logos/wide-blue.jpg"
)
LIGHTER_BLUE = "#ABCEEB" # Blue 100.
LIGHT_BLUE = "#49A6E2" # Blue 500.
BLUE = "#1976D2" # Blue 700.
DARK_BLUE = "#0F4AA1" # Blue 900.
GRAY = "#BDBDBD"
WHITE = "#FFFFFF"
BLUE_COLOR_SEQUENCE = [LIGHTER_BLUE, LIGHT_BLUE, BLUE, DARK_BLUE]
def add_ubi_center_logo(
fig: go.Figure, x: float = 0.98, y: float = -0.12
) -> None:
"""Adds UBI Center logo to a plotly figure. Returns nothing.
:param fig: Plotly figure.
:type fig: go.Figure
    :param x: Horizontal coordinate, defaults to 0.98.
    :type x: float, optional
:param y: Vertical coordinate, defaults to -0.12.
:type y: float, optional
"""
fig.add_layout_image(
dict(
source=LOGO_URL,
xref="paper",
yref="paper",
x=x,
y=y,
sizex=0.12,
sizey=0.12,
xanchor="right",
yanchor="bottom",
)
)
def format_fig(
fig: go.Figure,
show: bool = True,
width: int = 800,
height: int = 600,
**kwargs
) -> Union[None, go.Figure]:
"""Formats figure with UBI styling and logo.
**kwargs passed to add_ubi_center_logo.
:param fig: Plotly figure.
:type fig: go.Figure
:param show: Whether to show the figure, defaults to True.
If False, returns the figure.
:type show: bool
:param width: Width of the figure, defaults to 800.
:type width: int, optional
:param height: Height of the figure, defaults to 600.
:type height: int, optional
:return: If show is True, nothing. If show is False, returns the
formatted plotly figure.
:rtype: go.Figure
"""
add_ubi_center_logo(fig, **kwargs)
fig.update_xaxes(
title_font=dict(size=16, color="black"), tickfont={"size": 14}
)
fig.update_yaxes(
title_font=dict(size=16, color="black"), tickfont={"size": 14}
)
fig.update_layout(
hoverlabel_align="right",
font_family="Roboto",
title_font_size=20,
plot_bgcolor="white",
paper_bgcolor="white",
width=width,
height=height,
)
if show:
fig.show(config=CONFIG)
else:
return fig
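# Minimal usage sketch (illustrative; the scatter data and the output filename
# are made up for this example):
if __name__ == "__main__":
    example_fig = go.Figure(go.Scatter(x=[1, 2, 3], y=[2, 4, 8]))
    styled = format_fig(example_fig, show=False)  # returns the UBI-styled figure
    styled.write_html("example.html")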
| 25.55914
| 71
| 0.604964
|
667db24e93107503ee9bdae285b6ab24da57538d
| 2,825
|
py
|
Python
|
delve/tests/test_torch.py
|
dionhaefner/delve
|
811756520cbfd8dce4427c53203ac193f61a94d1
|
[
"MIT"
] | 69
|
2019-03-04T20:59:59.000Z
|
2022-02-02T17:44:09.000Z
|
delve/tests/test_torch.py
|
dionhaefner/delve
|
811756520cbfd8dce4427c53203ac193f61a94d1
|
[
"MIT"
] | 30
|
2019-02-26T13:34:06.000Z
|
2022-01-24T13:08:25.000Z
|
delve/tests/test_torch.py
|
dionhaefner/delve
|
811756520cbfd8dce4427c53203ac193f61a94d1
|
[
"MIT"
] | 11
|
2019-04-22T21:53:56.000Z
|
2021-12-22T13:20:05.000Z
|
import tempfile
import pytest
import torch
import torch.nn
from delve import CheckLayerSat
from delve.writers import CSVandPlottingWriter
device = "cuda:0" if torch.cuda.is_available() else "cpu"
TEMP_DIR = tempfile.TemporaryDirectory()
TEMP_DIRNAME = TEMP_DIR.name
def test_dense_saturation_runs():
save_path = TEMP_DIRNAME
model = torch.nn.Sequential(torch.nn.Linear(10, 88)).to(device)
writer = CSVandPlottingWriter(save_path,
fontsize=16,
primary_metric='test_accuracy')
_ = CheckLayerSat(save_path, [writer],
model,
stats=['lsat', 'idim'],
device=device)
test_input = torch.randn(5, 10).to(device)
_ = model(test_input)
return True
def test_lstm_saturation_runs():
save_path = TEMP_DIRNAME
# Run 1
timeseries_method = 'timestepwise'
model = torch.nn.Sequential().to(device)
lstm = torch.nn.LSTM(10, 88, 2)
lstm.name = 'lstm2'
model.add_module('lstm', lstm)
writer = CSVandPlottingWriter(save_path,
fontsize=16,
primary_metric='test_accuracy')
saturation = CheckLayerSat(save_path, [writer],
model,
stats=['lsat', 'idim'],
timeseries_method=timeseries_method,
device=device)
input = torch.randn(5, 3, 10).to(device)
output, (hn, cn) = model(input)
saturation.close()
def test_lstm_saturation_embed_runs():
save_path = TEMP_DIRNAME
# Run 2
timeseries_method = 'last_timestep'
model = torch.nn.Sequential().to(device)
lstm = torch.nn.LSTM(10, 88, 2)
model.add_module('lstm', lstm)
writer = CSVandPlottingWriter(save_path, fontsize=16)
saturation = CheckLayerSat(save_path, [writer],
model,
stats=['lsat', 'idim', 'embed'],
timeseries_method=timeseries_method,
device=device)
input = torch.randn(5, 3, 10).to(device)
output, (hn, cn) = model(input)
assert saturation.logs['train-covariance-matrix'][
'lstm'].saved_samples.shape == torch.Size([5, 88])
    input = torch.randn(8, 3, 10).to(device)
output, (hn, cn) = model(input)
assert saturation.logs['train-covariance-matrix'][
'lstm'].saved_samples.shape == torch.Size([8, 88])
saturation.add_saturations()
saturation.close()
return True
@pytest.fixture(scope="session", autouse=True)
def cleanup(request):
"""Cleanup temp directory once we are finished"""
def remove_tempdir():
TEMP_DIR.cleanup()
request.addfinalizer(remove_tempdir)
| 30.053191
| 67
| 0.588673
|
ef68d0dc6b30640aa0342204c6916ce12fab6b51
| 5,565
|
py
|
Python
|
tools/text_demo.py
|
lz20061213/quadrilateral
|
ee0a38c91bfc979998f63963585b5e00f24a49a0
|
[
"MIT"
] | 2
|
2018-08-12T13:15:55.000Z
|
2021-06-14T18:48:09.000Z
|
tools/text_demo.py
|
lz20061213/quadrilateral
|
ee0a38c91bfc979998f63963585b5e00f24a49a0
|
[
"MIT"
] | null | null | null |
tools/text_demo.py
|
lz20061213/quadrilateral
|
ee0a38c91bfc979998f63963585b5e00f24a49a0
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
from model.test import test_net, im_detect
from model.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import argparse
import pprint
import time, os, sys
import os.path as osp
import cv2
import numpy as np
from utils.cython_nms import nms
import tensorflow as tf
from nets.vgg16 import vgg16
from nets.resnet_v1 import resnetv1
NUM_CLASSES = 2
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Faster R-CNN network')
parser.add_argument('--model', dest='model',
help='model to test',
default='/home/hezheqi/Project/dev/frame_regression/output/res101/icdar_train_17/baseline_0.62/res101_faster_rcnn_iter_100000.ckpt', type=str)
  parser.add_argument('--gpu', dest='gpu', help='index of the gpu to use')
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=100, type=int)
parser.add_argument('--tag', dest='tag',
help='tag of the model',
default='', type=str)
parser.add_argument('--net', dest='net',
help='vgg16 or res101',
default='res101', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def draw_polygon(ori_img, pts, is_copy=True):
if is_copy:
img = ori_img.copy()
else:
img = ori_img
  if pts is None or len(pts) == 0:  # use 'is None': pts may be a numpy array
return img
pts = pts.reshape((-1, 1, 2))
# print('pts', pts)
cv2.polylines(img, [pts], True, (255, 0, 0), thickness=8)
return img
def draw_one_page(img, frames, pos_list=None):
for i, frame in enumerate(frames):
if len(frame) != 0:
if pos_list:
frame[::2] += pos_list[i][0]
frame[1::2] += pos_list[i][1]
draw_polygon(img, frame, is_copy=False)
def vis_detections(im, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
for i in inds:
frame = dets[i, 4:12]
frame = frame.astype(int)
# score = dets[i, -1]
draw_polygon(im, frame, is_copy=False)
def detect_one(name, thresh=0.75):
  """Detect quadrilateral regions in one image, draw them and save the result."""
  im = cv2.imread(osp.join('/home/hezheqi/data/tmp/p2', name))
scores, polys = im_detect(sess, net, im)
print(scores)
boxes = np.zeros((polys.shape[0], 8), dtype=polys.dtype)
boxes[:, 0] = np.min(polys[:, 0:8:2], axis=1)
boxes[:, 1] = np.min(polys[:, 1:8:2], axis=1)
boxes[:, 2] = np.max(polys[:, 0:8:2], axis=1)
boxes[:, 3] = np.max(polys[:, 1:8:2], axis=1)
boxes[:, 4] = np.min(polys[:, 8::2], axis=1)
boxes[:, 5] = np.min(polys[:, 9::2], axis=1)
boxes[:, 6] = np.max(polys[:, 8::2], axis=1)
boxes[:, 7] = np.max(polys[:, 9::2], axis=1)
for j in range(1, NUM_CLASSES):
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
cls_polys = polys[inds, j * 8:(j + 1) * 8]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
cls_dets_poly = cls_polys.astype(np.float32, copy=False)
keep = nms(cls_dets, cfg.TEST.NMS)
# cls_dets = cls_dets[keep, :]
cls_dets = cls_boxes[keep, :]
cls_dets_poly = cls_dets_poly[keep, :]
cls_scores = cls_scores[:, np.newaxis]
cls_scores = cls_scores[keep, :]
cls_dets = np.hstack((cls_dets, cls_dets_poly, cls_scores))
print(cls_dets)
vis_detections(im, cls_dets)
cv2.imwrite(osp.join(out_dir, name), im)
fout = open(osp.join(out_dir, 'txt', name[:-4]+'.txt'), 'w')
for det in cls_dets:
fout.write('{}\n'.format(' '.join(str(int(d)) for d in det[4:12])))
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
  # if a model is given, get the name from it
  # if not, then just use the initialization weights
if args.model:
filename = os.path.splitext(os.path.basename(args.model))[0]
else:
filename = os.path.splitext(os.path.basename(args.weight))[0]
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
tag = args.tag
tag = tag if tag else 'default'
filename = tag + '/' + filename
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
# init session
sess = tf.Session(config=tfconfig)
# load network
if args.net == 'vgg16':
net = vgg16(batch_size=1)
else:
net = resnetv1(batch_size=1, num_layers=101)
# load model
anchors = [4, 8, 16, 32]
ratios = [0.25,0.5,1,2,4]
net.create_architecture(sess, "TEST", NUM_CLASSES,
tag='default', anchor_scales=anchors, anchor_ratios=ratios)
if args.model:
print(('Loading model check point from {:s}').format(args.model))
saver = tf.train.Saver()
saver.restore(sess, args.model)
print('Loaded.')
else:
print(('Loading initial weights from {:s}').format(args.weight))
sess.run(tf.global_variables_initializer())
print('Loaded.')
out_dir = osp.join(cfg.ROOT_DIR, 'output', 'demo')
if not osp.isdir(out_dir):
os.makedirs(out_dir)
img_dir = '/home/hezheqi/data/tmp'
names = open(osp.join(img_dir, 'img_list.txt')).read().strip().split('\n')
# name = ['1.jpg']
for name in names:
detect_one(name)
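  # Example invocation sketch (added for illustration; the checkpoint path below
  # is hypothetical and mirrors the --model default above):
  #
  #   python tools/text_demo.py \
  #       --model output/res101/icdar_train_17/res101_faster_rcnn_iter_100000.ckpt \
  #       --gpu 0 --net res101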
| 32.16763
| 154
| 0.637376
|
fad14b06d09f5ea3e28830ff5854ff6aece6f4f8
| 4,690
|
py
|
Python
|
flatlib/ephem/swe.py
|
D-Vaillant/flatlib
|
fba89c72c13279c0709b0967a597427ee177665b
|
[
"MIT"
] | 196
|
2015-03-13T21:08:26.000Z
|
2022-03-27T20:46:33.000Z
|
flatlib/ephem/swe.py
|
D-Vaillant/flatlib
|
fba89c72c13279c0709b0967a597427ee177665b
|
[
"MIT"
] | 71
|
2017-03-19T14:11:11.000Z
|
2022-02-20T17:37:28.000Z
|
flatlib/ephem/swe.py
|
D-Vaillant/flatlib
|
fba89c72c13279c0709b0967a597427ee177665b
|
[
"MIT"
] | 77
|
2016-03-10T01:23:12.000Z
|
2022-02-27T22:18:12.000Z
|
"""
This file is part of flatlib - (C) FlatAngle
Author: João Ventura (flatangleweb@gmail.com)
This module implements a simple interface with the C
Swiss Ephemeris using the pyswisseph library.
    The pyswisseph library must already be installed and
    accessible.
"""
import swisseph
from flatlib import angle
from flatlib import const
# Map objects
SWE_OBJECTS = {
const.SUN: 0,
const.MOON: 1,
const.MERCURY: 2,
const.VENUS: 3,
const.MARS: 4,
const.JUPITER: 5,
const.SATURN: 6,
const.URANUS: 7,
const.NEPTUNE: 8,
const.PLUTO: 9,
const.CHIRON: 15,
const.NORTH_NODE: 10
}
# Map house systems
SWE_HOUSESYS = {
const.HOUSES_PLACIDUS: b'P',
const.HOUSES_KOCH: b'K',
const.HOUSES_PORPHYRIUS: b'O',
const.HOUSES_REGIOMONTANUS: b'R',
const.HOUSES_CAMPANUS: b'C',
const.HOUSES_EQUAL: b'A',
const.HOUSES_EQUAL_2: b'E',
const.HOUSES_VEHLOW_EQUAL: b'V',
const.HOUSES_WHOLE_SIGN: b'W',
const.HOUSES_MERIDIAN: b'X',
const.HOUSES_AZIMUTHAL: b'H',
const.HOUSES_POLICH_PAGE: b'T',
const.HOUSES_ALCABITUS: b'B',
const.HOUSES_MORINUS: b'M'
}
# ==== Internal functions ==== #
def setPath(path):
""" Sets the path for the swe files. """
swisseph.set_ephe_path(path)
# === Object functions === #
def sweObject(obj, jd):
""" Returns an object from the Ephemeris. """
sweObj = SWE_OBJECTS[obj]
sweList, flg = swisseph.calc_ut(jd, sweObj)
return {
'id': obj,
'lon': sweList[0],
'lat': sweList[1],
'lonspeed': sweList[3],
'latspeed': sweList[4]
}
def sweObjectLon(obj, jd):
""" Returns the longitude of an object. """
sweObj = SWE_OBJECTS[obj]
sweList, flg = swisseph.calc_ut(jd, sweObj)
return sweList[0]
def sweNextTransit(obj, jd, lat, lon, flag):
""" Returns the julian date of the next transit of
an object. The flag should be 'RISE' or 'SET'.
"""
sweObj = SWE_OBJECTS[obj]
flag = swisseph.CALC_RISE if flag == 'RISE' else swisseph.CALC_SET
trans = swisseph.rise_trans(jd, sweObj, lon, lat, 0, 0, 0, flag)
return trans[1][0]
# === Houses and angles === #
def sweHouses(jd, lat, lon, hsys):
""" Returns lists of houses and angles. """
hsys = SWE_HOUSESYS[hsys]
hlist, ascmc = swisseph.houses(jd, lat, lon, hsys)
# Add first house to the end of 'hlist' so that we
# can compute house sizes with an iterator
hlist += (hlist[0],)
houses = [
{
'id': const.LIST_HOUSES[i],
'lon': hlist[i],
'size': angle.distance(hlist[i], hlist[i + 1])
} for i in range(12)
]
angles = [
{'id': const.ASC, 'lon': ascmc[0]},
{'id': const.MC, 'lon': ascmc[1]},
{'id': const.DESC, 'lon': angle.norm(ascmc[0] + 180)},
{'id': const.IC, 'lon': angle.norm(ascmc[1] + 180)}
]
return (houses, angles)
def sweHousesLon(jd, lat, lon, hsys):
""" Returns lists with house and angle longitudes. """
hsys = SWE_HOUSESYS[hsys]
hlist, ascmc = swisseph.houses(jd, lat, lon, hsys)
angles = [
ascmc[0],
ascmc[1],
angle.norm(ascmc[0] + 180),
angle.norm(ascmc[1] + 180)
]
return (hlist, angles)
# === Fixed stars === #
# Beware: the swisseph.fixstar2_mag function is really
# slow because it parses the fixstars.cat file every
# time it is called.
def sweFixedStar(star, jd):
""" Returns a fixed star from the Ephemeris. """
sweList, stnam, flg = swisseph.fixstar2_ut(star, jd)
mag = swisseph.fixstar2_mag(star)
return {
'id': star,
'mag': mag,
'lon': sweList[0],
'lat': sweList[1]
}
# === Eclipses === #
def solarEclipseGlobal(jd, backward):
""" Returns the jd details of previous or next global solar eclipse. """
sweList = swisseph.sol_eclipse_when_glob(jd, backward=backward)
return {
'maximum': sweList[1][0],
'begin': sweList[1][2],
'end': sweList[1][3],
'totality_begin': sweList[1][4],
'totality_end': sweList[1][5],
'center_line_begin': sweList[1][6],
'center_line_end': sweList[1][7],
}
def lunarEclipseGlobal(jd, backward):
""" Returns the jd details of previous or next global lunar eclipse. """
sweList = swisseph.lun_eclipse_when(jd, backward=backward)
return {
'maximum': sweList[1][0],
'partial_begin': sweList[1][2],
'partial_end': sweList[1][3],
'totality_begin': sweList[1][4],
'totality_end': sweList[1][5],
'penumbral_begin': sweList[1][6],
'penumbral_end': sweList[1][7],
}
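# Minimal usage sketch (illustrative; the Julian day value is approximate and
# assumes the swisseph ephemeris files are reachable via setPath):
#
# jd = 2458849.5  # roughly 2020-01-01 00:00 UT
# sun = sweObject(const.SUN, jd)
# print(sun['lon'], sun['lonspeed'])
# houses, angles = sweHouses(jd, 38.72, -9.14, const.HOUSES_PLACIDUS)  # hypothetical observer coordinates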
| 26.201117
| 76
| 0.60064
|
54ec95d23453f1268414c035dedde0b7e88b6407
| 70,903
|
py
|
Python
|
ambari-server/src/test/python/stacks/2.5/HIVE/test_hive_server_int.py
|
zyclove/ambari
|
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/test/python/stacks/2.5/HIVE/test_hive_server_int.py
|
zyclove/ambari
|
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/test/python/stacks/2.5/HIVE/test_hive_server_int.py
|
zyclove/ambari
|
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os
from stacks.utils.RMFTestCase import *
from mock.mock import MagicMock, patch
from resource_management.libraries import functions
from resource_management.core.logger import Logger
from resource_management.libraries.script.config_dictionary import UnknownConfiguration
from hive_server_interactive import HiveServerInteractiveDefault
from resource_management.libraries.script.script import Script
from resource_management.core import shell
@patch("resource_management.libraries.Script.get_tmp_dir", new=MagicMock(return_value=('/var/lib/ambari-agent/tmp')))
@patch.object(functions, "get_stack_version", new=MagicMock(return_value="2.0.0.0-1234"))
@patch("resource_management.libraries.functions.check_thrift_port_sasl", new=MagicMock())
@patch("resource_management.libraries.functions.get_user_call_output.get_user_call_output",
new=MagicMock(return_value=(0, '123', '')))
class TestHiveServerInteractive(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
STACK_VERSION = "2.0.6"
UPGRADE_STACK_VERSION = "2.2"
DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
def setUp(self):
Logger.logger = MagicMock()
self.testDirectory = os.path.dirname(os.path.abspath(__file__))
# llap state related tests.
self.hsi = HiveServerInteractiveDefault()
self.llap_app_name='llap'
self.num_times_to_iterate = 3
self.wait_time = 1
def load_json(self, filename):
file = os.path.join(self.testDirectory, filename)
with open(file, 'rb') as f:
data = json.load(f)
return data
@patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
def test_configure_default(self, copy_to_hdfs_mock):
self.maxDiff = None
copy_to_hdfs_mock.return_value = False
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server_interactive.py",
classname="HiveServerInteractive",
command="configure",
config_file=self.get_src_folder() + "/test/python/stacks/2.5/configs/hsi_default.json",
stack_version=self.STACK_VERSION,
target=RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertNoMoreResources()
"""
  Tests HSI start with llap package creation output having a single line.
Sample output : "Prepared llap-slider-05Apr2016/run.sh for running LLAP"
"""
#@patch("Script.get_tmp_dir()")
@patch("os.path.isfile")
@patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
@patch("socket.socket")
@patch("time.sleep")
def test_start_default_with_llap_single_line_output(self, sleep_mock, socket_mock, copy_to_hfds_mock, is_file_mock): #, get_tmp_dir_mock):
self.maxDiff = None
copy_to_hfds_mock.return_value = False
s = socket_mock.return_value
is_file_mock.return_value = True
#get_tmp_dir_mock.return_value = "/var/lib/ambari-agent/tmp"
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server_interactive.py",
classname="HiveServerInteractive",
command="start",
config_file=self.get_src_folder() + "/test/python/stacks/2.5/configs/hsi_default.json",
stack_version=self.STACK_VERSION,
target=RMFTestCase.TARGET_COMMON_SERVICES,
checked_call_mocks=[(0, "Prepared llap-slider-05Apr2016/run.sh for running LLAP", ""),
(0, """{
\"state\" : \"RUNNING_ALL\"
}""", ""),
(0, """{
\"state\" : \"RUNNING_ALL\"
}""", ""),
(0, "OK.", "")],
)
self.assert_configure_default()
self.assertResourceCalled('Execute',
'/home/hive/llap-slider-05Apr2016/run.sh',
logoutput= True, user=u'hive'
)
self.assertResourceCalled('Execute',
'hive --config /usr/hdp/current/hive-server2-hive2/conf/conf.server --service metatool -updateLocation hdfs://c6401.ambari.apache.org:8020 OK.',
environment={'PATH': '/usr/hdp/current/hadoop-client/bin'},
user='hive'
)
self.assertResourceCalled('Execute',
'/tmp/start_hiveserver2_interactive_script /var/run/hive/hive-server2-interactive.out /var/log/hive/hive-server2-interactive.err /var/run/hive/hive-interactive.pid /usr/hdp/current/hive-server2-hive2/conf/conf.server /var/log/hive',
environment={'HIVE_BIN': '/usr/hdp/current/hive-server2-hive2/bin/hive2',
'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
not_if="ls /var/run/hive/hive-interactive.pid >/dev/null 2>&1 && ps -p 123 >/dev/null 2>&1",
user='hive',
path=['/bin:/usr/hdp/current/hive-server2-hive2/bin:/usr/hdp/current/hadoop-client/bin'],
)
self.assertResourceCalled('Execute',
'/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/hdp/current/hive-server2-hive2/lib/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true\' hive \'!`"\'"\'"\' 1\' com.mysql.jdbc.Driver',
path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
tries=5,
try_sleep=10
)
self.assertNoMoreResources()
"""
  Tests HSI start with llap package creation output having a single line.
  Sample output : "Prepared llap-slider-05Apr2016/run.sh for running LLAP"
  HSI HA is enabled and llap is stopped, so it will be restarted.
"""
#@patch("Script.get_tmp_dir()")
@patch("os.path.isfile")
@patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
@patch("socket.socket")
@patch("time.sleep")
def test_start_hsi_ha_with_llap_single_line_output(self, sleep_mock, socket_mock, copy_to_hfds_mock, is_file_mock): #, get_tmp_dir_mock):
self.maxDiff = None
copy_to_hfds_mock.return_value = False
s = socket_mock.return_value
is_file_mock.return_value = True
#get_tmp_dir_mock.return_value = "/var/lib/ambari-agent/tmp"
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server_interactive.py",
classname="HiveServerInteractive",
command="start",
config_file=self.get_src_folder() + "/test/python/stacks/2.5/configs/hsi_ha.json",
stack_version=self.STACK_VERSION,
target=RMFTestCase.TARGET_COMMON_SERVICES,
checked_call_mocks=[(0, """{
\"state\" : \"COMPLETED\"
}""", ""),
(0, "Prepared llap-slider-05Apr2016/run.sh for running LLAP", ""),
(0, """{
\"state\" : \"RUNNING_ALL\"
}""", ""),
(0, """{
\"state\" : \"RUNNING_ALL\"
}""", ""),
(0, "OK.", "")],
)
self.assert_configure_default()
self.assertResourceCalled('Execute',
'/home/hive/llap-slider-05Apr2016/run.sh',
logoutput= True, user='hive'
)
self.assertResourceCalled('Execute',
'hive --config /usr/hdp/current/hive-server2-hive2/conf/conf.server --service metatool -updateLocation hdfs://c6401.ambari.apache.org:8020 OK.',
environment={'PATH': '/usr/hdp/current/hadoop-client/bin'},
user='hive'
)
self.assertResourceCalled('Execute',
'/tmp/start_hiveserver2_interactive_script /var/run/hive/hive-server2-interactive.out /var/log/hive/hive-server2-interactive.err /var/run/hive/hive-interactive.pid /usr/hdp/current/hive-server2-hive2/conf/conf.server /var/log/hive',
environment={'HIVE_BIN': '/usr/hdp/current/hive-server2-hive2/bin/hive2',
'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
not_if="ls /var/run/hive/hive-interactive.pid >/dev/null 2>&1 && ps -p 123 >/dev/null 2>&1",
user='hive',
path=['/bin:/usr/hdp/current/hive-server2-hive2/bin:/usr/hdp/current/hadoop-client/bin'],
)
self.assertResourceCalled('Execute',
'/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/hdp/current/hive-server2-hive2/lib/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true\' hive \'!`"\'"\'"\' 1\' com.mysql.jdbc.Driver',
path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
tries=5,
try_sleep=10
)
self.assertNoMoreResources()
"""
  Tests HSI start with llap running and HA deployment. As llap is running, it will not be started.
"""
#@patch("Script.get_tmp_dir()")
@patch("os.path.isfile")
@patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
@patch("socket.socket")
@patch("time.sleep")
def test_start_with_ha_and_llap_running(self, sleep_mock, socket_mock, copy_to_hfds_mock, is_file_mock): #, get_tmp_dir_mock):
self.maxDiff = None
copy_to_hfds_mock.return_value = False
s = socket_mock.return_value
is_file_mock.return_value = True
#get_tmp_dir_mock.return_value = "/var/lib/ambari-agent/tmp"
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server_interactive.py",
classname="HiveServerInteractive",
command="start",
config_file=self.get_src_folder() + "/test/python/stacks/2.5/configs/hsi_ha.json",
stack_version=self.STACK_VERSION,
target=RMFTestCase.TARGET_COMMON_SERVICES,
checked_call_mocks=[(0, """{
\"state\" : \"RUNNING_ALL\"
}""", "")],
)
self.assert_configure_default()
self.assertResourceCalled('Execute',
'hive --config /usr/hdp/current/hive-server2-hive2/conf/conf.server --service metatool -updateLocation hdfs://c6401.ambari.apache.org:8020 OK.',
environment={'PATH': '/usr/hdp/current/hadoop-client/bin'},
user='hive'
)
self.assertResourceCalled('Execute',
'/tmp/start_hiveserver2_interactive_script /var/run/hive/hive-server2-interactive.out /var/log/hive/hive-server2-interactive.err /var/run/hive/hive-interactive.pid /usr/hdp/current/hive-server2-hive2/conf/conf.server /var/log/hive',
environment={'HIVE_BIN': '/usr/hdp/current/hive-server2-hive2/bin/hive2',
'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
not_if="ls /var/run/hive/hive-interactive.pid >/dev/null 2>&1 && ps -p 123 >/dev/null 2>&1",
user='hive',
path=['/bin:/usr/hdp/current/hive-server2-hive2/bin:/usr/hdp/current/hadoop-client/bin'],
)
self.assertResourceCalled('Execute',
'/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/hdp/current/hive-server2-hive2/lib/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true\' hive \'!`"\'"\'"\' 1\' com.mysql.jdbc.Driver',
path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
tries=5,
try_sleep=10
)
self.assertNoMoreResources()
'''
restart should not call slider destroy
'''
@patch("os.path.isfile")
@patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
@patch("socket.socket")
@patch("time.sleep")
def test_restart_default_with_llap_multi_line_output(self, sleep_mock, socket_mock, copy_to_hfds_mock, is_file_mock):
self.maxDiff = None
copy_to_hfds_mock.return_value = False
s = socket_mock.return_value
is_file_mock.return_value = True
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server_interactive.py",
classname="HiveServerInteractive",
command="restart",
config_file=self.get_src_folder() + "/test/python/stacks/2.5/configs/hsi_default_for_restart.json",
stack_version=self.STACK_VERSION,
target=RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, "OK.", ""), (0, "OK."), (0, "OK."), (0, "OK."), (0, "OK.")],
checked_call_mocks=[(0, "OK.", ""),
#(0, "OK.", ""),
(0, "UNWANTED_STRING \n "
" Prepared llap-slider-05Apr2016/run.sh for running LLAP \n "
"UNWANTED_STRING \n ", ""),
(0, """{
\"state\" : \"RUNNING_ALL\"
}""", ""),
(0, """{
\"state\" : \"RUNNING_ALL\"
}""", ""),
(0, "OK.", "")],
)
self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
not_if="! (ls /var/run/hive/hive-interactive.pid >/dev/null 2>&1 && ps -p 123 >/dev/null 2>&1)",
)
self.assertResourceCalled('Execute',
"! (ls /var/run/hive/hive-interactive.pid >/dev/null 2>&1 && ps -p 123 >/dev/null 2>&1)",
tries=10,
try_sleep=3,
)
self.assertResourceCalled('Execute',
"! (ls /var/run/hive/hive-interactive.pid >/dev/null 2>&1 && ps -p 123 >/dev/null 2>&1)",
tries=20,
try_sleep=3,
)
self.assertResourceCalled('File', '/var/run/hive/hive-interactive.pid',
action=['delete'],
)
self.assertResourceCalled('Execute', ('slider', 'destroy', u'llap0', '--force'),
ignore_failures = True,
user = 'hive',
timeout = 30,
)
self.assert_configure_default(with_cs_enabled=True)
self.assertResourceCalled('Execute',
'/home/hive/llap-slider-05Apr2016/run.sh',
logoutput= True, user=u'hive'
)
self.assertResourceCalled('Execute',
'hive --config /usr/hdp/current/hive-server2-hive2/conf/conf.server --service metatool -updateLocation hdfs://c6401.ambari.apache.org:8020 OK.',
environment={'PATH': '/usr/hdp/current/hadoop-client/bin'},
user='hive'
)
self.assertResourceCalled('Execute',
'/tmp/start_hiveserver2_interactive_script /var/run/hive/hive-server2-interactive.out /var/log/hive/hive-server2-interactive.err /var/run/hive/hive-interactive.pid /usr/hdp/current/hive-server2-hive2/conf/conf.server /var/log/hive',
environment={'HIVE_BIN': '/usr/hdp/current/hive-server2-hive2/bin/hive2',
'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
not_if="ls /var/run/hive/hive-interactive.pid >/dev/null 2>&1 && ps -p 123 >/dev/null 2>&1",
user='hive',
path=['/bin:/usr/hdp/current/hive-server2-hive2/bin:/usr/hdp/current/hadoop-client/bin'],
)
self.assertResourceCalled('Execute',
'/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/hdp/current/hive-server2-hive2/lib/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true\' hive \'!`"\'"\'"\' 1\' com.mysql.jdbc.Driver',
path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
tries=5,
try_sleep=10
)
self.assertNoMoreResources()
"""
Tests HSI start with llap package creation output having multiple lines.
Sample output : "UNWANTED STRING \n Prepared llap-slider-05Apr2016/run.sh for running LLAP \n UNWANTED STRING \n"
"""
@patch("os.path.isfile")
@patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
@patch("socket.socket")
@patch("time.sleep")
def test_start_default_with_llap_multi_line_output(self, sleep_mock, socket_mock, copy_to_hfds_mock, is_file_mock):
self.maxDiff = None
copy_to_hfds_mock.return_value = False
s = socket_mock.return_value
is_file_mock.return_value = True
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server_interactive.py",
classname="HiveServerInteractive",
command="start",
config_file=self.get_src_folder() + "/test/python/stacks/2.5/configs/hsi_default.json",
stack_version=self.STACK_VERSION,
target=RMFTestCase.TARGET_COMMON_SERVICES,
checked_call_mocks=[(0, "UNWANTED_STRING \n "
" Prepared llap-slider-05Apr2016/run.sh for running LLAP \n "
"UNWANTED_STRING \n ", ""),
(0, """{
\"state\" : \"RUNNING_ALL\"
}""", ""),
(0, """{
\"state\" : \"RUNNING_ALL\"
}""", ""), (0, "OK.", "")],
)
self.assert_configure_default()
self.assertResourceCalled('Execute',
'/home/hive/llap-slider-05Apr2016/run.sh',
logoutput= True, user=u'hive'
)
self.assertResourceCalled('Execute',
'hive --config /usr/hdp/current/hive-server2-hive2/conf/conf.server --service metatool -updateLocation hdfs://c6401.ambari.apache.org:8020 OK.',
environment={'PATH': '/usr/hdp/current/hadoop-client/bin'},
user='hive'
)
self.assertResourceCalled('Execute',
'/tmp/start_hiveserver2_interactive_script /var/run/hive/hive-server2-interactive.out /var/log/hive/hive-server2-interactive.err /var/run/hive/hive-interactive.pid /usr/hdp/current/hive-server2-hive2/conf/conf.server /var/log/hive',
environment={'HIVE_BIN': '/usr/hdp/current/hive-server2-hive2/bin/hive2',
'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
not_if="ls /var/run/hive/hive-interactive.pid >/dev/null 2>&1 && ps -p 123 >/dev/null 2>&1",
user='hive',
path=['/bin:/usr/hdp/current/hive-server2-hive2/bin:/usr/hdp/current/hadoop-client/bin'],
)
self.assertResourceCalled('Execute',
'/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/hdp/current/hive-server2-hive2/lib/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true\' hive \'!`"\'"\'"\' 1\' com.mysql.jdbc.Driver',
path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
tries=5,
try_sleep=10
)
self.assertNoMoreResources()
def test_stop_default(self):
self.maxDiff = None
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server_interactive.py",
classname="HiveServerInteractive",
command="stop",
config_file=self.get_src_folder() + "/test/python/stacks/2.5/configs/hsi_default.json",
stack_version=self.STACK_VERSION,
target=RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks=[(0, "OK.", ""), (0, "OK.", "")],
)
self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
not_if="! (ls /var/run/hive/hive-interactive.pid >/dev/null 2>&1 && ps -p 123 >/dev/null 2>&1)",
)
self.assertResourceCalled('Execute',
"! (ls /var/run/hive/hive-interactive.pid >/dev/null 2>&1 && ps -p 123 >/dev/null 2>&1)",
tries=10,
try_sleep=3,
)
self.assertResourceCalled('Execute',
"! (ls /var/run/hive/hive-interactive.pid >/dev/null 2>&1 && ps -p 123 >/dev/null 2>&1)",
tries=20,
try_sleep=3,
)
self.assertResourceCalled('File', '/var/run/hive/hive-interactive.pid',
action=['delete'],
)
self.assertResourceCalled('Execute', ('slider', 'destroy', 'llap0', '--force'),
ignore_failures = True,
user = 'hive',
timeout = 30,
)
self.assertNoMoreResources()
def assert_configure_default(self, no_tmp=False, default_fs_default=u'hdfs://c6401.ambari.apache.org:8020', with_cs_enabled=False):
self.assertResourceCalled('HdfsResource', '/apps/hive/warehouse',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
owner = 'hive',
group = 'hadoop',
hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
mode = 0777,
)
self.assertResourceCalled('HdfsResource', '/user/hive',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
owner = 'hive',
hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='missing_principal', default_fs=default_fs_default,
mode = 0755,
)
self.assertResourceCalled('Directory', '/etc/hive2',
mode=0755,
)
self.assertResourceCalled('Directory', '/usr/hdp/current/hive-server2-hive2/conf',
owner='hive',
group='hadoop',
create_parents=True,
mode = 0755,
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
group='hadoop',
conf_dir='/usr/hdp/current/hive-server2-hive2/conf',
mode=0644,
configuration_attributes={u'final': {u'mapred.healthChecker.script.path': u'true',
u'mapreduce.jobtracker.staging.root.dir': u'true'}},
owner='hive',
configurations=self.getConfig()['configurations']['mapred-site'],
)
self.assertResourceCalled('File', '/usr/hdp/current/hive-server2-hive2/conf/hive-default.xml.template',
owner='hive',
group='hadoop',
mode = 0644,
)
self.assertResourceCalled('File', '/usr/hdp/current/hive-server2-hive2/conf/hive-env.sh.template',
owner='hive',
group='hadoop',
mode = 0644,
)
hive_site_conf = {}
hive_site_conf.update(self.getConfig()['configurations']['hive-site'])
hive_site_conf.update(self.getConfig()['configurations']['hive-interactive-site'])
hive_site_conf['hive.exec.post.hooks'] = 'a,b,org.apache.hadoop.hive.ql.hooks.ATSHook'
del hive_site_conf['hive.enforce.bucketing']
del hive_site_conf['hive.enforce.sorting']
del hive_site_conf['hive.llap.io.memory.size']
hive_site_conf['hive.llap.io.memory.size'] = 357564416L
hiveserver2_site_conf = {}
hiveserver2_site_conf.update(self.getConfig()['configurations']['hiveserver2-site'])
hiveserver2_site_conf.update(self.getConfig()['configurations']['hiveserver2-interactive-site'])
mapred_site_conf = {}
mapred_site_conf.update(self.getConfig()['configurations']['mapred-site'])
self.assertResourceCalled("Directory", "/usr/hdp/current/hive-server2-hive2/conf/conf.server",
owner=u"hive",
group=u"hadoop",
create_parents=True,
mode = 0700)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
group='hadoop',
conf_dir='/usr/hdp/current/hive-server2-hive2/conf/conf.server',
mode=0600,
configuration_attributes={u'final': {u'mapred.healthChecker.script.path': u'true',
u'mapreduce.jobtracker.staging.root.dir': u'true'}},
owner='hive',
configurations=mapred_site_conf,
)
self.assertResourceCalled("File", "/usr/hdp/current/hive-server2-hive2/conf/conf.server/hive-default.xml.template",
owner=u"hive",
group=u"hadoop",
mode = 0600)
self.assertResourceCalled("File", "/usr/hdp/current/hive-server2-hive2/conf/conf.server/hive-env.sh.template",
owner=u"hive",
group=u"hadoop",
mode = 0600)
self.assertResourceCalled('XmlConfig', 'tez-site.xml',
group='hadoop',
conf_dir='/etc/tez_hive2/conf',
mode=0664,
configuration_attributes=UnknownConfigurationMock(),
owner='tez',
configurations=self.getConfig()['configurations']['tez-interactive-site'],
)
# Verify that config files got created under /etc/hive2/conf and /etc/hive2/conf/conf.server
hive_conf_dirs_list = ['/usr/hdp/current/hive-server2-hive2/conf', '/usr/hdp/current/hive-server2-hive2/conf/conf.server']
# Making copy of 'hive_site_conf' in 'hive_site_conf_for_client', and deleting 'javax.jdo.option.ConnectionPassword' config
# from there.
hive_site_conf_for_client = hive_site_conf.copy()
del hive_site_conf_for_client['javax.jdo.option.ConnectionPassword']
if 'hadoop.security.credential.provider.path' in hive_site_conf_for_client:
del hive_site_conf_for_client['hadoop.security.credential.provider.path']
for conf_dir in hive_conf_dirs_list:
# if 'conf_dir' is '/usr/hdp/current/hive-server2-hive2/conf', we don't expect 'javax.jdo.option.ConnectionPassword' config
# to be part of 'hive_site_conf', as we delete it for the HIVE client file. Thus, deleting it here for checking the contents.
if conf_dir == '/usr/hdp/current/hive-server2-hive2/conf':
self.assertResourceCalled('XmlConfig', 'hive-site.xml',
group='hadoop',
conf_dir=conf_dir,
mode=0644,
configuration_attributes={u'final': {u'hive.optimize.bucketmapjoin.sortedmerge': u'true',
u'javax.jdo.option.ConnectionDriverName': u'true',
u'javax.jdo.option.ConnectionPassword': u'true'}},
owner='hive',
configurations=hive_site_conf_for_client,
)
else:
if with_cs_enabled:
self.assertResourceCalled('File', '/usr/hdp/current/hive-server2-hive2/conf/conf.server/hive-site.jceks',
content=StaticFile('/var/lib/ambari-agent/data/abc.jceks'),
owner='hive',
group='hadoop',
mode = 0640,
)
self.assertTrue('hadoop.security.credential.provider.path' in hive_site_conf)
hive_site_conf['hadoop.security.credential.provider.path'] = 'jceks://file/usr/hdp/current/hive-server2-hive2/conf/conf.server/hive-site.jceks'
self.assertResourceCalled('XmlConfig', 'hive-site.xml',
group='hadoop',
conf_dir=conf_dir,
mode=0600,
configuration_attributes={u'final': {u'hive.optimize.bucketmapjoin.sortedmerge': u'true',
u'javax.jdo.option.ConnectionDriverName': u'true',
u'javax.jdo.option.ConnectionPassword': u'true'}},
owner='hive',
configurations=hive_site_conf,
)
if conf_dir == '/usr/hdp/current/hive-server2-hive2/conf/conf.server':
self.assertResourceCalled('XmlConfig', 'hiveserver2-site.xml',
group='hadoop',
conf_dir=conf_dir,
mode=0600,
configuration_attributes={},
owner='hive',
configurations=hiveserver2_site_conf,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'hive-env.sh'),
content=InlineTemplate(self.getConfig()['configurations']['hive-interactive-env']['content']),
owner='hive',
group='hadoop',
mode = 0600,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'llap-daemon-log4j2.properties'),
content=InlineTemplate('con\ntent'),
owner='hive',
group='hadoop',
mode=0600,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'llap-cli-log4j2.properties'),
content=InlineTemplate('con\ntent'),
owner='hive',
group='hadoop',
mode=0600,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'hive-log4j2.properties'),
content=InlineTemplate('con\ntent'), # Test new line
owner='hive',
group='hadoop',
mode=0600,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'hive-exec-log4j2.properties'),
content=InlineTemplate('con\ntent'), # Test new line
owner='hive',
group='hadoop',
mode=0600,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'beeline-log4j2.properties'),
content=InlineTemplate('con\ntent'), # Test new line
owner='hive',
group='hadoop',
mode=0600,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'hadoop-metrics2-hiveserver2.properties'),
content=Template("hadoop-metrics2-hiveserver2.properties.j2"),
owner='hive',
group='hadoop',
mode = 0600,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'hadoop-metrics2-llapdaemon.properties'),
content=Template("hadoop-metrics2-llapdaemon.j2"),
owner='hive',
group='hadoop',
mode = 0600
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'hadoop-metrics2-llaptaskscheduler.properties'),
content=Template("hadoop-metrics2-llaptaskscheduler.j2"),
owner='hive',
group='hadoop',
mode = 0600
)
else:
self.assertResourceCalled('XmlConfig', 'hiveserver2-site.xml',
group='hadoop',
conf_dir=conf_dir,
mode=0644,
configuration_attributes={},
owner='hive',
configurations=hiveserver2_site_conf,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'hive-env.sh'),
content=InlineTemplate(self.getConfig()['configurations']['hive-interactive-env']['content']),
owner='hive',
group='hadoop',
mode = 0644,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'llap-daemon-log4j2.properties'),
content=InlineTemplate('con\ntent'),
owner='hive',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'llap-cli-log4j2.properties'),
content=InlineTemplate('con\ntent'),
owner='hive',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'hive-log4j2.properties'),
content=InlineTemplate('con\ntent'), # Test new line
owner='hive',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'hive-exec-log4j2.properties'),
content=InlineTemplate('con\ntent'), # Test new line
owner='hive',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'beeline-log4j2.properties'),
content=InlineTemplate('con\ntent'), # Test new line
owner='hive',
group='hadoop',
mode=0644,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'hadoop-metrics2-hiveserver2.properties'),
content=Template("hadoop-metrics2-hiveserver2.properties.j2"),
owner='hive',
group='hadoop',
mode = 0644,
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'hadoop-metrics2-llapdaemon.properties'),
content=Template("hadoop-metrics2-llapdaemon.j2"),
owner='hive',
group='hadoop',
mode = 0644
)
self.assertResourceCalled('File', os.path.join(conf_dir, 'hadoop-metrics2-llaptaskscheduler.properties'),
content=Template("hadoop-metrics2-llaptaskscheduler.j2"),
owner='hive',
group='hadoop',
mode = 0644
)
pass
self.assertResourceCalled('Directory', '/etc/security/limits.d',
owner='root',
group='root',
create_parents=True,
)
self.assertResourceCalled('File', '/etc/security/limits.d/hive.conf',
content=Template('hive.conf.j2'),
owner='root',
group='root',
mode=0644,
)
self.assertResourceCalled('File',
'/tmp/mysql-connector-java.jar',
content=DownloadSource('http://c6401.ambari.apache.org:8080/resources/mysql-connector-java.jar'),
)
self.assertResourceCalled('Execute', ('cp',
'--remove-destination',
'/tmp/mysql-connector-java.jar',
'/usr/hdp/current/hive-server2-hive2/lib/mysql-connector-java.jar'),
path=['/bin', '/usr/bin/'],
sudo=True,
)
self.assertResourceCalled('File', '/usr/hdp/current/hive-server2-hive2/lib/mysql-connector-java.jar',
mode=0644,
)
self.assertResourceCalled('File', '/usr/lib/ambari-agent/DBConnectionVerification.jar',
content=DownloadSource('http://c6401.ambari.apache.org:8080/resources'
'/DBConnectionVerification.jar'),
mode=0644,
)
self.assertResourceCalled('File', '/tmp/start_hiveserver2_interactive_script',
content=Template('startHiveserver2Interactive.sh.j2'),
mode=0755,
)
self.assertResourceCalled('Directory', '/var/run/hive',
owner='hive',
mode=0755,
group='hadoop',
create_parents=True,
cd_access='a',
)
self.assertResourceCalled('Directory', '/var/log/hive',
owner='hive',
mode=0755,
group='hadoop',
create_parents=True,
cd_access='a',
)
self.assertResourceCalled('Directory', '/var/lib/hive2',
owner='hive',
mode=0755,
group='hadoop',
create_parents=True,
cd_access='a',
)
  # Tests for function '_make_valid_json()', which will be passed 'llapstatus' output that may be:
  # (1). A string parseable as JSON, or
  # (2). One with extra lines at the beginning (e.g. from embedded MOTD logging), which need to be removed before it is parsed as JSON
# Status : RUNNING having MOTD lines in beginning
def test_make_valid_json_1(self):
# Setting up input for fn. '_make_valid_json()'
input_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/running_withMOTDmsg.txt","r")
llap_app_info = input_file_handle.read()
llap_app_info_as_json = self.hsi._make_valid_json(llap_app_info)
# Set up expected output
expected_ouput_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/running.json","r")
expected_ouput_data = expected_ouput_file_handle.read()
expected_ouput_data_as_json = json.loads(expected_ouput_data)
# Verification
self.assertEqual(llap_app_info_as_json, expected_ouput_data_as_json)
# Status : RUNNING w/o MOTD lines in beginning
# Expected : No change
def test_make_valid_json_2(self):
# Setting up input for fn. '_make_valid_json()'
input_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/running.json","r")
llap_app_info = input_file_handle.read()
expected_llap_app_info_as_json = json.loads(llap_app_info)
llap_app_info_as_json = self.hsi._make_valid_json(llap_app_info)
# Verification
self.assertEqual(llap_app_info_as_json, expected_llap_app_info_as_json)
# Status : RUNNING_PARTIAL (2 out of 3 running -> < 80% instances ON) having MOTD lines in beginning
def test_make_valid_json_3(self):
# Setting up input for fn. '_make_valid_json()'
input_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/oneContainerDown_withMOTDmsg.txt","r")
llap_app_info = input_file_handle.read()
llap_app_info_as_json = self.hsi._make_valid_json(llap_app_info)
# Set up expected output
expected_ouput_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/oneContainerDown.json","r")
expected_ouput_data = expected_ouput_file_handle.read()
expected_ouput_data_as_json = json.loads(expected_ouput_data)
# Verification
self.assertEqual(llap_app_info_as_json, expected_ouput_data_as_json)
# Status : RUNNING_PARTIAL (2 out of 3 running -> < 80% instances ON) w/o MOTD lines in beginning
# Expected : No change
def test_make_valid_json_4(self):
# Setting up input for fn. '_make_valid_json()'
input_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/oneContainerDown.json","r")
llap_app_info = input_file_handle.read()
expected_llap_app_info_as_json = json.loads(llap_app_info)
llap_app_info_as_json = self.hsi._make_valid_json(llap_app_info)
# Verification
self.assertEqual(llap_app_info_as_json, expected_llap_app_info_as_json)
# Status : LAUNCHING having MOTD lines in beginning
def test_make_valid_json_5(self):
# Setting up input for fn. '_make_valid_json()'
input_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/starting_withMOTDmsg.txt","r")
llap_app_info = input_file_handle.read()
llap_app_info_as_json = self.hsi._make_valid_json(llap_app_info)
# Set up expected output
expected_ouput_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/starting.json","r")
expected_ouput_data = expected_ouput_file_handle.read()
expected_ouput_data_as_json = json.loads(expected_ouput_data)
# Verification
self.assertEqual(llap_app_info_as_json, expected_ouput_data_as_json)
# Status : LAUNCHING w/o MOTD lines in beginning
# Expected : No change
def test_make_valid_json_6(self):
# Setting up input for fn. '_make_valid_json()'
input_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/starting.json","r")
llap_app_info = input_file_handle.read()
expected_llap_app_info_as_json = json.loads(llap_app_info)
llap_app_info_as_json = self.hsi._make_valid_json(llap_app_info)
# Verification
self.assertEqual(llap_app_info_as_json, expected_llap_app_info_as_json)
# Status : COMPLETE having MOTD lines in beginning
def test_make_valid_json_7(self):
# Setting up input for fn. '_make_valid_json()'
input_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/appComplete_withMOTDmsg.txt","r")
llap_app_info = input_file_handle.read()
llap_app_info_as_json = self.hsi._make_valid_json(llap_app_info)
# Set up expected output
expected_ouput_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/appComplete.json","r")
expected_ouput_data = expected_ouput_file_handle.read()
expected_ouput_data_as_json = json.loads(expected_ouput_data)
# Verification
self.assertEqual(llap_app_info_as_json, expected_ouput_data_as_json)
# Status : COMPLETE w/o MOTD lines in beginning
# Expected : No change
def test_make_valid_json_8(self):
# Setting up input for fn. '_make_valid_json()'
input_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/appComplete.json","r")
llap_app_info = input_file_handle.read()
expected_llap_app_info_as_json = json.loads(llap_app_info)
llap_app_info_as_json = self.hsi._make_valid_json(llap_app_info)
# Verification
self.assertEqual(llap_app_info_as_json, expected_llap_app_info_as_json)
# Status : INVALID APP having MOTD lines in beginning
def test_make_valid_json_9(self):
# Setting up input for fn. '_make_valid_json()'
input_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/invalidApp_withMOTDmsg.txt","r")
llap_app_info = input_file_handle.read()
llap_app_info_as_json = self.hsi._make_valid_json(llap_app_info)
# Set up expected output
expected_ouput_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/invalidApp.json","r")
expected_ouput_data = expected_ouput_file_handle.read()
expected_ouput_data_as_json = json.loads(expected_ouput_data)
# Verification
self.assertEqual(llap_app_info_as_json, expected_ouput_data_as_json)
# Status : INVALID APP w/o MOTD lines in beginning
# Expected : No change
def test_make_valid_json_10(self):
# Setting up input for fn. '_make_valid_json()'
input_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/invalidApp.json","r")
llap_app_info = input_file_handle.read()
expected_llap_app_info_as_json = json.loads(llap_app_info)
llap_app_info_as_json = self.hsi._make_valid_json(llap_app_info)
# Verification
self.assertEqual(llap_app_info_as_json, expected_llap_app_info_as_json)
  # Tests for function '_make_valid_json()' when it is passed 'llapstatus' output which:
  # (1). Is a string parseable as JSON, but also has (2) and/or (3):
  # (2). Extra lines at the beginning (e.g. from embedded MOTD logging)
  # AND/OR
  # (3). Extra lines at the end.
  # Beginning and end lines need to be removed before the output is parsed as JSON
def test_make_valid_json_11(self):
# Setting up input for fn. '_make_valid_json()'
input_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/running_withMOTDmsg_andTrailingMsg.txt","r")
llap_app_info = input_file_handle.read()
llap_app_info_as_json = self.hsi._make_valid_json(llap_app_info)
# Set up expected output
expected_ouput_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/running.json","r")
expected_ouput_data = expected_ouput_file_handle.read()
expected_ouput_data_as_json = json.loads(expected_ouput_data)
# Verification
self.assertEqual(llap_app_info_as_json, expected_ouput_data_as_json)
  # Tests for fn : 'check_llap_app_status_in_llap_tp()'
# Status : RUNNING
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_running_all_wait_negative_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('running.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, -1)
self.assertEqual(status, True)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_running_all_wait_0_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('running.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, 0)
self.assertEqual(status, True)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_running_all_wait_2_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('running.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, 2)
self.assertEqual(status, True)
# Status : RUNNING_PARTIAL (2 out of 3 running -> < 80% instances ON)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_one_container_down_wait_negative_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('oneContainerDown.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, -1)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_one_container_down_wait_0_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('oneContainerDown.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, 0)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_one_container_down_wait_2_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('oneContainerDown.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, 2)
self.assertEqual(status, False)
# Status : RUNNING_PARTIAL (4 out of 5 running -> > 80% instances ON)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_two_container_down_1_wait_negative_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('oneContainerDown1.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, -1)
self.assertEqual(status, True)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_two_container_down_1_wait_0_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('oneContainerDown1.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, 0)
self.assertEqual(status, True)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_two_container_down_1_wait_2_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('oneContainerDown1.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, 2)
self.assertEqual(status, True)
# Status : LAUNCHING
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_starting_wait_negative_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('starting.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, -1)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_starting_wait_0_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('starting.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, 0)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_starting_wait_2_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('starting.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, 2)
self.assertEqual(status, False)
# Status : COMPLETE
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_complete_wait_negative_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('appComplete.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, -1)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_complete_wait_0_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('appComplete.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, 0)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_complete_wait_2_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('appComplete.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, 2)
self.assertEqual(status, False)
# Status : APP_NOT_FOUND
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_invalid_wait_negative_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('invalidApp.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, -1)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_invalid_wait_0_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('invalidApp.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, 0)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_tp')
def test_check_llap_app_status_invalid_wait_2_in_llap_tp(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('invalidApp.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_tp(self.llap_app_name, 2)
self.assertEqual(status, False)
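  # Note (inferred from the fixtures and assertions above): check_llap_app_status_in_llap_tp()
  # is only expected to report healthy (True) when at least ~80% of the desired LLAP instances
  # are running; RUNNING_PARTIAL below that threshold, LAUNCHING, COMPLETE and APP_NOT_FOUND
  # all map to False, regardless of how long the call is allowed to wait.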
# Tests for fn : 'check_llap_app_status_in_llap_ga()'
# Status : RUNNING
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_running_all_wait_negative_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('running.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, -1)
self.assertEqual(status, True)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_running_all_wait_0_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('running.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, 0)
self.assertEqual(status, True)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_running_all_wait_2_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('running.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, 2)
self.assertEqual(status, True)
# Status : RUNNING_PARTIAL (2 out of 3 running -> < 80% instances ON)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_one_container_down_wait_negative_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('oneContainerDown.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, -1)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_one_container_down_wait_0_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('oneContainerDown.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, 0)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_one_container_down_wait_2_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('oneContainerDown.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, 2)
self.assertEqual(status, False)
# Status : RUNNING_PARTIAL (4 out of 5 running -> > 80% instances ON)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_two_container_down_1_wait_negative_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('oneContainerDown1.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, -1)
self.assertEqual(status, True)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_two_container_down_1_wait_0_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('oneContainerDown1.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, 0)
self.assertEqual(status, True)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_two_container_down_1_wait_2_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('oneContainerDown1.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, 2)
self.assertEqual(status, True)
# Status : LAUNCHING
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_starting_wait_negative_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('starting.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, -1)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_starting_wait_0_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('starting.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, 0)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_starting_wait_2_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('starting.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, 2)
self.assertEqual(status, False)
# Status : COMPLETE
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_complete_wait_negative_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('appComplete.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, -1)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_complete_wait_0_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('appComplete.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, 0)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_complete_wait_2_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('appComplete.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, 2)
self.assertEqual(status, False)
# Status : APP_NOT_FOUND
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_invalid_wait_negative_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('invalidApp.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, -1)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_invalid_wait_0_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('invalidApp.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, 0)
self.assertEqual(status, False)
@patch("time.sleep")
@patch('hive_server_interactive.HiveServerInteractiveDefault._get_llap_app_status_info_in_llap_ga')
def test_check_llap_app_status_invalid_wait_2_in_llap_ga(self, mock_get_llap_app_status_data, sleep_mock):
sleep_mock.return_value = 1
llap_app_json = self.load_json('invalidApp.json')
mock_get_llap_app_status_data.return_value = llap_app_json
status = self.hsi.check_llap_app_status_in_llap_ga(self.llap_app_name, 2)
self.assertEqual(status, False)
| 50.464769
| 355
| 0.616363
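# Illustrative sketch only (not part of the records above or below): the '_make_valid_json'
# tests above feed the helper raw `llapstatus` output that may carry MOTD banner lines before,
# and trailing log lines after, the JSON payload. One simple way to recover the JSON block,
# assuming it starts at the first '{' and ends at the last '}', is:
import json

def strip_to_json(raw_output):
    """Drop leading/trailing non-JSON lines from llapstatus output and parse the rest."""
    start = raw_output.find("{")
    end = raw_output.rfind("}")
    if start == -1 or end < start:
        raise ValueError("no JSON object found in llapstatus output")
    return json.loads(raw_output[start:end + 1])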
|
2c90894721f0ecafa2751cbb405e21d40c000c08
| 2,735
|
py
|
Python
|
main.py
|
carreb/autopull
|
cce0f50fd5648685b5608b319ff9646e329cdd19
|
[
"MIT"
] | null | null | null |
main.py
|
carreb/autopull
|
cce0f50fd5648685b5608b319ff9646e329cdd19
|
[
"MIT"
] | 1
|
2022-02-14T16:05:00.000Z
|
2022-02-15T14:43:57.000Z
|
main.py
|
carreb/autopull
|
cce0f50fd5648685b5608b319ff9646e329cdd19
|
[
"MIT"
] | null | null | null |
from importlib.resources import contents
from github import Github
from time import sleep
from datetime import datetime
import git
import os
import configparser
#var
config = configparser.ConfigParser()
config.read('config.ini')
info = config['info']
repo_name = info['repo-name']
owner_username = info['owner-username']
token = info['token']
git_dir = "./" + repo_name
git = git.cmd.Git(git_dir)
g = Github(token)
repo = g.get_repo(owner_username+"/"+repo_name)
filename = info['filename']
sleepamount = int(info['sleep-amount'])
def clearcommand():
global clear
clear = 'clear'
if os.name in ('nt', 'dos'):
        clear = 'cls'
def startup():
global updatefile
global doneworking
doneworking = False
try:
updatefile = repo.get_contents(filename)
print("Updatefile found!")
doneworking = True
except:
print("Updatefile not found... Creating one")
doneworking = False
try:
repo.create_file(filename, "[AUTOMATED] Create updatefile", "hello world", branch="main")
print("Successfully created updatefile!")
doneworking = True
except:
print("Failed to create updatefile!")
exit()
clearcommand()
startup()
def check():
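    # Polling loop: each pass looks for the marker file in the remote repo; when it is found,
    # the file is deleted, `git pull` is run locally, and the marker is recreated if the pull
    # fails so that a later pass retries, with `sleep-amount` seconds of delay between checks.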
while True:
os.system(clear)
now = str(datetime.now())
contents = repo.get_contents("")
try:
updatefile = repo.get_contents(filename)
except:
print("[" + now + "] There are no updates!")
print("[" + now + "] Checking for updates...")
for ContentFile in contents:
if ContentFile.name == filename:
os.system(clear)
print("[" + now + "] Update file found!")
print("[" + now + "] Removing update file...")
try:
repo.delete_file(updatefile.path, "[AUTOMATED] Removed update file", updatefile.sha, branch="main")
print("[" + now + "] Update file removed!")
print("[" + now + "] Attempting to pull repo...")
try:
git.pull()
print("[" + now + "] Repo pulled!")
except:
print("[" + now + "]" " Failed to pull repo! Recreating update file to try again...")
repo.create_file(filename, "[AUTOMATED] Error occurred while pulling.", "hello world", branch="main")
except:
print("[" + now + "] Error occurred while removing update file!")
sleep(sleepamount)
sleep(5)
if doneworking == True:
check()
| 32.951807
| 126
| 0.547349
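# Illustrative sketch only: main.py above reads a config.ini with an [info] section providing
# repo-name, owner-username, token, filename and sleep-amount. The snippet below generates such
# a file; every value is a placeholder, not something taken from the source record.
import configparser

config = configparser.ConfigParser()
config["info"] = {
    "repo-name": "my-repo",          # placeholder repository name
    "owner-username": "my-user",     # placeholder GitHub user/org that owns the repo
    "token": "ghp_placeholder",      # placeholder personal access token
    "filename": "update",            # marker file the poller watches for
    "sleep-amount": "60",            # seconds between checks (parsed with int())
}
with open("config.ini", "w") as f:
    config.write(f)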
|
505fadcdbe57499071c2c4e0ca7a1e20d7f37311
| 27,597
|
py
|
Python
|
spark_fhir_schemas/stu3/complex_types/careplan.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | 2
|
2020-10-31T23:25:01.000Z
|
2021-06-09T14:12:42.000Z
|
spark_fhir_schemas/stu3/complex_types/careplan.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/stu3/complex_types/careplan.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class CarePlanSchema:
"""
Describes the intention of how one or more practitioners intend to deliver
care for a particular patient, group or community for a period of time,
possibly limited to care for a specific condition or set of conditions.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
Describes the intention of how one or more practitioners intend to deliver
care for a particular patient, group or community for a period of time,
possibly limited to care for a specific condition or set of conditions.
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content may not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource, and may be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
resourceType: This is a CarePlan resource
identifier: This records identifiers associated with this care plan that are defined by
business processes and/or used to refer to it when a direct URL reference to
the resource itself is not appropriate (e.g. in CDA documents, or in written /
printed documentation).
definition: Identifies the protocol, questionnaire, guideline or other specification the
care plan should be conducted in accordance with.
basedOn: A care plan that is fulfilled in whole or in part by this care plan.
replaces: Completed or terminated care plan whose function is taken by this new care
plan.
partOf: A larger care plan of which this particular care plan is a component or step.
status: Indicates whether the plan is currently being acted upon, represents future
intentions or is now a historical record.
intent: Indicates the level of authority/intentionality associated with the care plan
and where the care plan fits into the workflow chain.
category: Identifies what "kind" of plan this is to support differentiation between
multiple co-existing plans; e.g. "Home health", "psychiatric", "asthma",
"disease management", "wellness plan", etc.
title: Human-friendly name for the CarePlan.
description: A description of the scope and nature of the plan.
subject: Identifies the patient or group whose intended care is described by the plan.
context: Identifies the original context in which this particular CarePlan was created.
period: Indicates when the plan did (or is intended to) come into effect and end.
        author: Identifies the individual(s) or organization who is responsible for the
content of the care plan.
careTeam: Identifies all people and organizations who are expected to be involved in the
care envisioned by this plan.
addresses: Identifies the conditions/problems/concerns/diagnoses/etc. whose management
and/or mitigation are handled by this plan.
supportingInfo: Identifies portions of the patient's record that specifically influenced the
formation of the plan. These might include co-morbidities, recent procedures,
limitations, recent assessments, etc.
goal: Describes the intended objective(s) of carrying out the care plan.
activity: Identifies a planned action to occur as part of the plan. For example, a
medication to be used, lab tests to perform, self-monitoring, education, etc.
note: General notes about the care plan not covered elsewhere.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.stu3.complex_types.period import PeriodSchema
from spark_fhir_schemas.stu3.complex_types.careplan_activity import (
CarePlan_ActivitySchema,
)
from spark_fhir_schemas.stu3.complex_types.annotation import AnnotationSchema
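        # Recursion guard (the `if` below): once CarePlan has already been expanded
        # `max_recursion_limit` times on this branch, or the overall `max_nesting_depth`
        # is reached, the nested schema is truncated to a bare string "id" field so the
        # generated Spark schema stays finite.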
if (
max_recursion_limit
and nesting_list.count("CarePlan") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["CarePlan"]
schema = StructType(
[
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the resource. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content may not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content.
StructField("implicitRules", StringType(), True),
# The base language in which the resource is written.
StructField("language", StringType(), True),
# A human-readable narrative that contains a summary of the resource, and may be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# This is a CarePlan resource
StructField("resourceType", StringType(), True),
# This records identifiers associated with this care plan that are defined by
# business processes and/or used to refer to it when a direct URL reference to
# the resource itself is not appropriate (e.g. in CDA documents, or in written /
# printed documentation).
StructField(
"identifier",
ArrayType(
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Identifies the protocol, questionnaire, guideline or other specification the
# care plan should be conducted in accordance with.
StructField(
"definition",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A care plan that is fulfilled in whole or in part by this care plan.
StructField(
"basedOn",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Completed or terminated care plan whose function is taken by this new care
# plan.
StructField(
"replaces",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A larger care plan of which this particular care plan is a component or step.
StructField(
"partOf",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Indicates whether the plan is currently being acted upon, represents future
# intentions or is now a historical record.
StructField("status", StringType(), True),
# Indicates the level of authority/intentionality associated with the care plan
# and where the care plan fits into the workflow chain.
StructField("intent", StringType(), True),
# Identifies what "kind" of plan this is to support differentiation between
# multiple co-existing plans; e.g. "Home health", "psychiatric", "asthma",
# "disease management", "wellness plan", etc.
StructField(
"category",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Human-friendly name for the CarePlan.
StructField("title", StringType(), True),
# A description of the scope and nature of the plan.
StructField("description", StringType(), True),
# Identifies the patient or group whose intended care is described by the plan.
StructField(
"subject",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Identifies the original context in which this particular CarePlan was created.
StructField(
"context",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Indicates when the plan did (or is intended to) come into effect and end.
StructField(
"period",
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
                # Identifies the individual(s) or organization who is responsible for the
# content of the care plan.
StructField(
"author",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Identifies all people and organizations who are expected to be involved in the
# care envisioned by this plan.
StructField(
"careTeam",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Identifies the conditions/problems/concerns/diagnoses/etc. whose management
# and/or mitigation are handled by this plan.
StructField(
"addresses",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Identifies portions of the patient's record that specifically influenced the
# formation of the plan. These might include co-morbidities, recent procedures,
# limitations, recent assessments, etc.
StructField(
"supportingInfo",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Describes the intended objective(s) of carrying out the care plan.
StructField(
"goal",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Identifies a planned action to occur as part of the plan. For example, a
# medication to be used, lab tests to perform, self-monitoring, education, etc.
StructField(
"activity",
ArrayType(
CarePlan_ActivitySchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# General notes about the care plan not covered elsewhere.
StructField(
"note",
ArrayType(
AnnotationSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
| 50.451554
| 100
| 0.538863
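# Illustrative usage sketch only (not part of the package above): build the CarePlan schema and
# apply it when reading newline-delimited FHIR JSON with PySpark. The application name and file
# path are hypothetical placeholders.
from pyspark.sql import SparkSession
from spark_fhir_schemas.stu3.complex_types.careplan import CarePlanSchema

spark = SparkSession.builder.appName("careplan-schema-demo").getOrCreate()  # hypothetical app name
schema = CarePlanSchema.get_schema(max_recursion_limit=2, include_extension=False)
df = spark.read.schema(schema).json("/path/to/careplan.ndjson")  # hypothetical path
df.select("id", "status", "intent").show(truncate=False)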
|
7f7c31d0a32c80932b84a3b2831caef6fef1ffa3
| 10,096
|
py
|
Python
|
npms/viz/viz_multi_shape.py
|
Tiamat-Tech/npms
|
2d1bce8c98b0f24aa69273975c52b2fbdb101c29
|
[
"MIT"
] | 96
|
2021-08-01T18:19:04.000Z
|
2022-03-04T02:53:47.000Z
|
npms/viz/viz_multi_shape.py
|
Tiamat-Tech/npms
|
2d1bce8c98b0f24aa69273975c52b2fbdb101c29
|
[
"MIT"
] | 3
|
2021-08-18T12:57:39.000Z
|
2021-11-30T07:18:46.000Z
|
npms/viz/viz_multi_shape.py
|
Tiamat-Tech/npms
|
2d1bce8c98b0f24aa69273975c52b2fbdb101c29
|
[
"MIT"
] | 8
|
2021-08-03T01:51:10.000Z
|
2021-12-31T09:46:25.000Z
|
import os, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import shutil
import open3d as o3d
import numpy as np
import trimesh
import json
from utils.pcd_utils import (BBox,
transform_pointcloud_to_opengl_coords,
rotate_around_axis,
origin, normalize_transformation)
from data_scripts import config_data as cfg
from utils.gaps_utils import read_pts_file
from utils.utils import filter_identities
class ViewerMulti:
def __init__(
self,
identity_names=[],
target_animations=[],
exclude_identities_with=None,
only_identities_with=None,
):
self.identity_names = identity_names
self.target_animations = target_animations
self.time = 0
self.p_ref = None
self.p_cur = None
self.mesh_cur = None
self.show_ref = False
self.show_cur = False
self.exclude_identities_with = exclude_identities_with
self.only_identities_with = only_identities_with
self.initialize()
def initialize(self):
self.p_refs_list = {}
self.p_curs_list = []
self.mesh_curs_list = []
self.frames_list = []
self.identity_name_by_frame_id = {}
self.animation_frame_id_by_frame_id = {}
loaded_frames = 0
global_id = -1
is_done = False
# Prepare identity names
if len(self.identity_names) == 0:
self.identity_names = os.listdir(dataset_dir)
for identity_name in self.identity_names:
identity_dir = os.path.join(dataset_dir, identity_name)
if not os.path.isdir(identity_dir):
print(f"Skipping {identity_dir}, since it's not a directory")
continue
if self.exclude_identities_with is not None and self.exclude_identities_with in identity_name:
continue
if self.only_identities_with is not None and self.only_identities_with not in identity_name:
continue
# T-Pose
ref_mesh_path = os.path.join(identity_dir, "a_t_pose", "000000", f"{mesh_filename}")
if os.path.isfile(ref_mesh_path):
ref_mesh = o3d.io.read_triangle_mesh(ref_mesh_path)
ref_vertices = np.asarray(ref_mesh.vertices)
p_ref = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(ref_vertices))
p_ref.paint_uniform_color([0, 1, 0])
self.p_refs_list[identity_name] = p_ref
# Go over animations
for animation_name in sorted(os.listdir(identity_dir)):
if len(self.target_animations) > 0 and animation_name not in self.target_animations:
continue
animation_dir = os.path.join(identity_dir, animation_name)
if not os.path.isdir(animation_dir):
continue
# Go over the frames in the animation
for frame_id in sorted(os.listdir(animation_dir)):
global_id += 1
if load_every and global_id % load_every != 0:
continue
if global_id < from_frame_id:
continue
#################################################################
# Current
#################################################################
cur_mesh_path = os.path.join(animation_dir, frame_id, f"{mesh_filename}")
if not os.path.isfile(cur_mesh_path):
print(f"File {cur_mesh_path} is not a mesh")
continue
print("Loading", identity_name, animation_name, frame_id)
cur_mesh = o3d.io.read_triangle_mesh(cur_mesh_path)
cur_mesh.compute_vertex_normals()
cur_vertices = np.asarray(cur_mesh.vertices)
p_cur = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(cur_vertices))
p_cur.paint_uniform_color([0, 0, 1])
self.p_curs_list.append(p_cur)
self.mesh_curs_list.append(cur_mesh)
self.identity_name_by_frame_id[loaded_frames] = identity_name
self.animation_frame_id_by_frame_id[loaded_frames] = f"{animation_name}_{frame_id}"
# Increase counter of evaluated frames
loaded_frames += 1
print(f'Loaded {loaded_frames} frames')
if loaded_frames == num_samples:
print()
print(f"Stopping early. Already loaded {loaded_frames}")
print()
is_done = True
break
if is_done:
break
if is_done:
break
print()
print("Total num frames loaded:", len(self.p_curs_list))
# ###############################################################################################
# # Generate additional meshes.
# ###############################################################################################
self.world_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=0.2, origin=[0, 0, 0]
)
# unit bbox
p_min = -0.5
p_max = 0.5
self.unit_bbox = BBox.compute_bbox_from_min_point_and_max_point(
np.array([p_min]*3), np.array([p_max]*3)
)
print((np.array([p_min]*3) + np.array([p_max]*3)) / 2.)
# Load bbox used to normalize
bbox_json_path = os.path.join(dataset_dir, "bbox.json")
if os.path.isfile(bbox_json_path):
with open(bbox_json_path, 'r') as f:
bbox = json.loads(f.read())
bbox = np.array(bbox)
self.max_bbox = BBox.compute_bbox_from_min_point_and_max_point(
np.array(bbox[0]), np.array(bbox[1])
)
def update_ref(self, vis):
param = vis.get_view_control().convert_to_pinhole_camera_parameters()
if self.p_ref is not None:
vis.remove_geometry(self.p_ref)
if self.show_ref:
if self.identity_name_by_frame_id[self.time] in self.p_refs_list:
self.p_ref = self.p_refs_list[self.identity_name_by_frame_id[self.time]]
vis.add_geometry(self.p_ref)
ctr = vis.get_view_control()
ctr.convert_from_pinhole_camera_parameters(param)
def update_cur(self, vis):
param = vis.get_view_control().convert_to_pinhole_camera_parameters()
if self.p_cur is not None:
vis.remove_geometry(self.p_cur)
if self.mesh_cur is not None:
vis.remove_geometry(self.mesh_cur)
if self.show_cur:
self.p_cur = self.p_curs_list[self.time]
vis.add_geometry(self.p_cur)
self.mesh_cur = self.mesh_curs_list[self.time]
vis.add_geometry(self.mesh_cur)
ctr = vis.get_view_control()
ctr.convert_from_pinhole_camera_parameters(param)
def run(self):
# Define callbacks.
def toggle_next(vis):
self.time += 1
if self.time >= len(self.p_curs_list):
self.time = 0
print(f"Frame: {self.time} - Identity: {self.identity_name_by_frame_id[self.time]} - {self.animation_frame_id_by_frame_id[self.time]}")
self.update_ref(vis)
self.update_cur(vis)
return False
def toggle_previous(vis):
self.time -= 1
if self.time < 0:
self.time = len(self.p_curs_list) - 1
print(f"Frame: {self.time} - Identity: {self.identity_name_by_frame_id[self.time]} - {self.animation_frame_id_by_frame_id[self.time]}")
self.update_ref(vis)
self.update_cur(vis)
return False
def toggle_ref(vis):
self.show_ref = not self.show_ref
self.update_ref(vis)
return False
def toggle_cur(vis):
self.show_cur = not self.show_cur
self.update_cur(vis)
return False
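        # Keyboard controls registered with Open3D's key-callback visualizer:
        #   D -> next frame, A -> previous frame,
        #   R -> toggle the T-pose reference point cloud,
        #   C -> toggle the current frame's point cloud and mesh.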
key_to_callback = {}
key_to_callback[ord("D")] = toggle_next
key_to_callback[ord("A")] = toggle_previous
key_to_callback[ord("R")] = toggle_ref
key_to_callback[ord("C")] = toggle_cur
# Add mesh at initial time step.
assert self.time < len(self.p_curs_list)
print("Showing time", self.time)
self.p_cur = self.p_curs_list[self.time]
self.mesh_cur = self.mesh_curs_list[self.time]
self.show_cur = True
o3d.visualization.draw_geometries_with_key_callbacks([self.unit_bbox, self.world_frame, self.p_cur, self.mesh_cur], key_to_callback)
# o3d.visualization.draw_geometries_with_key_callbacks(
# [self.max_bbox, self.world_frame, self.unit_bbox, self.p_cur, self.mesh_cur], key_to_callback
# )
if __name__ == "__main__":
PROCESSED_MESH_FILENAME = "mesh_raw.ply"
REAL_SCAN_MESH_FILENAME = "mesh_real_scan.ply"
NORMALIZED_MESH_FILENAME = "mesh_normalized.ply"
# mesh_filename = "mesh_raw"
mesh_filename = NORMALIZED_MESH_FILENAME
cluster_location = "cluster"
dataset = "mixamo_trans_all"
dataset_dir = f"/{cluster_location}/lothlann/ppalafox/datasets/{dataset}"
num_samples = 2
load_every = 0
from_frame_id = 0
viewer = ViewerMulti(
# identity_names=['00096_jerseyshort'],
# target_animations=['ballerina_spin'],
# ["astra"],
# [cfg.identities[29]],
# cfg.identities,
# target_animations
# exclude_identities_with='test',
# only_identities_with='test',
)
viewer.run()
| 35.055556
| 147
| 0.565471
|
63d5fb092f0cc24e2c27f0886b0cd5ba71e075d6
| 4,033
|
py
|
Python
|
tests/test_hvac.py
|
joro75/mytoyota
|
405f7d84b3737846124aac6e7692aa6da52838a1
|
[
"MIT"
] | null | null | null |
tests/test_hvac.py
|
joro75/mytoyota
|
405f7d84b3737846124aac6e7692aa6da52838a1
|
[
"MIT"
] | 1
|
2022-02-12T16:46:48.000Z
|
2022-02-12T16:46:48.000Z
|
tests/test_hvac.py
|
joro75/mytoyota
|
405f7d84b3737846124aac6e7692aa6da52838a1
|
[
"MIT"
] | null | null | null |
"""pytest tests for mytoyota.models.hvac.Hvac"""
from mytoyota.models.hvac import Hvac
# pylint: disable=no-self-use
class TestHvac:
"""pytest functions to test Hvac"""
@staticmethod
def _create_example_data():
"""Create hvac with predefined data"""
return Hvac(
{
"currentTemperatureIndication": {
"timestamp": "2020-10-16T03:50:15Z",
"unit": "string",
"value": 22,
},
"targetTemperature": {
"timestamp": "2020-10-16T03:50:15Z",
"unit": "string",
"value": 21,
},
"startedAt": "",
"status": "",
"type": "",
"duration": 1,
"options": {
"frontDefogger": "",
"frontDriverSeatHeater": "",
"frontPassengerSeatHeater": "",
"mirrorHeater": "",
"rearDefogger": "",
"rearDriverSeatHeater": "",
"rearPassengerSeatHeater": "",
"steeringHeater": "",
},
"commandId": "",
}
)
@staticmethod
def _create_example_legacy_data():
"""Create legacy hvac with predefined data"""
return Hvac(
{
"BlowerStatus": 0,
"FrontDefoggerStatus": 0,
"InsideTemperature": 22,
"LatestAcStartTime": "2020-10-16T03:50:15Z",
"RearDefoggerStatus": 0,
"RemoteHvacMode": 0,
"RemoteHvacProhibitionSignal": 1,
"SettingTemperature": 21,
"TemperatureDisplayFlag": 0,
"Temperaturelevel": 29,
},
legacy=True,
)
def test_hvac(self):
"""Test Hvac"""
hvac = self._create_example_data()
assert hvac.legacy is False
assert hvac.current_temperature == 22
assert hvac.target_temperature == 21
assert hvac.started_at == ""
assert hvac.status == ""
assert hvac.type == ""
assert hvac.duration == 1
assert hvac.command_id == ""
assert isinstance(hvac.options, dict)
assert hvac.options == {
"frontDefogger": "",
"frontDriverSeatHeater": "",
"frontPassengerSeatHeater": "",
"mirrorHeater": "",
"rearDefogger": "",
"rearDriverSeatHeater": "",
"rearPassengerSeatHeater": "",
"steeringHeater": "",
}
assert hvac.last_updated == "2020-10-16T03:50:15Z"
assert hvac.front_defogger_is_on is None
assert hvac.rear_defogger_is_on is None
assert hvac.blower_on is None
def test_hvac_legacy(self):
"""Test legacy Hvac"""
hvac = self._create_example_legacy_data()
assert hvac.legacy is True
assert hvac.current_temperature == 22
assert hvac.target_temperature == 21
assert hvac.blower_on == 0
assert hvac.front_defogger_is_on is False
assert hvac.rear_defogger_is_on is False
assert hvac.last_updated is None
assert hvac.started_at is None
assert hvac.status is None
assert hvac.type is None
assert hvac.duration is None
assert hvac.options is None
assert hvac.command_id is None
def test_hvac_no_data(self):
"""Test Hvac with no initialization data"""
hvac = Hvac({})
assert hvac.legacy is False
assert hvac.current_temperature is None
assert hvac.target_temperature is None
assert hvac.started_at is None
assert hvac.status is None
assert hvac.type is None
assert hvac.duration is None
assert hvac.command_id is None
assert hvac.options is None
assert hvac.last_updated is None
| 31.023077
| 60
| 0.521944
|
337faeca365f18a0b66d561a127dccebaac916fc
| 16,194
|
py
|
Python
|
app.py
|
DanielAndreasen/TripMeal
|
78446b1a23b32cb0c5617a6c98cebefa30e5bcb0
|
[
"MIT"
] | 10
|
2017-02-21T22:50:22.000Z
|
2020-11-04T08:07:29.000Z
|
app.py
|
DanielAndreasen/TripMeal
|
78446b1a23b32cb0c5617a6c98cebefa30e5bcb0
|
[
"MIT"
] | 5
|
2017-02-22T13:12:39.000Z
|
2021-05-17T10:56:32.000Z
|
app.py
|
DanielAndreasen/TripMeal
|
78446b1a23b32cb0c5617a6c98cebefa30e5bcb0
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, flash, jsonify, request, url_for, \
redirect, session, g
from dbconnect import connection
from MySQLdb import escape_string
from wtforms import Form, TextField, PasswordField, BooleanField, validators, TextAreaField, StringField, SelectField
from passlib.hash import sha256_crypt
from functools import wraps
import random
import gc
import os
class RegistrationForm(Form):
username = TextField('Username', [validators.Length(min=4, max=25)])
email = TextField('Email address', [validators.Length(min=5, max=50)])
password = PasswordField('Password', [validators.Required(),
validators.EqualTo('confirm',
message='Passwords must match')])
confirm = PasswordField('Repeat password')
class RecipeForm(Form):
title = StringField('Title', [validators.Length(min=1, max=200)])
country = SelectField('Country')
ingredients = TextAreaField('Ingredients', [validators.Length(min=5)])
recipe = TextAreaField('Recipe', [validators.Length(min=30)])
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
try:
username = session['username']
except KeyError:
username = None
if username is None:
return redirect(url_for('login_page'))
return f(*args, **kwargs)
return decorated_function
def get_recipes(rid):
"""rid can be an integer or 'all'.
Return a dictionary with the rid and title"""
c, conn = connection()
if rid == 'all':
_ = c.execute('SELECT rid, title FROM recipes;')
d = c.fetchall()
rid_dict = {int(di[0]): di[1] for di in d}
c.close()
conn.close()
gc.collect()
return rid_dict
else:
_ = c.execute('SELECT rid, title FROM recipes WHERE rid = ("%s");' % rid)
d = c.fetchall()[0]
rid_dict = {int(d[0]): d[1]}
c.close()
conn.close()
gc.collect()
return rid_dict
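# Example of the returned shape (recipe titles below are purely illustrative):
# get_recipes('all') -> {1: 'Pancakes', 2: 'Goulash', ...}, while get_recipes(2)
# returns the single-entry dict {2: 'Goulash'}.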
def get_ranking():
c, conn = connection()
_ = c.execute('SELECT user, COUNT(*) AS nu FROM recipes GROUP BY user ORDER BY nu DESC;')
d = c.fetchall()
c.close()
conn.close()
gc.collect()
d = {name: int(count) for name, count in d}
return d
def convert2HTML(text):
if ('1.' in text) and ('2.' in text) and ('3.' in text): # We have a list here
final = 1
for line in text.split('\r\n'):
if line.startswith('%s.' % final):
final += 1
newtext = ''
i = 1
for line in text.split('\r\n'):
if line.startswith('%s.' % i):
if i == 1:
newtext += '<ol>\r\n'
i += 1
newline = '<li>' + line[2:] + '</li>\r\n'
newtext += newline
if i == final:
newtext += '</ol>'
else:
newtext += line + '\r\n'
return newtext
else:
return text
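# Illustrative sketch: convert2HTML only rewrites recipe text when it detects a
# consecutively numbered list starting at "1."; any other text is returned
# unchanged. The helper below (a hypothetical name, not used elsewhere in the
# app) just demonstrates the expected shape of that transformation.
def _convert2HTML_demo():
    recipe_text = '1. Chop the onions\r\n2. Fry gently\r\n3. Serve'
    # Returns the three numbered lines wrapped in <ol>/<li> markup.
    return convert2HTML(recipe_text)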
# Setup Flask
app = Flask(__name__)
app.config['SECRET_KEY'] = os.environ['TRIPMEAL_KEY']
@app.route('/')
def homepage():
return render_template('main.html')
@app.route('/login/', methods=['GET', 'POST'])
def login_page():
try:
error = None
c, conn = connection()
if request.method == 'POST':
username = escape_string(request.form['username']).decode()
data = c.execute('SELECT * FROM users WHERE username = ("%s");' % username)
data = c.fetchone()
if sha256_crypt.verify(request.form['password'], data[2]) and (data[1] == username):
session['logged_in'] = True
session['username'] = username
session['favourites'] = data[4]
flash('You are now logged in')
return redirect(url_for('user_page'))
else:
error = 'Invalid credentials, try again'
gc.collect()
return render_template('login.html', error=error)
except:
error = 'Invalid credentials, try again'
return render_template('login.html', error=error)
@app.route('/logout/')
@login_required
def logout_page():
if session['logged_in']:
session['logged_in'] = False
session['username'] = None
return redirect(url_for('list_recipes'))
@app.route('/register/', methods=['GET', 'POST'])
def register_page():
form = RegistrationForm(request.form)
try:
if request.method == 'POST' and form.validate():
username = form.username.data
email = form.email.data
password = sha256_crypt.encrypt(str(form.password.data))
c, conn = connection()
x = c.execute('SELECT * FROM users WHERE username = ("%s");' %
escape_string(username))
if int(x) > 0:
flash('That username is already taken, please choose another')
return render_template('register.html', form=form)
else:
c.execute('INSERT INTO users (username, password, email) VALUES ("%s", "%s", "%s");' %
(escape_string(username), escape_string(password), escape_string(email)))
conn.commit()
flash('Thanks for registering!')
c.close()
conn.close()
gc.collect()
session['logged_in'] = True
session['username'] = username
return redirect(url_for('favourites_page'))
return render_template('register.html', form=form)
except Exception as e:
return render_template('register.html', form=form)
@app.route('/newrecipe/', methods=['GET', 'POST'])
@login_required
def newrecipe():
if request.method == 'POST':
pass
# print request.form
return render_template('newrecipe.html')
@app.route('/addrecipe/', methods=['POST', 'GET'])
def addrecipe():
if request.method == 'POST':
title = escape_string(request.form['title'])
location = escape_string(request.form['country'])
ingredients = escape_string(','.join(request.form['ingredients'].split('\r\n')).strip(','))
recipe = escape_string(request.form['recipe'])
username = session['username']
c, conn = connection()
c.execute('INSERT INTO recipes (title, location, ingredients, recipe, user) VALUES ("%s", "%s", "%s", "%s", "%s");' %
(title, location, ingredients, recipe, username))
conn.commit() # Save to the database
flash("Thanks for your recipe :)")
c.close()
conn.close()
gc.collect() # Garbage collection
return redirect(url_for('newrecipe'))
else:
return render_template('main.html')
@app.route('/_background/')
def background():
try:
i = request.args.get('ingredients_submit', 0, type=str)
return jsonify(ingredient=i)
    except Exception as e:
return str(e)
@app.route('/recipes/')
def list_recipes():
c, conn = connection()
_ = c.execute('SELECT rid, title FROM recipes;')
recipes = c.fetchall()
c.close()
conn.close()
gc.collect()
return render_template('recipes.html', recipes=recipes)
@app.route('/recipe', methods=['POST', 'GET'], strict_slashes=False)
def list_recipe():
try:
if request.method == 'GET':
rid = request.args.get('rid')
c, conn = connection()
_ = c.execute('SELECT * FROM recipes WHERE rid = %s;' % escape_string(rid))
recipe = list(c.fetchall()[0])
recipe[4] = convert2HTML(recipe[4])
c.close()
conn.close()
gc.collect()
if request.args.get('fav') == 'true': # Insert recipe as a favourite in database
c, conn = connection()
_ = c.execute('SELECT favourites FROM users WHERE username = "%s";' % session['username'])
favs = c.fetchall()[0][0]
if favs is None:
_ = c.execute('UPDATE users SET favourites = "%s" WHERE username = "%s";' % (recipe[0], session['username']))
conn.commit()
else:
favs = favs.split(',')
if str(recipe[0]) not in favs:
favs = ','.join(favs)+',%s' % recipe[0]
_ = c.execute('UPDATE users SET favourites = "%s" WHERE username = "%s";' % (favs, session['username']))
conn.commit()
c.close()
conn.close()
gc.collect()
return render_template('recipe.html', recipe=recipe, fav=True)
elif request.args.get('fav') == 'false': # Delete a favourite from the database
c, conn = connection()
_ = c.execute('SELECT favourites FROM users WHERE username = "%s";' % session['username'])
favs = c.fetchall()[0][0]
favs = favs.split(',')
fav = str(recipe[0])
if fav in favs:
idx = favs.index(fav)
_ = favs.pop(idx)
favs = ','.join(favs)
_ = c.execute('UPDATE users SET favourites = "%s" WHERE username = "%s";' % (favs, session['username']))
conn.commit()
c.close()
conn.close()
gc.collect()
return render_template('recipe.html', recipe=recipe, fav=False)
else:
try:
logged_in = session['logged_in']
except KeyError:
logged_in = False
if logged_in:
c, conn = connection()
_ = c.execute('SELECT favourites FROM users WHERE username = ("%s");' % session['username'])
try:
favs = c.fetchall()[0][0].split(',')
except Exception: # No favs yet
return render_template('recipe.html', recipe=recipe, fav=False)
c.close()
conn.close()
gc.collect()
return render_template('recipe.html', recipe=recipe, fav=rid in favs)
else:
return render_template('recipe.html', recipe=recipe, fav=False)
else:
return redirect(url_for('list_recipes'))
except Exception as e:
return redirect(url_for('list_recipes'))
@app.route('/favourites/')
@login_required
def favourites_page():
try:
c, conn = connection()
_ = c.execute('SELECT favourites FROM users WHERE username = ("%s");' % session['username'])
favs = c.fetchall()[0][0]
c.close()
conn.close()
gc.collect()
try:
favs = filter(None, favs.split(','))
except Exception:
return render_template('favourites.html', favourites=False)
fav_dict = {}
for fav in favs:
c, conn = connection()
_ = c.execute('SELECT title FROM recipes WHERE rid = ("%s");' % fav)
fav_dict[fav] = c.fetchall()[0][0]
c.close()
conn.close()
gc.collect()
return render_template('favourites.html', favourites=fav_dict)
except Exception as e:
return render_template('favourites.html', favourites=False)
@app.route('/menu/')
def menu_page():
try:
logged_in = session['logged_in']
except KeyError:
logged_in = False
menu_dict = {}
n_favourites = 0
all_recipes = get_recipes('all')
    rids = list(all_recipes.keys())  # random.choice below needs a real sequence
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
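    # Weekly plan: favourite recipes (for a logged-in user) are assigned to
    # random days first, then the remaining days are filled with random picks
    # from the full recipe table.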
if logged_in: # Include favourites
try:
c, conn = connection()
_ = c.execute('SELECT favourites FROM users WHERE username = ("%s");' % session['username'])
d = c.fetchall()[0][0]
            favs = list(map(int, filter(None, d.split(','))))
c.close()
conn.close()
            gc.collect()
n_favourites = len(favs)
except Exception:
pass
if n_favourites:
while len(favs):
rid = random.choice(favs)
favs.pop(favs.index(rid)) # Don't use the same recipe twice per week
tmp = {rid: all_recipes[rid]}
day = random.choice(days)
days.pop(days.index(day))
menu_dict[day] = [rid, tmp[rid]]
for _ in range(7-n_favourites):
day = random.choice(days)
days.pop(days.index(day))
rid = random.choice(rids)
tmp = {rid: all_recipes[rid]}
menu_dict[day] = [rid, tmp[rid]]
return render_template('menu.html', menu=menu_dict)
@app.route('/user/')
@login_required
def user_page():
try:
user = session['username']
c, conn = connection()
if user == 'Daniel':
_ = c.execute('SELECT rid, location, title FROM recipes;')
else:
_ = c.execute('SELECT rid, location, title FROM recipes WHERE user="%s";' % user)
recipes = c.fetchall()
c.close()
conn.close()
gc.collect()
recipes = [
{
'rid': recipe[0],
'country': recipe[1],
'title': recipe[2]
} for recipe in recipes]
rank = get_ranking()
if user not in rank.keys():
number_recipes = 0
else:
number_recipes = rank[user]
total_recipes = sum(rank.values())
return render_template('user.html', user=user, nr=number_recipes, tr=total_recipes, recipes=recipes)
except Exception as e:
return render_template('favourites.html', favourites=False)
@app.route('/edit_recipe/<string:rid>', methods=['GET', 'POST'])
@login_required
def edit_recipe(rid):
# Get the recipe
# c.execute('INSERT INTO recipes (title, location, ingredients, recipe, user) VALUES ("%s", "%s", "%s", "%s", "%s");' %
c, conn = connection()
_ = c.execute('SELECT * FROM recipes WHERE rid="%s"' % rid)
recipe = c.fetchone()
c.close()
conn.close()
gc.collect()
# Fill the form
form = RecipeForm(request.form)
form.title.data = recipe[1]
form.country.data = recipe[2]
form.ingredients.data = '\n'.join(recipe[3].split(','))
form.recipe.data = recipe[4]
if request.method == 'POST':
title = escape_string(request.form['title'])
country = escape_string(request.form['country'])
ingredients = escape_string(','.join(request.form['ingredients'].split('\r\n')).strip(','))
recipe = escape_string(request.form['recipe'])
# Update the DB
c, conn = connection()
c.execute('UPDATE recipes SET title="%s", location="%s", ingredients="%s", recipe="%s" WHERE rid=%s' % (title, country, ingredients, recipe, rid))
conn.commit()
# Close connection
c.close()
conn.close()
gc.collect()
flash('Recipe updated')
return redirect(url_for('user_page'))
return render_template('edit_recipe.html', form=form)
@app.route('/delete_recipe/<string:rid>', methods=['GET', 'POST'])
@login_required
def delete_recipe(rid):
username = session['username']
if request.method == 'POST':
c, conn = connection()
if username == 'Daniel':
_ = c.execute('DELETE FROM recipes WHERE rid="%s"' % (rid))
else:
_ = c.execute('DELETE FROM recipes WHERE rid="%s" AND user="%s"' % (rid, username))
conn.commit()
c.close()
conn.close()
gc.collect()
flash('Recipe successfully deleted')
return redirect(url_for('user_page'))
else:
flash('Recipe not deleted')
return redirect(url_for('user_page'))
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, debug=True)
| 34.825806
| 154
| 0.544214
|
292e5572e9b26b2b27b00331d2425f003f6e2cc2
| 4,846
|
py
|
Python
|
DM562/Python Exercises/asg3-wienerpolygon/wiener.py
|
RuneSemey/course-help
|
8eab36d89d02dd12df2c23786c982f4045262a45
|
[
"MIT"
] | 2
|
2019-10-03T16:25:18.000Z
|
2019-11-19T16:51:22.000Z
|
DM562/Python Exercises/asg3-wienerpolygon/wiener.py
|
RuneSemey/course-help
|
8eab36d89d02dd12df2c23786c982f4045262a45
|
[
"MIT"
] | 7
|
2019-10-03T15:31:57.000Z
|
2019-10-10T14:22:15.000Z
|
DM562/Python Exercises/asg3-wienerpolygon/wiener.py
|
RuneSemey/course-help
|
8eab36d89d02dd12df2c23786c982f4045262a45
|
[
"MIT"
] | 28
|
2019-10-03T14:46:09.000Z
|
2019-10-29T13:31:40.000Z
|
"""wiener.py
Lab 3: Wiener Index / Polygons
------------------------------
This lab has two independent parts; you need to provide your solutions
via implementations in wiener.py (this file) and polygon.py.
Hint: Start with the implementations for polygon.py, to ensure you get
most of the points with a small amount of work.
Number of points to be achieved in polygon.py : 92
Number of points to be achieved in wiener.py : 8
wienerIndex(W): 2
distanceMatrix(W): 2
lengthThreePaths(W): 2
boilingPoint(W): 2
In this part of the lab you need to implement the methods in order to
predict boiling points of chemical compounds, where the chemical
compounds are, for simplicity, encoded as adjacency matrices of
graphs. See the lecture slides for all the details.
The functions' docstrings again contain some real examples and usage
of the functions. You can run these examples by executing the script
from command line:
python3 wiener.py
Note that the unit tests for the final grading may contain different
tests, and that certain requirements given below are not tested in the
testing before the final testing.
"""
import numpy as np
from math import inf
def wienerIndex(W):
"""
    Returns the Wiener Index (i.e., the sum of all pairwise shortest paths in a
    graph represented by an edge weight matrix W). Note that we assume all
weights to be floats.
Parameters:
-----------
W : np.ndarray , the edge weight matrix representation of a graph (floats)
Returns:
--------
float : the Wiener Index as presented on the lecture slides
Examples:
---------
>>> A = np.array([[0.0, inf, 1.0, inf, inf, inf], \
[inf, 0.0, 1.0, inf, inf, inf], \
[1.0, 1.0, 0.0, 1.0, 1.0, inf], \
[inf, inf, 1.0, 0.0, inf, 1.0], \
[inf, inf, 1.0, inf, 0.0, inf], \
[inf, inf, inf, 1.0, inf, 0.0]])
>>> wienerIndex(A)
28.0
"""
pass
def distanceMatrix(W):
"""
Given an edge weight matrix W, this returns the distance matrix D (as presented
on the lecture slides)
Parameters:
-----------
W : np.ndarray , the edge weight matrix representation of a graph (floats)
Returns:
--------
D : np.ndarray , the distance matrix of a chemical graph
Examples:
---------
>>> W = np.array([[0.0, inf, 1.0, inf, inf, inf], \
[inf, 0.0, 1.0, inf, inf, inf], \
[1.0, 1.0, 0.0, 1.0, 1.0, inf], \
[inf, inf, 1.0, 0.0, inf, 1.0], \
[inf, inf, 1.0, inf, 0.0, inf], \
[inf, inf, inf, 1.0, inf, 0.0]])
>>> distanceMatrix(W)
array([[ 0., 2., 1., 2., 2., 3.],
[ 2., 0., 1., 2., 2., 3.],
[ 1., 1., 0., 1., 1., 2.],
[ 2., 2., 1., 0., 2., 1.],
[ 2., 2., 1., 2., 0., 3.],
[ 3., 3., 2., 1., 3., 0.]])
"""
pass
def lengthThreePaths(W):
"""
Returns the number of all shortest paths of length 3.0 where the index of the
starting vertex i is smaller than the index of the goal vertex j (called p on
the lecture slides)
Parameters:
-----------
W : np.ndarray , the edge weight matrix representation of a graph (floats)
Returns:
--------
int : the number of all shortest paths of length 3
Examples:
---------
>>> W = np.array([[0.0, inf, 1.0, inf, inf, inf], \
[inf, 0.0, 1.0, inf, inf, inf], \
[1.0, 1.0, 0.0, 1.0, 1.0, inf], \
[inf, inf, 1.0, 0.0, inf, 1.0], \
[inf, inf, 1.0, inf, 0.0, inf], \
[inf, inf, inf, 1.0, inf, 0.0]])
>>> lengthThreePaths(W)
3
"""
pass
def boilingPoint(W):
"""
Returns the boiling point prediction as introduced on the lecture slides.
Parameters:
-----------
W : np.ndarray , the edge weight matrix representation of a graph (floats)
Returns:
--------
float : the predicted boiling point of the molecule
Raises:
-------
    TypeError: if W is not a two-dimensional square and symmetric edge weight
    matrix with W[i,i] == 0 for all possible i
Examples:
---------
>>> W = np.array([[0.0, inf, 1.0, inf, inf, inf], \
[inf, 0.0, 1.0, inf, inf, inf], \
[1.0, 1.0, 0.0, 1.0, 1.0, inf], \
[inf, inf, 1.0, 0.0, inf, 1.0], \
[inf, inf, 1.0, inf, 0.0, inf], \
[inf, inf, inf, 1.0, inf, 0.0]])
>>> round(boilingPoint(W),3)
49.664
"""
pass
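# ---------------------------------------------------------------------------
# Illustrative sketch (NOT the assignment solution): one common way to obtain
# the distance matrix from an edge weight matrix W is the Floyd-Warshall
# algorithm; the Wiener index is then the sum over the strict upper triangle,
# and the length-three path count is the number of pairs i < j at distance 3.
# The helper names below are hypothetical and are not used by the grader. The
# boiling point regression itself is not sketched here, since its constants
# are given on the lecture slides.
# ---------------------------------------------------------------------------
def _floyd_warshall_sketch(W):
    """All-pairs shortest path matrix for the edge weight matrix W."""
    D = np.array(W, dtype=float, copy=True)
    n = D.shape[0]
    for k in range(n):
        # Relax every pair (i, j) through the intermediate vertex k.
        D = np.minimum(D, D[:, k:k + 1] + D[k:k + 1, :])
    return D


def _wiener_index_sketch(W):
    """Sum of pairwise shortest path lengths, counting each pair once."""
    D = _floyd_warshall_sketch(W)
    return float(np.sum(np.triu(D, k=1)))


def _length_three_paths_sketch(W):
    """Number of vertex pairs (i < j) whose shortest path has length 3."""
    D = _floyd_warshall_sketch(W)
    return int(np.sum(np.triu(D == 3.0, k=1)))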
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29.192771
| 83
| 0.524144
|
7bf1f88b52266224a0a123de193c37b3adf4241b
| 12,299
|
py
|
Python
|
tutorials/rnn/ptb/ptb_word_lm.py
|
obreitwi/tensorflow-models
|
c15fada28113eca32dc98d6e3bec4755d0d5b4c2
|
[
"Apache-2.0"
] | null | null | null |
tutorials/rnn/ptb/ptb_word_lm.py
|
obreitwi/tensorflow-models
|
c15fada28113eca32dc98d6e3bec4755d0d5b4c2
|
[
"Apache-2.0"
] | null | null | null |
tutorials/rnn/ptb/ptb_word_lm.py
|
obreitwi/tensorflow-models
|
c15fada28113eca32dc98d6e3bec4755d0d5b4c2
|
[
"Apache-2.0"
] | 1
|
2019-11-04T11:52:51.000Z
|
2019-11-04T11:52:51.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example / benchmark for building a PTB LSTM model.
Trains the model described in:
(Zaremba, et al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
There are 3 supported model configurations:
===========================================
| config | epochs | train | valid | test
===========================================
| small | 13 | 37.99 | 121.39 | 115.91
| medium | 39 | 48.45 | 86.16 | 82.07
| large | 55 | 37.87 | 82.62 | 78.29
The exact results may vary depending on the random initialization.
The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
The data required for this example is in the data/ dir of the
PTB dataset from Tomas Mikolov's webpage:
$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
$ tar xvf simple-examples.tgz
To run:
$ python ptb_word_lm.py --data_path=simple-examples/data/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import time
import numpy as np
import tensorflow as tf
import reader
flags = tf.flags
logging = tf.logging
flags.DEFINE_string(
"model", "small",
"A type of model. Possible options are: small, medium, large.")
flags.DEFINE_string("data_path", None,
"Where the training/test data is stored.")
flags.DEFINE_string("save_path", None,
"Model output directory.")
flags.DEFINE_bool("use_fp16", False,
"Train using 16-bit floats instead of 32bit floats")
FLAGS = flags.FLAGS
def data_type():
return tf.float16 if FLAGS.use_fp16 else tf.float32
class PTBInput(object):
"""The input data."""
def __init__(self, config, data, name=None):
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
self.input_data, self.targets = reader.ptb_producer(
data, batch_size, num_steps, name=name)
class PTBModel(object):
"""The PTB model."""
def __init__(self, is_training, config, input_):
self._input = input_
batch_size = input_.batch_size
num_steps = input_.num_steps
size = config.hidden_size
vocab_size = config.vocab_size
# Slightly better results can be obtained with forget gate biases
# initialized to 1 but the hyperparameters of the model would need to be
# different than reported in the paper.
def lstm_cell():
# With the latest TensorFlow source code (as of Mar 27, 2017),
# the BasicLSTMCell will need a reuse parameter which is unfortunately not
# defined in TensorFlow 1.0. To maintain backwards compatibility, we add
# an argument check here:
if 'reuse' in inspect.getargspec(
tf.contrib.rnn.BasicLSTMCell.__init__).args:
return tf.contrib.rnn.BasicLSTMCell(
size, forget_bias=0.0, state_is_tuple=True,
reuse=tf.get_variable_scope().reuse)
else:
return tf.contrib.rnn.BasicLSTMCell(
size, forget_bias=0.0, state_is_tuple=True)
attn_cell = lstm_cell
if is_training and config.keep_prob < 1:
def attn_cell():
return tf.contrib.rnn.DropoutWrapper(
lstm_cell(), output_keep_prob=config.keep_prob)
cell = tf.contrib.rnn.MultiRNNCell(
[attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)
self._initial_state = cell.zero_state(batch_size, data_type())
with tf.device("/cpu:0"):
embedding = tf.get_variable(
"embedding", [vocab_size, size], dtype=data_type())
inputs = tf.nn.embedding_lookup(embedding, input_.input_data)
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
# Simplified version of models/tutorials/rnn/rnn.py's rnn().
# This builds an unrolled LSTM for tutorial purposes only.
# In general, use the rnn() or state_saving_rnn() from rnn.py.
#
# The alternative version of the code below is:
#
# inputs = tf.unstack(inputs, num=num_steps, axis=1)
# outputs, state = tf.nn.rnn(cell, inputs,
# initial_state=self._initial_state)
outputs = []
state = self._initial_state
with tf.variable_scope("RNN"):
for time_step in range(num_steps):
if time_step > 0: tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(inputs[:, time_step, :], state)
outputs.append(cell_output)
output = tf.reshape(tf.concat(axis=1, values=outputs), [-1, size])
softmax_w = tf.get_variable(
"softmax_w", [size, vocab_size], dtype=data_type())
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
logits = tf.matmul(output, softmax_w) + softmax_b
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[logits],
[tf.reshape(input_.targets, [-1])],
[tf.ones([batch_size * num_steps], dtype=data_type())])
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = state
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self._lr)
self._train_op = optimizer.apply_gradients(
zip(grads, tvars),
global_step=tf.contrib.framework.get_or_create_global_step())
self._new_lr = tf.placeholder(
tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)
def assign_lr(self, session, lr_value):
session.run(self._lr_update, feed_dict={self._new_lr: lr_value})
@property
def input(self):
return self._input
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
class SmallConfig(object):
"""Small config."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 13
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
class MediumConfig(object):
"""Medium config."""
init_scale = 0.05
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 35
hidden_size = 650
max_epoch = 6
max_max_epoch = 39
keep_prob = 0.5
lr_decay = 0.8
batch_size = 20
vocab_size = 10000
class LargeConfig(object):
"""Large config."""
init_scale = 0.04
learning_rate = 1.0
max_grad_norm = 10
num_layers = 2
num_steps = 35
hidden_size = 1500
max_epoch = 14
max_max_epoch = 55
keep_prob = 0.35
lr_decay = 1 / 1.15
batch_size = 20
vocab_size = 10000
class TestConfig(object):
"""Tiny config, for testing."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 1
num_layers = 1
num_steps = 2
hidden_size = 2
max_epoch = 1
max_max_epoch = 1
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
def run_epoch(session, model, eval_op=None, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
costs = 0.0
iters = 0
state = session.run(model.initial_state)
fetches = {
"cost": model.cost,
"final_state": model.final_state,
}
if eval_op is not None:
fetches["eval_op"] = eval_op
for step in range(model.input.epoch_size):
feed_dict = {}
for i, (c, h) in enumerate(model.initial_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
vals = session.run(fetches, feed_dict)
cost = vals["cost"]
state = vals["final_state"]
costs += cost
iters += model.input.num_steps
if verbose and step % (model.input.epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
iters * model.input.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
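# Note: run_epoch() returns word-level perplexity, i.e.
# exp(total cross-entropy cost / number of predicted time steps); lower is better.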
def get_config():
if FLAGS.model == "small":
return SmallConfig()
elif FLAGS.model == "medium":
return MediumConfig()
elif FLAGS.model == "large":
return LargeConfig()
elif FLAGS.model == "test":
return TestConfig()
else:
raise ValueError("Invalid model: %s", FLAGS.model)
def main(_):
if not FLAGS.data_path:
raise ValueError("Must set --data_path to PTB data directory")
raw_data = reader.ptb_raw_data(FLAGS.data_path)
train_data, valid_data, test_data, _ = raw_data
config = get_config()
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
with tf.Graph().as_default():
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.name_scope("Train"):
train_input = PTBInput(config=config, data=train_data, name="TrainInput")
with tf.variable_scope("Model", reuse=None, initializer=initializer):
m = PTBModel(is_training=True, config=config, input_=train_input)
tf.summary.scalar("Training Loss", m.cost)
tf.summary.scalar("Learning Rate", m.lr)
with tf.name_scope("Valid"):
valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
with tf.variable_scope("Model", reuse=True, initializer=initializer):
mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
tf.summary.scalar("Validation Loss", mvalid.cost)
with tf.name_scope("Test"):
test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
with tf.variable_scope("Model", reuse=True, initializer=initializer):
mtest = PTBModel(is_training=False, config=eval_config,
input_=test_input)
sv = tf.train.Supervisor(logdir=FLAGS.save_path)
with sv.managed_session() as session:
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
m.assign_lr(session, config.learning_rate * lr_decay)
print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
train_perplexity = run_epoch(session, m, eval_op=m.train_op,
verbose=True)
print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
valid_perplexity = run_epoch(session, mvalid)
print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
test_perplexity = run_epoch(session, mtest)
print("Test Perplexity: %.3f" % test_perplexity)
if FLAGS.save_path:
print("Saving model to %s." % FLAGS.save_path)
sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
if __name__ == "__main__":
tf.app.run()
| 31.862694
| 81
| 0.669241
|
fbbd1cc5a0d89039832575a593b631016bd6a814
| 2,451
|
py
|
Python
|
caldera/dataset/_graph_dataset.py
|
jvrana/caldera
|
a346324e77f20739e00a82f97530dda4906f59dd
|
[
"MIT"
] | 2
|
2021-12-13T17:52:17.000Z
|
2021-12-13T17:52:18.000Z
|
caldera/dataset/_graph_dataset.py
|
jvrana/caldera
|
a346324e77f20739e00a82f97530dda4906f59dd
|
[
"MIT"
] | 4
|
2020-10-06T21:06:15.000Z
|
2020-10-10T01:18:23.000Z
|
caldera/dataset/_graph_dataset.py
|
jvrana/caldera
|
a346324e77f20739e00a82f97530dda4906f59dd
|
[
"MIT"
] | null | null | null |
from typing import List
from typing import Optional
from typing import overload
import torch
from torch.utils.data import Dataset
from caldera.data import GraphBatch
from caldera.data import GraphData
from caldera.transforms._base import TransformCallable
class GraphDataset(Dataset):
"""Graph dataset."""
def __init__(
self, datalist: List[GraphData], transform: Optional[TransformCallable] = None
):
"""Creates a graph dataset.
:param datalist:
:param transform:
"""
assert isinstance(datalist, list)
self.datalist = datalist
self.transform = transform
def __len__(self):
return len(self.datalist)
def mem_sizes(self):
        return torch.IntTensor([d.memsize() for d in self.datalist])
def get(self, idx, transform: Optional[TransformCallable] = None):
transform = transform or self.transform
if torch.is_tensor(idx):
idx = idx.tolist()
if isinstance(idx, int):
idx = [idx]
one_item = True
else:
one_item = False
if isinstance(idx, slice):
samples = self.datalist[idx]
else:
samples = []
for i in idx:
sample = self.datalist[i]
if transform:
sample = transform(sample)
samples.append(sample)
if one_item:
samples = samples[0]
return samples
@overload
def __getitem__(self, idx: torch.LongTensor) -> List[GraphData]:
...
@overload
def __getitem__(self, idx: List[int]) -> List[GraphData]:
...
def __getitem__(self, idx: int) -> GraphData:
return self.get(idx)
class GraphBatchDataset(GraphDataset):
"""Dataset that loads a :class:`caldera.data.GraphData` list into.
:class:`caldera.data.GraphBatch` instances, and then performs transform (if
provided).
"""
@overload
def __getitem__(self, idx: torch.LongTensor) -> GraphBatch:
...
@overload
def __getitem__(self, idx: List[int]) -> GraphData:
...
def __getitem__(self, idx: int) -> GraphBatch:
samples = self.get(idx, transform=None)
if isinstance(samples, GraphData):
samples = [samples]
batch = GraphBatch.from_data_list(samples)
if self.transform:
batch = self.transform(batch)
return batch
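# Illustrative usage sketch (not part of the caldera API; the helper name is
# hypothetical): GraphBatchDataset indexes like a list but collates the selected
# GraphData items into a single GraphBatch, applying the optional transform to
# the batch rather than to each item.
def _example_batch_lookup(datalist: List[GraphData]) -> GraphBatch:
    dataset = GraphBatchDataset(datalist)
    # A list of indices yields one collated GraphBatch.
    return dataset[[0, 1]]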
| 26.074468
| 86
| 0.603427
|
de869e9af2d05f16908d65ff7bff0ebcef20654c
| 693
|
py
|
Python
|
apps/cms/migrations/0003_auto_20211013_1635.py
|
truhlik/what_articles
|
a212b78c7137f1a4e7008bb06c927c2a60ef9b9f
|
[
"MIT"
] | null | null | null |
apps/cms/migrations/0003_auto_20211013_1635.py
|
truhlik/what_articles
|
a212b78c7137f1a4e7008bb06c927c2a60ef9b9f
|
[
"MIT"
] | null | null | null |
apps/cms/migrations/0003_auto_20211013_1635.py
|
truhlik/what_articles
|
a212b78c7137f1a4e7008bb06c927c2a60ef9b9f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-10-13 16:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0002_auto_20211013_1612'),
]
operations = [
migrations.AddField(
model_name='article',
name='title_de',
field=models.CharField(default='test', max_length=255, verbose_name='title de'),
preserve_default=False,
),
migrations.AddField(
model_name='article',
name='title_en',
field=models.CharField(default='test', max_length=255, verbose_name='title en'),
preserve_default=False,
),
]
| 26.653846
| 92
| 0.593074
|
26d892d2cedddbc5215fdd76ceb0a0e4a54142ab
| 14,773
|
py
|
Python
|
src/gui.py
|
Jack477/CommanderPi_rockpi
|
25bd80d982d6627b71d9e174021611f6d734de3d
|
[
"MIT"
] | 3
|
2021-03-24T10:46:20.000Z
|
2021-11-12T12:24:45.000Z
|
src/gui.py
|
Jack477/CommanderPi_rockpi
|
25bd80d982d6627b71d9e174021611f6d734de3d
|
[
"MIT"
] | null | null | null |
src/gui.py
|
Jack477/CommanderPi_rockpi
|
25bd80d982d6627b71d9e174021611f6d734de3d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
import os
import resources as rs
import update as up
import tkinter as tk
import theme as th
import importlib
import webbrowser
from tkinter import messagebox as msb
from tkinter import *
from tkinter import ttk
from PIL import Image, ImageTk
### TODO: Move change_theme function to theme.py?
### split resources.py into smaller files
### move window_list from theme.py to resources
home_path = sys.argv[1]
def change_theme(master):
if int(th.color_mode)==0:
print("Setting color theme to 1")
th.color_mode=1
else:
th.color_mode=0
rs.config.set('DEFAULT', 'color_mode', str(th.color_mode))
with open(home_path+'/CommanderPi/src/cpi.config', 'w') as configfile:
rs.config.write(configfile)
th.set_theme(master)
#print(th.color_mode)
### Use in window class: master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
def on_Window_Close(master):
if isinstance(master, tk.Tk):
window_name = master.__class__
print(window_name)
th.window_list.pop()
master.destroy()
### Using to keybind window kill
def killwindow(event, master):
on_Window_Close(master)
### Open new window with his own master
def bopen(window):
x = window()
class Network_Window:
def __init__(master):
master = tk.Tk()
master.geometry("480x310")
master.title("Commander Pi")
th.window_list.append(master)
mainframe = Frame(master)
mainframe.pack(padx=10, pady=10)
titleframe = Frame(mainframe)
titleframe.pack(fill=X)
image = Image.open(home_path+"/CommanderPi/src/icons/Networkings.png")
photo = ImageTk.PhotoImage(image, master=titleframe)
title_label = tk.Label( titleframe, text = " Networking", font=("TkDefaultFont", 18, "bold"), image = photo, compound=LEFT, anchor='w')
title_label.image = photo
title_label.pack(side=LEFT)
network_frame = Frame(mainframe)
network_frame.pack()
ether_title = tk.Label(network_frame, text="Ethernet:", font=("TkDefaultFont", 11, "bold"))
ether_title.grid(row=0, column=0)
wlan_title = tk.Label(network_frame, text="WiFi:", font=("TkDefaultFont", 11, "bold"))
wlan_title.grid(row=0, column=1)
ether_label = tk.Label( network_frame, text = rs.eth0_data, borderwidth=2, relief="groove", height=7, width=25, anchor='w', justify=LEFT )
ether_label.grid(row=1, column=0, sticky = W)
wlan_label = tk.Label( network_frame, text = rs.wlan0_data, borderwidth=2, relief="groove", height=7, width=25, anchor='w', justify=LEFT )
wlan_label.grid(row=1, column=1, sticky = W)
cc_frame = Frame(mainframe)
cc_frame.pack()
actual_country_code = tk.Label( cc_frame, text="Your country code: "+rs.get_country_code(), font=("TkDefaultFont", 11, "bold"))
actual_country_code.grid(row=0, column=0, columnspan=2)
country_code_label = tk.Label( cc_frame, text="Set your country code", font=("TkDefaultFont", 11, "bold"))
country_code_label.grid(row=1, column=0, columnspan=2)
country_code_entry = tk.Entry( cc_frame, justify=CENTER, width=5)
country_code_entry.grid(row=2, column=0, sticky=E)
country_code_button = tk.Button( cc_frame, text="Apply", command=lambda:push_cc(), font=("TkDefaultFont", 10, "bold"), cursor="hand2")
country_code_button.grid(row=2, column=1)
bind_label = tk.Label( mainframe, text="Press [Esc] to close", font=("TkDefaultFont", 11, "bold") )
bind_label.pack(side=BOTTOM)
def push_cc():
code = country_code_entry.get()
if isinstance(code, str) and len(code) == 2:
code = code.upper()
rs.set_country_code(code)
actual_country_code.configure(text="Your country code: "+rs.get_country_code())
msb.showinfo(title="Done!", message="Your country code is now set as "+code)
else:
msb.showwarning(title="Error", message="Country code should be two letters!")
th.set_theme(master)
master.bind('<Escape>', lambda e:killwindow(e, master))
master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
master.mainloop()
class Proc_Info_Window:
def __init__(master):
master = tk.Tk()
master.geometry("350x400")
master.title("Commander Pi")
th.window_list.append(master)
th.set_theme(master)
mainframe = Frame(master)
mainframe.pack(padx=10, pady=10)
titleframe = Frame(mainframe)
titleframe.pack(fill=X)
image = Image.open(home_path+"/CommanderPi/src/icons/CPUs.png")
photo = ImageTk.PhotoImage(image, master=titleframe)
title_label = tk.Label( titleframe, text = " CPU Details", font=("TkDefaultFont", 18, "bold"), image = photo, compound=LEFT, anchor='w')
title_label.image = photo
title_label.pack(side=LEFT)
separator = ttk.Separator(mainframe, orient='horizontal')
separator.pack(fill=X, expand=True, pady=15)
cpu_content_frame = Frame(mainframe)
cpu_content_frame.pack(fill=X)
cpu_label = tk.Label( cpu_content_frame, text = rs.getproc0(), justify=LEFT, width=20, anchor='w' )
cpu_label.grid(row=0, column=0, rowspan=14, sticky=W)
cpu_label2 = tk.Label( cpu_content_frame, text = rs.getproc1(), justify=LEFT, width=13, anchor='w' )
cpu_label2.grid(row=0, column=1, rowspan=14, sticky=W)
separator2 = ttk.Separator(mainframe, orient='horizontal')
separator2.pack(fill=X, expand=True, pady=15)
bind_label = tk.Label( mainframe, text="Press [Esc] to close", font=("TkDefaultFont", 11, "bold") )
bind_label.pack(side=BOTTOM)
master.bind('<Escape>', lambda e:killwindow(e, master))
master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
master.mainloop()
class About_Window:
def __init__(master):
master = tk.Tk()
master.geometry("400x450")
master.title("Commander Pi")
th.window_list.append(master)
mainframe = Frame(master)
mainframe.pack(padx=10, pady=10)
titleframe = Frame(mainframe)
titleframe.pack(fill=X)
image = Image.open(home_path+"/CommanderPi/src/icons/logo.png")
photo = ImageTk.PhotoImage(image, master=titleframe)
title_label = tk.Label( titleframe, text = " About Application", font=("TkDefaultFont", 18, "bold"), image = photo, compound=LEFT, anchor='w')
title_label.image = photo
title_label.pack(side=LEFT)
separator = ttk.Separator(mainframe, orient='horizontal')
separator.pack(fill=X, expand=True, pady=15)
content_frame = Frame(mainframe)
content_frame.pack()
about_label = tk.Label( content_frame, text = "Commander Pi 2020\n", justify=CENTER, font=("TkDefaultFont", 11, "bold"))
about_label.pack()
text_label = tk.Label( content_frame, text="By Jack477\nFor Twister OS Armbian\n\nGraphic elements by grayduck\nIcon derived from a work by Vectors Market", justify=CENTER)
text_label.pack(fill=X)
version_label = tk.Label( content_frame, text=rs.get_app_version(), font=("TkDefaultFont", 11, "bold"), justify=CENTER)
version_label.pack()
link = tk.Label( content_frame, text="Changelog", cursor="hand2", fg="#1D81DA", pady=5)
link.pack(fill=X)
mlink = 'https://github.com/Jack477/CommanderPi/blob/master/CHANGELOG.md'
link.bind("<Button-1>", lambda e: rs.cpi_open_url(mlink))
separator2 = ttk.Separator(mainframe, orient='horizontal')
separator2.pack(fill=X, expand=True, pady=15)
update_button = Button(mainframe, text="Check for updates", command=lambda:update_x(), cursor="hand2", font=("TkDefaultFont", 11, "bold"), state=DISABLED)
update_button.pack()
color_buton = Button(mainframe, text="Change color theme", command=lambda:change_theme(master), cursor="hand2", font=("TkDefaultFont", 11, "bold"))
color_buton.pack()
def update_x():
up.update_cpi()
bind_label = tk.Label( mainframe, text="Press [Esc] to close", font=("TkDefaultFont", 11, "bold") )
bind_label.pack(side=BOTTOM)
master.bind('<Escape>', lambda e:killwindow(e, master))
th.set_theme(master)
master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
master.mainloop()
### Main window
class Window:
def __init__(master):
master = tk.Tk()
master.geometry("420x550")
master.title("Commander Pi")
master.resizable(False, False)
#master.iconbitmap("@"+home_path+"/CommanderPi/src/xicon.ico")
icon = PhotoImage(file = home_path+"/CommanderPi/src/icon.png")
master.iconphoto(True, icon)
th.window_list.append(master)
mainframe = Frame(master)
mainframe.pack(padx=10, pady=10)
titleframe = Frame(mainframe)
titleframe.pack()
loadimg = Image.open(home_path+"/CommanderPi/src/icons/title_logo.png")
img = ImageTk.PhotoImage(image=loadimg)
img_label = tk.Label ( titleframe, image=img)
img_label.image = img
img_label.grid(row=0, column=0, columnspan=2)
#title_label = tk.Label( titleframe, text = "Commander Pi", font=("TkDefaultFont", 22, "bold") )
#title_label.grid(row=0, column=1)
separator = ttk.Separator(mainframe, orient='horizontal')
separator.pack(fill=X, expand=True, pady=10)
infoframe = Frame(mainframe)
infoframe.pack(fill=X)
title2_label = tk.Label( infoframe, text = "ROCK PI VERSION\nReal-time system information:\n", font=("TkDefaultFont", 11, "bold"), anchor='w')
title2_label.grid(row=0, column=0, columnspan=2, sticky=W)
board_version_label = tk.Label( infoframe, text= rs.board_version, fg="red", anchor='w')
board_version_label.grid(row=1, column=0, columnspan=2, sticky=W)
kernel_version_label = tk.Label( infoframe, text = "Kernel version: ", width=30, anchor='w' )
kernel_version_label.grid(row=2, column=0, sticky=W)
kernel_version_label2 = tk.Label( infoframe, text = rs.kernel_version , width=15, anchor='w')
kernel_version_label2.grid(row=2, column=1)
kernel_mode_label = tk.Label( infoframe, text = "Operating mode: ", width=30, anchor='w')
kernel_mode_label.grid(row=3, column=0, sticky=W)
kernel_mode_label2 = tk.Label( infoframe, text = rs.get_kernel_mode(), width=15, anchor='w')
kernel_mode_label2.grid(row=3, column=1)
processor_architecture_label = tk.Label( infoframe, text="Processor architecture: ", width=30, anchor='w' )
processor_architecture_label.grid(row=4, column=0, sticky=W)
processor_architecture_label2 = tk.Label( infoframe, text=rs.processor_architecture, width=15, anchor='w')
processor_architecture_label2.grid(row=4, column=1)
memory_use_label = tk.Label( infoframe, text = "Memory usage: ", width=30, anchor='w' )
memory_use_label.grid(row=5, column=0, sticky=W)
memory_use_label2 = tk.Label( infoframe, text = "", width=15, anchor='w' )
memory_use_label2.grid(row=5, column=1)
actual_gpu_temp_label = tk.Label( infoframe, text = "Actual GPU temperature: ", width=30, anchor='w' )
actual_gpu_temp_label.grid(row=6, column=0, sticky=W)
actual_gpu_temp_label2 = tk.Label( infoframe, text = "", width=15, anchor='w' )
actual_gpu_temp_label2.grid(row=6, column=1)
actual_cpu_temp_label = tk.Label( infoframe, text = "Actual CPU temperature: ", width=30, anchor='w' )
actual_cpu_temp_label.grid(row=7, column=0, sticky=W)
actual_cpu_temp_label2 = tk.Label( infoframe, text = "", width=15, anchor='w' )
actual_cpu_temp_label2.grid(row=7, column=1)
actual_cpu_usage_label = tk.Label( infoframe, text = "Processor frequency usage is: ", width=30, anchor='w')
actual_cpu_usage_label.grid(row=8, column=0, sticky=W)
actual_cpu_usage_label2 = tk.Label(infoframe, text = "", width=15, anchor='w')
actual_cpu_usage_label2.grid(row=8, column=1)
actual_gpu_usage_label = tk.Label( infoframe, text = "GPU frequency (V3D) usage is: ", width=30, anchor='w')
actual_gpu_usage_label.grid(row=9, column=0, sticky=W)
actual_gpu_usage_label2 = tk.Label(infoframe, text = "", width=15, anchor='w')
actual_gpu_usage_label2.grid(row=9, column=1)
used_label = tk.Label ( infoframe, text="Used disk space: ", width=30, anchor='w')
used_label.grid(row=10, column=0, sticky=W)
##BORDER TO TABLE borderwidth=2, relief="groove",
used_label2 = tk.Label ( infoframe, text=rs.used+"/"+rs.total+" GiB", width=15, anchor='w')
used_label2.grid(row=10, column=1)
separator2 = ttk.Separator(mainframe, orient='horizontal')
separator2.pack(fill=X, expand=True, pady=10)
#REFRESH CPU USAGE, MEMORY USAGE AND TEMPERATURE
def refresh():
#for x in th.window_list:
# print(x.__class__)
ttext = rs.reftemp()
ptext = rs.refusage()
mtext = rs.refmem()
gtext = rs.reftemp2()
gputext = rs.refgpu()
#dtext = str(rs.get_disk_percent())
#dtext = "CPU usage " + rs.cpu_usagex +" MHz"
memory_use_label2.configure(text = mtext + "/100%")
actual_cpu_temp_label2.configure(text = ttext)
actual_cpu_usage_label2.configure(text = ptext)
actual_gpu_temp_label2.configure(text = gtext)
actual_gpu_usage_label2.configure(text = gputext)
master.after(1000, refresh)
refresh()
advanced_label = tk.Label( mainframe, text = "Advanced tools", font=("TkDefaultFont", 11, "bold"), anchor='w' )
advanced_label.pack(fill=X)
btn_frame = Frame(mainframe)
btn_frame.pack(fill=X)
photo1 = PhotoImage(file = home_path+"/CommanderPi/src/icons/CPUs.png")
#photoimage1 = photo1.subsample(15, 15)
proc_info_button = Button ( btn_frame, text="CPU details", command = lambda:bopen(Proc_Info_Window), width=60, height=80, cursor="hand2", image = photo1, compound=TOP)
proc_info_button.grid(row=0, column=0, padx=4)
#photo2 = PhotoImage(file = home_path+"/CommanderPi/src/icons/Bootloaders.png")
#btn4 = Button (btn_frame, text="Bootloader", width=60, height=80, cursor="hand2", image = photo2, compound=TOP)
#btn4.grid(row=0, column=1, padx=4)
photo3 = PhotoImage(file = home_path+"/CommanderPi/src/icons/Networkings.png")
btn5 = Button (btn_frame, text="Network", command = lambda:bopen(Network_Window), width=60, height=80, cursor="hand2", image = photo3, compound=TOP)
btn5.grid(row=0, column=2, padx=4)
#photo4 = PhotoImage(file = home_path+"/CommanderPi/src/icons/Overclockings.png")
#btn2 = Button(btn_frame, text="Overclock", command = lambda:bopen(Overclock_Window), width=60, height=80, cursor="hand2", image = photo4, compound=TOP)
#btn2.grid(row=0, column=3, padx=4)
btn3 = Button( mainframe, text="About/Update", command = lambda:bopen(About_Window), font=("TkDefaultFont", 11, "bold"), cursor="hand2")
btn3.pack(side=BOTTOM, pady=5)
master.protocol("WM_DELETE_WINDOW", lambda:on_Window_Close(master))
th.set_theme(master)
#up.check_update()
master.mainloop()
| 38.272021
| 175
| 0.695323
|
7310ddf8902515ec57c147bac72a9d0d43af3a9f
| 10,297
|
py
|
Python
|
autotest/gdrivers/ers.py
|
Instant-Monkey/gdal
|
e4755c13394841ded981c885da2fc4e257f22c6b
|
[
"MIT"
] | null | null | null |
autotest/gdrivers/ers.py
|
Instant-Monkey/gdal
|
e4755c13394841ded981c885da2fc4e257f22c6b
|
[
"MIT"
] | 3
|
2019-02-27T00:43:06.000Z
|
2019-06-28T21:57:10.000Z
|
autotest/gdrivers/ers.py
|
TileDB-Inc/gdal
|
dc824494cc3f96ead82eb42be9a8bcfb1466073b
|
[
"MIT"
] | 1
|
2021-04-26T14:47:38.000Z
|
2021-04-26T14:47:38.000Z
|
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test ERS format driver.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2007, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2011-2013, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
from osgeo import gdal
from osgeo import osr
import gdaltest
import pytest
###############################################################################
# Perform simple read test.
def test_ers_1():
tst = gdaltest.GDALTest('ERS', 'srtm.ers', 1, 64074)
tst.testOpen()
ds = gdal.Open('data/srtm.ers')
md = ds.GetRasterBand(1).GetMetadata()
expected_md = {'STATISTICS_MEAN': '-4020.25', 'STATISTICS_MINIMUM': '-4315', 'STATISTICS_MAXIMUM': '-3744', 'STATISTICS_MEDIAN': '-4000'}
assert md == expected_md
###############################################################################
# Create simple copy and check.
def test_ers_2():
tst = gdaltest.GDALTest('ERS', 'float32.bil', 1, 27)
return tst.testCreateCopy(new_filename='tmp/float32.ers',
check_gt=1, vsimem=1)
###############################################################################
# Test multi-band file.
def test_ers_3():
tst = gdaltest.GDALTest('ERS', 'rgbsmall.tif', 2, 21053)
return tst.testCreate(new_filename='tmp/rgbsmall.ers')
###############################################################################
# Test HeaderOffset case.
def test_ers_4():
gt = (143.59625, 0.025, 0.0, -39.38125, 0.0, -0.025)
srs = """GEOGCS["GEOCENTRIC DATUM of AUSTRALIA",
DATUM["GDA94",
SPHEROID["GRS80",6378137,298.257222101]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433]]"""
tst = gdaltest.GDALTest('ERS', 'ers_dem.ers', 1, 56588)
return tst.testOpen(check_prj=srs, check_gt=gt)
###############################################################################
# Confirm we can recognised signed 8bit data.
def test_ers_5():
ds = gdal.Open('data/8s.ers')
md = ds.GetRasterBand(1).GetMetadata('IMAGE_STRUCTURE')
assert md['PIXELTYPE'] == 'SIGNEDBYTE', 'Failed to detect SIGNEDBYTE'
ds = None
###############################################################################
# Confirm a copy preserves the signed byte info.
def test_ers_6():
drv = gdal.GetDriverByName('ERS')
src_ds = gdal.Open('data/8s.ers')
ds = drv.CreateCopy('tmp/8s.ers', src_ds)
md = ds.GetRasterBand(1).GetMetadata('IMAGE_STRUCTURE')
assert md['PIXELTYPE'] == 'SIGNEDBYTE', 'Failed to detect SIGNEDBYTE'
ds = None
drv.Delete('tmp/8s.ers')
###############################################################################
# Test opening a file with everything in lower case.
def test_ers_7():
ds = gdal.Open('data/caseinsensitive.ers')
desc = ds.GetRasterBand(1).GetDescription()
assert desc == 'RTP 1st Vertical Derivative', 'did not get expected values.'
###############################################################################
# Test GCP support
def test_ers_8():
src_ds = gdal.Open('../gcore/data/gcps.vrt')
drv = gdal.GetDriverByName('ERS')
ds = drv.CreateCopy('/vsimem/ers_8.ers', src_ds)
ds = None
gdal.Unlink('/vsimem/ers_8.ers.aux.xml')
ds = gdal.Open('/vsimem/ers_8.ers')
expected_gcps = src_ds.GetGCPs()
gcps = ds.GetGCPs()
gcp_count = ds.GetGCPCount()
wkt = ds.GetGCPProjection()
ds = None
assert wkt == """PROJCS["NUTM11",GEOGCS["NAD27",DATUM["North_American_Datum_1927",SPHEROID["Clarke 1866",6378206.4,294.978698213898,AUTHORITY["EPSG","7008"]],TOWGS84[-3,142,183,0,0,0,0],AUTHORITY["EPSG","6267"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4267"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-117],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["Metre",1],AXIS["Easting",EAST],AXIS["Northing",NORTH]]""", \
'did not get expected GCP projection'
assert len(gcps) == len(expected_gcps) and len(gcps) == gcp_count, \
'did not get expected GCP number'
for i, gcp in enumerate(gcps):
if gcp.GCPPixel != pytest.approx(expected_gcps[i].GCPPixel, abs=1e-6) or \
gcp.GCPLine != pytest.approx(expected_gcps[i].GCPLine, abs=1e-6) or \
gcp.GCPX != pytest.approx(expected_gcps[i].GCPX, abs=1e-6) or \
gcp.GCPY != pytest.approx(expected_gcps[i].GCPY, abs=1e-6):
print(gcps[i])
pytest.fail('did not get expected GCP %d' % i)
drv.Delete('/vsimem/ers_8.ers')
###############################################################################
# Test NoData support (#4207)
def test_ers_9():
drv = gdal.GetDriverByName('ERS')
ds = drv.Create('/vsimem/ers_9.ers', 1, 1)
ds.GetRasterBand(1).SetNoDataValue(123)
ds = None
f = gdal.VSIFOpenL('/vsimem/ers_9.ers.aux.xml', 'rb')
if f is not None:
gdal.VSIFCloseL(f)
drv.Delete('/vsimem/ers_9.ers')
pytest.fail('/vsimem/ers_9.ers.aux.xml should not exist')
ds = gdal.Open('/vsimem/ers_9.ers')
val = ds.GetRasterBand(1).GetNoDataValue()
ds = None
drv.Delete('/vsimem/ers_9.ers')
assert val == 123, 'did not get expected nodata value'
###############################################################################
# Test PROJ, DATUM, UNITS support (#4229)
def test_ers_10():
drv = gdal.GetDriverByName('ERS')
ds = drv.Create('/vsimem/ers_10.ers', 1, 1, options=['DATUM=GDA94', 'PROJ=MGA55', 'UNITS=METERS'])
proj = ds.GetMetadataItem("PROJ", "ERS")
datum = ds.GetMetadataItem("DATUM", "ERS")
units = ds.GetMetadataItem("UNITS", "ERS")
assert proj == 'MGA55', 'did not get expected PROJ'
assert datum == 'GDA94', 'did not get expected DATUM'
assert units == 'METERS', 'did not get expected UNITS'
# This should be overridden by the above values
sr = osr.SpatialReference()
sr.ImportFromEPSG(4326)
ds.SetProjection(sr.ExportToWkt())
proj = ds.GetMetadataItem("PROJ", "ERS")
datum = ds.GetMetadataItem("DATUM", "ERS")
units = ds.GetMetadataItem("UNITS", "ERS")
assert proj == 'MGA55', 'did not get expected PROJ'
assert datum == 'GDA94', 'did not get expected DATUM'
assert units == 'METERS', 'did not get expected UNITS'
ds = None
f = gdal.VSIFOpenL('/vsimem/ers_10.ers.aux.xml', 'rb')
if f is not None:
gdal.VSIFCloseL(f)
drv.Delete('/vsimem/ers_10.ers')
pytest.fail('/vsimem/ers_10.ers.aux.xml should not exist')
ds = gdal.Open('/vsimem/ers_10.ers')
wkt = ds.GetProjectionRef()
proj = ds.GetMetadataItem("PROJ", "ERS")
datum = ds.GetMetadataItem("DATUM", "ERS")
units = ds.GetMetadataItem("UNITS", "ERS")
md_ers = ds.GetMetadata("ERS")
ds = None
drv.Delete('/vsimem/ers_10.ers')
assert proj == 'MGA55', 'did not get expected PROJ'
assert datum == 'GDA94', 'did not get expected DATUM'
assert units == 'METERS', 'did not get expected UNITS'
assert md_ers["PROJ"] == proj and md_ers["DATUM"] == datum and md_ers["UNITS"] == units, \
('GetMetadata() not consistent with '
'GetMetadataItem()')
assert wkt.startswith("""PROJCS["MGA55"""), 'did not get expected projection'
ds = drv.Create('/vsimem/ers_10.ers', 1, 1, options=['DATUM=GDA94', 'PROJ=MGA55', 'UNITS=FEET'])
ds = None
# Check that we can update those values with SetProjection()
ds = gdal.Open('/vsimem/ers_10.ers', gdal.GA_Update)
sr = osr.SpatialReference()
sr.ImportFromEPSG(4326)
ds.SetProjection(sr.ExportToWkt())
proj = ds.GetMetadataItem("PROJ", "ERS")
datum = ds.GetMetadataItem("DATUM", "ERS")
units = ds.GetMetadataItem("UNITS", "ERS")
assert proj == 'GEODETIC', 'did not get expected PROJ'
assert datum == 'WGS84', 'did not get expected DATUM'
assert units == 'METERS', 'did not get expected UNITS'
ds = None
ds = gdal.Open('/vsimem/ers_10.ers')
proj = ds.GetMetadataItem("PROJ", "ERS")
datum = ds.GetMetadataItem("DATUM", "ERS")
units = ds.GetMetadataItem("UNITS", "ERS")
ds = None
drv.Delete('/vsimem/ers_10.ers')
assert proj == 'GEODETIC', 'did not get expected PROJ'
assert datum == 'WGS84', 'did not get expected DATUM'
assert units == 'METERS', 'did not get expected UNITS'
###############################################################################
# Test fix for https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=8744
def test_ers_recursive_opening():
ds = gdal.Open('/vsitar/data/test_ers_recursive.tar/test.ers')
ds.GetFileList()
###############################################################################
# Cleanup
def test_ers_cleanup():
gdaltest.clean_tmp()
| 33.650327
| 614
| 0.588132
|
76f0427af25c4989945127e6a8cb7e3e94718da5
| 990
|
py
|
Python
|
countdown with user input.py
|
Pergamon256/Countdown
|
8c928d4ec64c98b6535089d7498ccfa2425af5d1
|
[
"MIT"
] | null | null | null |
countdown with user input.py
|
Pergamon256/Countdown
|
8c928d4ec64c98b6535089d7498ccfa2425af5d1
|
[
"MIT"
] | null | null | null |
countdown with user input.py
|
Pergamon256/Countdown
|
8c928d4ec64c98b6535089d7498ccfa2425af5d1
|
[
"MIT"
] | null | null | null |
import time
x = int(input("Enter countdown minutes: "))
y = int(input("Enter countdown seconds: "))
print('Countdown started') #You can edit this to display whatever text you want, even include strings using {} and .format()
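# countdown(m, s): prints a live, zero-padded MM:SS display once per second until it reaches 00:00 or is interrupted with Ctrl+C.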
def countdown(m,s):
i=m
j=s
k=0
try:
while True:
if(j == -1):
j = 59
i -= 1
if((i > 9) and (j > 9)):
print(str(i)+":"+str(j), end='\r') #Carriage return only works with python 3; end='\r' will not work with python 2.
elif(i > 9):
print(str(i)+":"+str(k)+str(j), end='\r')
elif(j > 9):
print(str(k)+str(i)+":"+str(j), end='\r')
else:
print(str(k)+str(i)+":"+str(k)+str(j), end='\r')
time.sleep(1)
j -= 1
if(i == 0 and j == -1):
break
except KeyboardInterrupt:
pass
print("", end='\r')
if(i == 0 and j == -1):
print('Countdown ended') #You can edit this to display whatever text you want, even include strings using {} and .format()
time.sleep(1)
countdown(x, y)
| 27.5
| 124
| 0.586869
|
fe14cba8bcfee021100f33f0dfe413f1571a894e
| 474
|
py
|
Python
|
django-ng/django_ng_project/django_ng_app/migrations/0013_auto_20170516_0106.py
|
bounouhmahmoud/ensi
|
209e57340359af3ea063c064982198848dc36c5f
|
[
"MIT"
] | 1
|
2018-07-10T06:59:05.000Z
|
2018-07-10T06:59:05.000Z
|
django-ng/django_ng_project/django_ng_app/migrations/0013_auto_20170516_0106.py
|
Arsalen/BusinessStrategies
|
209e57340359af3ea063c064982198848dc36c5f
|
[
"MIT"
] | null | null | null |
django-ng/django_ng_project/django_ng_app/migrations/0013_auto_20170516_0106.py
|
Arsalen/BusinessStrategies
|
209e57340359af3ea063c064982198848dc36c5f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-16 01:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_ng_app', '0012_auto_20170515_1926'),
]
operations = [
migrations.AlterField(
model_name='search',
name='expression',
field=models.TextField(default=None, max_length=254),
),
]
| 22.571429
| 65
| 0.630802
|
fd45cc32616cf3d83a53f2682836eba99ddf8e58
| 228
|
py
|
Python
|
src/configuration/settings.py
|
arturosolutions/dta_test
|
2beb7e332a13929070655ec41894a727e9be03e0
|
[
"MIT"
] | null | null | null |
src/configuration/settings.py
|
arturosolutions/dta_test
|
2beb7e332a13929070655ec41894a727e9be03e0
|
[
"MIT"
] | null | null | null |
src/configuration/settings.py
|
arturosolutions/dta_test
|
2beb7e332a13929070655ec41894a727e9be03e0
|
[
"MIT"
] | null | null | null |
import os
DEBUG = True
CURR_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(CURR_DIR)
LOG_DIR = '{}/logs'.format(BASE_DIR)
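# Database table names used by the application.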
COMPANY_TABLENAME = 'company'
SOURCE_TABLENAME = 'login_source'
| 20.727273
| 54
| 0.723684
|
40b3b0aa6706428ef6ddde1f0eba186bc70357ab
| 397
|
py
|
Python
|
app/rss_feeder/wsgi.py
|
RSaab/rss-scraper
|
9bf608878e7d08fea6508ae90b27f1c226b313f1
|
[
"MIT"
] | null | null | null |
app/rss_feeder/wsgi.py
|
RSaab/rss-scraper
|
9bf608878e7d08fea6508ae90b27f1c226b313f1
|
[
"MIT"
] | null | null | null |
app/rss_feeder/wsgi.py
|
RSaab/rss-scraper
|
9bf608878e7d08fea6508ae90b27f1c226b313f1
|
[
"MIT"
] | null | null | null |
"""
WSGI config for rss_feeder project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rss_feeder.settings')
application = get_wsgi_application()
| 23.352941
| 78
| 0.788413
|
2ad01f049ae585b9690e1f335cfc08ccf670e810
| 10,659
|
py
|
Python
|
sdk/python/pulumi_azure_native/importexport/v20200801/job.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/importexport/v20200801/job.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/importexport/v20200801/job.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['JobArgs', 'Job']
@pulumi.input_type
class JobArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
job_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['JobDetailsArgs']] = None,
tags: Optional[Any] = None):
"""
The set of arguments for constructing a Job resource.
:param pulumi.Input[str] resource_group_name: The resource group name uniquely identifies the resource group within the user subscription.
:param pulumi.Input[str] job_name: The name of the import/export job.
:param pulumi.Input[str] location: Specifies the supported Azure location where the job should be created
:param pulumi.Input['JobDetailsArgs'] properties: Specifies the job properties
:param Any tags: Specifies the tags that will be assigned to the job.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if job_name is not None:
pulumi.set(__self__, "job_name", job_name)
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name uniquely identifies the resource group within the user subscription.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="jobName")
def job_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the import/export job.
"""
return pulumi.get(self, "job_name")
@job_name.setter
def job_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "job_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the job should be created
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['JobDetailsArgs']]:
"""
Specifies the job properties
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['JobDetailsArgs']]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter
def tags(self) -> Optional[Any]:
"""
Specifies the tags that will be assigned to the job.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[Any]):
pulumi.set(self, "tags", value)
class Job(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
job_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['JobDetailsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[Any] = None,
__props__=None):
"""
Contains the job information.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] job_name: The name of the import/export job.
:param pulumi.Input[str] location: Specifies the supported Azure location where the job should be created
:param pulumi.Input[pulumi.InputType['JobDetailsArgs']] properties: Specifies the job properties
:param pulumi.Input[str] resource_group_name: The resource group name uniquely identifies the resource group within the user subscription.
:param Any tags: Specifies the tags that will be assigned to the job.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: JobArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Contains the job information.
:param str resource_name: The name of the resource.
:param JobArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(JobArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
job_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['JobDetailsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[Any] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = JobArgs.__new__(JobArgs)
__props__.__dict__["job_name"] = job_name
__props__.__dict__["location"] = location
__props__.__dict__["properties"] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["identity"] = None
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:importexport/v20200801:Job"), pulumi.Alias(type_="azure-native:importexport:Job"), pulumi.Alias(type_="azure-nextgen:importexport:Job"), pulumi.Alias(type_="azure-native:importexport/v20161101:Job"), pulumi.Alias(type_="azure-nextgen:importexport/v20161101:Job"), pulumi.Alias(type_="azure-native:importexport/v20210101:Job"), pulumi.Alias(type_="azure-nextgen:importexport/v20210101:Job")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Job, __self__).__init__(
'azure-native:importexport/v20200801:Job',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Job':
"""
Get an existing Job resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = JobArgs.__new__(JobArgs)
__props__.__dict__["identity"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return Job(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityDetailsResponse']]:
"""
Specifies the job identity details
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the Azure location where the job is created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the job.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.JobDetailsResponse']:
"""
Specifies the job properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
SystemData of ImportExport Jobs.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Any]]:
"""
Specifies the tags that are assigned to the job.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Specifies the type of the job resource.
"""
return pulumi.get(self, "type")
| 40.071429
| 478
| 0.634487
|
eac813c4d8e6114c0487fbab6a3f4bfb0cab6d6b
| 43,183
|
py
|
Python
|
userbot/modules/memes.py
|
jefanya14/Bot
|
2166a9cce2a595dd4dbe08f09c036c3757c1f25b
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2021-11-28T16:04:59.000Z
|
2021-11-28T16:04:59.000Z
|
userbot/modules/memes.py
|
jefanya14/Bot
|
2166a9cce2a595dd4dbe08f09c036c3757c1f25b
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 5
|
2021-11-28T21:14:32.000Z
|
2021-11-29T09:20:22.000Z
|
userbot/modules/memes.py
|
jefanya14/Bot
|
2166a9cce2a595dd4dbe08f09c036c3757c1f25b
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2021-11-29T05:25:48.000Z
|
2021-11-29T05:25:48.000Z
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for having some fun with people. """
import os
import time
import urllib
from asyncio import sleep
from collections import deque
from random import choice, getrandbits, randint
from re import sub
import requests
from cowpy import cow
from userbot import CMD_HELP, LOGS, bot
from userbot.events import register
from userbot.modules.admin import get_user_from_event
# ================= CONSTANT =================
METOOSTR = [
"Me too thanks",
"Haha yes, me too",
"Same lol",
"Me irl",
"Same here",
"Haha yes",
"Me rn",
]
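# ZALG_LIST: pools of Unicode combining marks that the .zal command sprinkles onto characters to produce "zalgo" text.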
ZALG_LIST = [
[
"̖",
" ̗",
" ̘",
" ̙",
" ̜",
" ̝",
" ̞",
" ̟",
" ̠",
" ̤",
" ̥",
" ̦",
" ̩",
" ̪",
" ̫",
" ̬",
" ̭",
" ̮",
" ̯",
" ̰",
" ̱",
" ̲",
" ̳",
" ̹",
" ̺",
" ̻",
" ̼",
" ͅ",
" ͇",
" ͈",
" ͉",
" ͍",
" ͎",
" ͓",
" ͔",
" ͕",
" ͖",
" ͙",
" ͚",
" ",
],
[
" ̍",
" ̎",
" ̄",
" ̅",
" ̿",
" ̑",
" ̆",
" ̐",
" ͒",
" ͗",
" ͑",
" ̇",
" ̈",
" ̊",
" ͂",
" ̓",
" ̈́",
" ͊",
" ͋",
" ͌",
" ̃",
" ̂",
" ̌",
" ͐",
" ́",
" ̋",
" ̏",
" ̽",
" ̉",
" ͣ",
" ͤ",
" ͥ",
" ͦ",
" ͧ",
" ͨ",
" ͩ",
" ͪ",
" ͫ",
" ͬ",
" ͭ",
" ͮ",
" ͯ",
" ̾",
" ͛",
" ͆",
" ̚",
],
[
" ̕",
" ̛",
" ̀",
" ́",
" ͘",
" ̡",
" ̢",
" ̧",
" ̨",
" ̴",
" ̵",
" ̶",
" ͜",
" ͝",
" ͞",
" ͟",
" ͠",
" ͢",
" ̸",
" ̷",
" ͡",
],
]
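# EMOJIS: random emoji mixed into the text by the .cp (copypasta) command.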
EMOJIS = [
"😂",
"😂",
"👌",
"✌",
"💞",
"👍",
"👌",
"💯",
"🎶",
"👀",
"😂",
"👓",
"👏",
"👐",
"🍕",
"💥",
"🍴",
"💦",
"💦",
"🍑",
"🍆",
"😩",
"😏",
"👉👌",
"👀",
"👅",
"😩",
"🚰",
]
INSULT_STRINGS = [
"Owww ... Such a stupid idiot.",
"Don't drink and type.",
"I think you should go home or better a mental asylum.",
"Command not found. Just like your brain.",
"Do you realize you are making a fool of yourself? Apparently not.",
"You can type better than that.",
"Bot rule 544 section 9 prevents me from replying to stupid humans like you.",
"Sorry, we do not sell brains.",
"Believe me you are not normal.",
"I bet your brain feels as good as new, seeing that you never use it.",
"If I wanted to kill myself I'd climb your ego and jump to your IQ.",
"Zombies eat brains... you're safe.",
"You didn't evolve from apes, they evolved from you.",
"Come back and talk to me when your I.Q. exceeds your age.",
"I'm not saying you're stupid, I'm just saying you've got bad luck when it comes to thinking.",
"What language are you speaking? Cause it sounds like bullshit.",
"Stupidity is not a crime so you are free to go.",
"You are proof that evolution CAN go in reverse.",
"I would ask you how old you are but I know you can't count that high.",
"As an outsider, what do you think of the human race?",
"Brains aren't everything. In your case they're nothing.",
"Ordinarily people live and learn. You just live.",
"I don't know what makes you so stupid, but it really works.",
"Keep talking, someday you'll say something intelligent! (I doubt it though)",
"Shock me, say something intelligent.",
"Your IQ's lower than your shoe size.",
"Alas! Your neurotransmitters are no more working.",
"Are you crazy you fool.",
"Everyone has the right to be stupid but you are abusing the privilege.",
"I'm sorry I hurt your feelings when I called you stupid. I thought you already knew that.",
"You should try tasting cyanide.",
"Your enzymes are meant to digest rat poison.",
"You should try sleeping forever.",
"Pick up a gun and shoot yourself.",
"You could make a world record by jumping from a plane without parachute.",
"Stop talking BS and jump in front of a running bullet train.",
"Try bathing with Hydrochloric Acid instead of water.",
"Try this: if you hold your breath underwater for an hour, you can then hold it forever.",
"Go Green! Stop inhaling Oxygen.",
"God was searching for you. You should leave to meet him.",
"give your 100%. Now, go donate blood.",
"Try jumping from a hundred story building but you can do it only once.",
"You should donate your brain seeing that you never used it.",
"Volunteer for target in an firing range.",
"Head shots are fun. Get yourself one.",
"You should try swimming with great white sharks.",
"You should paint yourself red and run in a bull marathon.",
"You can stay underwater for the rest of your life without coming back up.",
"How about you stop breathing for like 1 day? That'll be great.",
"Try provoking a tiger while you both are in a cage.",
"Have you tried shooting yourself as high as 100m using a canon.",
"You should try holding TNT in your mouth and igniting it.",
"Try playing catch and throw with RDX its fun.",
"I heard phogine is poisonous but i guess you wont mind inhaling it for fun.",
"Launch yourself into outer space while forgetting oxygen on Earth.",
"You should try playing snake and ladders, with real snakes and no ladders.",
"Dance naked on a couple of HT wires.",
"Active Volcano is the best swimming pool for you.",
"You should try hot bath in a volcano.",
"Try to spend one day in a coffin and it will be yours forever.",
"Hit Uranium with a slow moving neutron in your presence. It will be a worthwhile experience.",
"You can be the first person to step on sun. Have a try.",
]
UWUS = [
"(・`ω´・)",
";;w;;",
"owo",
"UwU",
">w<",
"^w^",
r"\(^o\) (/o^)/",
"( ^ _ ^)∠☆",
"(ô_ô)",
"~:o",
";-;",
"(*^*)",
"(>_",
"(♥_♥)",
"*(^O^)*",
"((+_+))",
]
IWIS = [
"┐(´д`)┌",
"┐(´~`)┌",
"┐(´ー`)┌",
"┐( ̄ヘ ̄)┌",
"╮(╯∀╰)╭",
"╮(╯_╰)╭",
"┐(´д`)┌",
"┐(´∀`)┌",
"ʅ(́◡◝)ʃ",
"┐(゚~゚)┌",
"┐('д')┌",
"┐(‘~`;)┌",
"ヘ(´-`;)ヘ",
"┐( -“-)┌",
"ʅ(´◔౪◔)ʃ",
"ヽ(゜~゜o)ノ",
"ヽ(~~~ )ノ",
"┐(~ー~;)┌",
"┐(-。ー;)┌",
r"¯\_(ツ)_/¯",
r"¯\_(⊙_ʖ⊙)_/¯",
r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
]
FACEREACTS = [
"ʘ‿ʘ",
"ヾ(-_- )ゞ",
"(っ˘ڡ˘ς)",
"(´ж`ς)",
"( ಠ ʖ̯ ಠ)",
"(° ͜ʖ͡°)╭∩╮",
"(ᵟຶ︵ ᵟຶ)",
"(งツ)ว",
"ʚ(•`",
"(っ▀¯▀)つ",
"(◠﹏◠)",
"( ͡ಠ ʖ̯ ͡ಠ)",
"( ఠ ͟ʖ ఠ)",
"(∩`-´)⊃━☆゚.*・。゚",
"(⊃。•́‿•̀。)⊃",
"(._.)",
"{•̃_•̃}",
"(ᵔᴥᵔ)",
"♨_♨",
"⥀.⥀",
"ح˚௰˚づ ",
"(҂◡_◡)",
"ƪ(ړײ)ƪ",
"(っ•́。•́)♪♬",
"◖ᵔᴥᵔ◗ ♪ ♫ ",
"(☞゚ヮ゚)☞",
"[¬º-°]¬",
"(Ծ‸ Ծ)",
"(•̀ᴗ•́)و ̑̑",
"ヾ(´〇`)ノ♪♪♪",
"(ง'̀-'́)ง",
"ლ(•́•́ლ)",
"ʕ •́؈•̀ ₎",
"♪♪ ヽ(ˇ∀ˇ )ゞ",
"щ(゚Д゚щ)",
"( ˇ෴ˇ )",
"눈_눈",
"(๑•́ ₃ •̀๑) ",
"( ˘ ³˘)♥ ",
"ԅ(≖‿≖ԅ)",
"♥‿♥",
"◔_◔",
"⁽⁽ଘ( ˊᵕˋ )ଓ⁾⁾",
"乁( ◔ ౪◔)「 ┑( ̄Д  ̄)┍",
"( ఠൠఠ )ノ",
"٩(๏_๏)۶",
"┌(ㆆ㉨ㆆ)ʃ",
"ఠ_ఠ",
"(づ。◕‿‿◕。)づ",
"(ノಠ ∩ಠ)ノ彡( \\o°o)\\",
"“ヽ(´▽`)ノ”",
"༼ ༎ຶ ෴ ༎ຶ༽",
"。゚( ゚இ‸இ゚)゚。",
"(づ ̄ ³ ̄)づ",
"(⊙.☉)7",
"ᕕ( ᐛ )ᕗ",
"t(-_-t)",
"(ಥ⌣ಥ)",
"ヽ༼ ಠ益ಠ ༽ノ",
"༼∵༽ ༼⍨༽ ༼⍢༽ ༼⍤༽",
"ミ●﹏☉ミ",
"(⊙_◎)",
"¿ⓧ_ⓧﮌ",
"ಠ_ಠ",
"(´・_・`)",
"ᕦ(ò_óˇ)ᕤ",
"⊙﹏⊙",
"(╯°□°)╯︵ ┻━┻",
r"¯\_(⊙︿⊙)_/¯",
"٩◔̯◔۶",
"°‿‿°",
"ᕙ(⇀‸↼‶)ᕗ",
"⊂(◉‿◉)つ",
"V•ᴥ•V",
"q(❂‿❂)p",
"ಥ_ಥ",
"ฅ^•ﻌ•^ฅ",
"ಥ﹏ಥ",
"( ^_^)o自自o(^_^ )",
"ಠ‿ಠ",
"ヽ(´▽`)/",
"ᵒᴥᵒ#",
"( ͡° ͜ʖ ͡°)",
"┬─┬ ノ( ゜-゜ノ)",
"ヽ(´ー`)ノ",
"☜(⌒▽⌒)☞",
"ε=ε=ε=┌(;*´Д`)ノ",
"(╬ ಠ益ಠ)",
"┬─┬⃰͡ (ᵔᵕᵔ͜ )",
"┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻",
r"¯\_(ツ)_/¯",
"ʕᵔᴥᵔʔ",
"(`・ω・´)",
"ʕ•ᴥ•ʔ",
"ლ(`ー´ლ)",
"ʕʘ̅͜ʘ̅ʔ",
"( ゚Д゚)",
r"¯\(°_o)/¯",
"(。◕‿◕。)",
]
RUNS_STR = [
"Runs to Thanos..",
"Runs far, far away from earth..",
"Running faster than Bolt coz i'mma userbot !!",
"Runs to Marie..",
"This Group is too cancerous to deal with.",
"Cya bois",
"Kys",
"I go away",
"I am just walking off, coz me is too fat.",
"I Fugged off!",
"Will run for chocolate.",
"I run because I really like food.",
"Running...\nbecause dieting is not an option.",
"Wicked fast runnah",
"If you wanna catch me, you got to be fast...\nIf you wanna stay with me, you got to be good...\nBut if you wanna pass me...\nYou've got to be kidding.",
"Anyone can run a hundred meters, it's the next forty-two thousand and two hundred that count.",
"Why are all these people following me?",
"Are the kids still chasing me?",
"Running a marathon...there's an app for that.",
]
CHASE_STR = [
"Where do you think you're going?",
"Huh? what? did they get away?",
"ZZzzZZzz... Huh? what? oh, just them again, nevermind.",
"Get back here!",
"Not so fast...",
"Look out for the wall!",
"Don't leave me alone with them!!",
"You run, you die.",
"Jokes on you, I'm everywhere",
"You're gonna regret that...",
"You could also try /kickme, I hear that's fun.",
"Go bother someone else, no-one here cares.",
"You can run, but you can't hide.",
"Is that all you've got?",
"I'm behind you...",
"You've got company!",
"We can do this the easy way, or the hard way.",
"You just don't get it, do you?",
"Yeah, you better run!",
"Please, remind me how much I care?",
"I'd run faster if I were you.",
"That's definitely the droid we're looking for.",
"May the odds be ever in your favour.",
"Famous last words.",
"And they disappeared forever, never to be seen again.",
'"Oh, look at me! I\'m so cool, I can run from a bot!" - this person',
"Yeah yeah, just tap /kickme already.",
"Here, take this ring and head to Mordor while you're at it.",
"Legend has it, they're still running...",
"Unlike Harry Potter, your parents can't protect you from me.",
"Fear leads to anger. Anger leads to hate. Hate leads to suffering. If you keep running in fear, you might "
"be the next Vader.",
"Multiple calculations later, I have decided my interest in your shenanigans is exactly 0.",
"Legend has it, they're still running.",
"Keep it up, not sure we want you here anyway.",
"You're a wiza- Oh. Wait. You're not Harry, keep moving.",
"NO RUNNING IN THE HALLWAYS!",
"Hasta la vista, baby.",
"Who let the dogs out?",
"It's funny, because no one cares.",
"Ah, what a waste. I liked that one.",
"Frankly, my dear, I don't give a damn.",
"My milkshake brings all the boys to yard... So run faster!",
"You can't HANDLE the truth!",
"A long time ago, in a galaxy far far away... Someone would've cared about that. Not anymore though.",
"Hey, look at them! They're running from the inevitable banhammer... Cute.",
"Han shot first. So will I.",
"What are you running after, a white rabbit?",
"As The Doctor would say... RUN!",
]
HELLOSTR = [
"Hi !",
"‘Ello, gov'nor!",
"What’s crackin’?",
"‘Sup, homeslice?",
"Howdy, howdy ,howdy!",
"Hello, who's there, I'm talking.",
"You know who this is.",
"Yo!",
"Whaddup.",
"Greetings and salutations!",
"Hello, sunshine!",
"Hey, howdy, hi!",
"What’s kickin’, little chicken?",
"Peek-a-boo!",
"Howdy-doody!",
"Hey there, freshman!",
"I come in peace!",
"Ahoy, matey!",
"Hiya!",
]
SHGS = [
"┐(´д`)┌",
"┐(´~`)┌",
"┐(´ー`)┌",
"┐( ̄ヘ ̄)┌",
"╮(╯∀╰)╭",
"╮(╯_╰)╭",
"┐(´д`)┌",
"┐(´∀`)┌",
"ʅ(́◡◝)ʃ",
"┐(゚~゚)┌",
"┐('д')┌",
"┐(‘~`;)┌",
"ヘ(´-`;)ヘ",
"┐( -“-)┌",
"ʅ(´◔౪◔)ʃ",
"ヽ(゜~゜o)ノ",
"ヽ(~~~ )ノ",
"┐(~ー~;)┌",
"┐(-。ー;)┌",
r"¯\_(ツ)_/¯",
r"¯\_(⊙_ʖ⊙)_/¯",
r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
]
CRI = [
"أ‿أ",
"╥﹏╥",
"(;﹏;)",
"(ToT)",
"(┳Д┳)",
"(ಥ﹏ಥ)",
"(;へ:)",
"(T_T)",
"(πーπ)",
"(T▽T)",
"(⋟﹏⋞)",
"(iДi)",
"(´Д⊂ヽ",
"(;Д;)",
"(>﹏<)",
"(TдT)",
"(つ﹏⊂)",
"༼☯﹏☯༽",
"(ノ﹏ヽ)",
"(ノAヽ)",
"(╥_╥)",
"(T⌓T)",
"(༎ຶ⌑༎ຶ)",
"(☍﹏⁰)。",
"(ಥ_ʖಥ)",
"(つд⊂)",
"(≖͞_≖̥)",
"(இ﹏இ`。)",
"༼ಢ_ಢ༽",
"༼ ༎ຶ ෴ ༎ຶ༽",
]
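# Slap command data: sentence templates plus item/verb/location pools for the English (EN), Indonesian (ID) and "jutsu" variants of .slap.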
SLAP_TEMPLATES_EN = [
"{hits} {victim} with a {item}.",
"{hits} {victim} in the face with a {item}.",
"{hits} {victim} around a bit with a {item}.",
"{throws} a {item} at {victim}.",
"grabs a {item} and {throws} it at {victim}'s face.",
"{hits} a {item} at {victim}.",
"{throws} a few {item} at {victim}.",
"grabs a {item} and {throws} it in {victim}'s face.",
"launches a {item} in {victim}'s general direction.",
"sits on {victim}'s face while slamming a {item} {where}.",
"starts slapping {victim} silly with a {item}.",
"pins {victim} down and repeatedly {hits} them with a {item}.",
"grabs up a {item} and {hits} {victim} with it.",
"starts slapping {victim} silly with a {item}.",
"holds {victim} down and repeatedly {hits} them with a {item}.",
"prods {victim} with a {item}.",
"picks up a {item} and {hits} {victim} with it.",
"ties {victim} to a chair and {throws} a {item} at them.",
"{hits} {victim} {where} with a {item}.",
"ties {victim} to a pole and whips them {where} with a {item}."
"gave a friendly push to help {victim} learn to swim in lava.",
"sent {victim} to /dev/null.",
"sent {victim} down the memory hole.",
"beheaded {victim}.",
"threw {victim} off a building.",
"replaced all of {victim}'s music with Nickelback.",
"spammed {victim}'s email.",
"made {victim} a knuckle sandwich.",
"slapped {victim} with pure nothing.",
"hit {victim} with a small, interstellar spaceship.",
"quickscoped {victim}.",
"put {victim} in check-mate.",
"RSA-encrypted {victim} and deleted the private key.",
"put {victim} in the friendzone.",
"slaps {victim} with a DMCA takedown request!",
]
ITEMS_EN = [
"cast iron skillet",
"large trout",
"baseball bat",
"cricket bat",
"wooden cane",
"nail",
"printer",
"shovel",
"pair of trousers",
"CRT monitor",
"diamond sword",
"baguette",
"physics textbook",
"toaster",
"portrait of Richard Stallman",
"television",
"mau5head",
"five ton truck",
"roll of duct tape",
"book",
"laptop",
"old television",
"sack of rocks",
"rainbow trout",
"cobblestone block",
"lava bucket",
"rubber chicken",
"spiked bat",
"gold block",
"fire extinguisher",
"heavy rock",
"chunk of dirt",
"beehive",
"piece of rotten meat",
"bear",
"ton of bricks",
]
THROW_EN = [
"throws",
"flings",
"chucks",
"hurls",
]
HIT_EN = [
"yeets"
"hits",
"whacks",
"slaps",
"smacks",
"bashes",
]
WHERE_EN = ["in the chest", "on the head", "on the butt", "on the crotch"]
# ID translation by @yincen
SLAP_TEMPLATES_ID = [
"{hits} {victim} dengan {item}.",
"{throws} sebuah {item} kepada {victim}.",
"mengambil {item} dan {hits} {victim} .",
"Mengambil Sebuah {item} dan {hits} {victim} Dengan itu.",
"Menjatuhkan {victim} Ke Lava.",
"Mengirimkan {victim} ke Kawah.",
"Membuang {victim} Ke Laut.",
"Mengeluarkan {victim} Dari Bumi.",
"Melempar {victim} Ke luar angkasa.",
"Menaruh {victim} di Pluto.",
"Melemparkan sebuah {item} ke {victim}.",
"Melemparkan {item} kepada {victim}.",
"Menampar {victim} menggunakan {item}.",
"Membuang {victim} Ke udara.",
"Menghapus {victim} Dari Daftar Teman.",
"Melemparkan {item} {where} {victim}.",
"Meletakan {item} {where} {victim}.",
"Menyerang {victim} menggunakan {anime}.",
"Mengehack Seluruh akun {victim}",
]
ITEMS_ID = [
"Tabung Gas",
"Televisi 42 In",
"Raket",
"Raket Nyamuk",
"Kaca",
"Buku",
"Linggis",
"Telur",
"Jarum",
"Monitor Tabung",
"Obeng",
"Almunium",
"Emas",
"Printer",
"Speaker",
"Gas Lpg",
"Tangki Bensin",
"Tandon Air",
"Bola Boling",
"Laptop",
"Hardisk Rusak",
"Wajan Panas",
"Virus Corona",
"Meja Kantor",
"Meja Arsip",
"Lemari",
"Ember Besi",
"Besi Beton",
"Timah Panas",
"Harimau",
"Batu Krikil",
"Makanan Basi",
"Pesawat AirBus",
"Roket Nasa",
"Satelit Nasa",
"Matahari",
"Meteor",
"Berkas Kantor",
"Beton panas",
"Cermin",
"Batu Giok",
"Botol",
"Nezuko",
"Kaset Pita",
"Tiang Jemuran",
"Pisau Lipat",
"Bongkahan Es ",
"Asteroid",
]
THROW_ID = [
"Melempar",
"Melemparkan",
"Mematak",
]
HIT_ID = [
"Memukul",
"melemparkan",
"Memukuli",
"Membogem",
]
WHERE_ID = ["di pipi", "di kepala", "di bokong", "di badan"]
SLAP_TEMPLATES_Jutsu = [
"Menyerang {victim} Menggunakan {hits}.",
"Menyerang {victim} Menggunakan {item}.",
"Melemparkan {throws} kepada {victim} .",
"Melemparkan {throws} {where} {victim}.",
]
ITEMS_Jutsu = [
"KAA MEE HAA MEE HAA",
"Chibaku Tensei",
"Amaterasu",
]
THROW_Jutsu = [
"Futon Rasen Shuriken",
"Shuriken",
]
HIT_Jutsu = [
"Rasengan",
"Chidori",
]
WHERE_Jutsu = ["Di Pipi", "Di Kepala", "Di Bokong", "Di Badan ,Di Pantat"]
# ===========================================
@register(outgoing=True, pattern=r"^.(\w+)say (.*)")
async def univsaye(cowmsg):
"""For .cowsay module, userbot wrapper for cow which says things."""
arg = cowmsg.pattern_match.group(1).lower()
text = cowmsg.pattern_match.group(2)
if arg == "cow":
arg = "default"
if arg not in cow.COWACTERS:
return
cheese = cow.get_cow(arg)
cheese = cheese()
await cowmsg.edit(f"`{cheese.milk(text).replace('`', '´')}`")
@register(outgoing=True, pattern="^:/$", ignore_unsafe=True)
async def kek(keks):
"""Check yourself ;)"""
uio = ["/", "\\"]
for i in range(1, 15):
time.sleep(0.3)
await keks.edit(":" + uio[i % 2])
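# .coinflip [heads|tails]: flips a virtual coin and, if a guess was given, reports whether it was correct.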
@register(outgoing=True, pattern=r"^.coinflip (.*)")
async def coin(event):
r = choice(["heads", "tails"])
input_str = event.pattern_match.group(1)
if input_str:
input_str = input_str.lower()
if r == "heads":
if input_str == "heads":
await event.edit(
"The coin landed on: **Heads**.\nYou were correct.")
elif input_str == "tails":
await event.edit(
"The coin landed on: **Heads**.\nYou weren't correct, try again ..."
)
else:
await event.edit("The coin landed on: **Heads**.")
elif r == "tails":
if input_str == "tails":
await event.edit(
"The coin landed on: **Tails**.\nYou were correct.")
elif input_str == "heads":
await event.edit(
"The coin landed on: **Tails**.\nYou weren't correct, try again ..."
)
else:
await event.edit("The coin landed on: **Tails**.")
@register(pattern="^.slap(?: |$)(.*)", outgoing=True)
async def who(event):
"""slaps a user, or get slapped if not a reply."""
replied_user = await get_user_from_event(event)
if replied_user:
replied_user = replied_user[0]
else:
return
caption = await slap(replied_user, event)
try:
await event.edit(caption)
except BaseException:
await event.edit(
"`Can't slap this person, need to fetch some sticks and stones !!`"
)
async def slap(replied_user, event):
"""Construct a funny slap sentence !!"""
user_id = replied_user.id
first_name = replied_user.first_name
username = replied_user.username
if username:
slapped = "@{}".format(username)
else:
slapped = f"[{first_name}](tg://user?id={user_id})"
slap_str = event.pattern_match.group(1)
if slap_str == "en" or slap_str not in ["id", "jutsu"]:
temp = choice(SLAP_TEMPLATES_EN)
item = choice(ITEMS_EN)
hit = choice(HIT_EN)
throw = choice(THROW_EN)
where = choice(WHERE_EN)
elif slap_str == "id":
temp = choice(SLAP_TEMPLATES_ID)
item = choice(ITEMS_ID)
hit = choice(HIT_ID)
throw = choice(THROW_ID)
where = choice(WHERE_ID)
else:
temp = choice(SLAP_TEMPLATES_Jutsu)
item = choice(ITEMS_Jutsu)
hit = choice(HIT_Jutsu)
throw = choice(THROW_Jutsu)
where = choice(WHERE_Jutsu)
return "..." + temp.format(
victim=slapped, item=item, hits=hit, throws=throw, where=where)
@register(outgoing=True, pattern="^-_-$", ignore_unsafe=True)
async def lol(lel):
"""Ok..."""
okay = "-_-"
for _ in range(10):
okay = okay[:-1] + "_-"
await lel.edit(okay)
@register(outgoing=True, pattern="^.boobs(?: |$)(.*)")
async def boobs(e):
await e.edit("`Finding some big boobs...`")
await sleep(3)
await e.edit("`Sending some big boobs...`")
nsfw = requests.get("http://api.oboobs.ru/noise/1").json()[0]["preview"]
urllib.request.urlretrieve("http://media.oboobs.ru/{}".format(nsfw),
"*.jpg")
os.rename("*.jpg", "boobs.jpg")
await e.client.send_file(e.chat_id, "boobs.jpg")
os.remove("boobs.jpg")
await e.delete()
@register(outgoing=True, pattern="^.butts(?: |$)(.*)")
async def butts(e):
await e.edit("`Finding some beautiful butts...`")
await sleep(3)
await e.edit("`Sending some beautiful butts...`")
nsfw = requests.get("http://api.obutts.ru/noise/1").json()[0]["preview"]
urllib.request.urlretrieve("http://media.obutts.ru/{}".format(nsfw),
"*.jpg")
os.rename("*.jpg", "butts.jpg")
await e.client.send_file(e.chat_id, "butts.jpg")
os.remove("butts.jpg")
await e.delete()
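# .yes / .no / .maybe / .decide: fetches an answer (text plus GIF) from the yesno.wtf API and posts it as a reply.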
@register(outgoing=True, pattern="^.(yes|no|maybe|decide)$")
async def decide(event):
decision = event.pattern_match.group(1).lower()
message_id = event.reply_to_msg_id if event.reply_to_msg_id else None
if decision != "decide":
r = requests.get(f"https://yesno.wtf/api?force={decision}").json()
else:
r = requests.get("https://yesno.wtf/api").json()
await event.delete()
await event.client.send_message(event.chat_id,
str(r["answer"]).upper(),
reply_to=message_id,
file=r["image"])
@register(outgoing=True, pattern="^;_;$", ignore_unsafe=True)
async def fun(e):
t = ";_;"
for _ in range(10):
t = t[:-1] + "_;"
await e.edit(t)
@register(outgoing=True, pattern="^.cry$")
async def cry(e):
"""nangis aja"""
await e.edit(choice(CRI))
@register(outgoing=True, pattern="^.insult$")
async def insult(e):
"""I make you cry !!"""
await e.edit(choice(INSULT_STRINGS))
@register(outgoing=True, pattern="^.cp(?: |$)(.*)")
async def copypasta(cp_e):
"""Copypasta the famous meme"""
textx = await cp_e.get_reply_message()
message = cp_e.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await cp_e.edit("`😂🅱️IvE👐sOME👅text👅for✌️Me👌tO👐MAkE👀iT💞funNy!💦`")
return
reply_text = choice(EMOJIS)
# choose a random character in the message to be substituted with 🅱️
b_char = choice(message).lower()
for owo in message:
if owo == " ":
reply_text += choice(EMOJIS)
elif owo in EMOJIS:
reply_text += owo
reply_text += choice(EMOJIS)
elif owo.lower() == b_char:
reply_text += "🅱️"
else:
reply_text += owo.upper() if bool(getrandbits(1)) else owo.lower()
reply_text += choice(EMOJIS)
await cp_e.edit(reply_text)
@register(outgoing=True, pattern="^.vapor(?: |$)(.*)")
async def vapor(vpr):
"""Vaporize everything!"""
reply_text = []
textx = await vpr.get_reply_message()
message = vpr.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await vpr.edit("`Give some text for vapor!`")
return
for charac in message:
if 0x21 <= ord(charac) <= 0x7F:
reply_text.append(chr(ord(charac) + 0xFEE0))
elif ord(charac) == 0x20:
reply_text.append(chr(0x3000))
else:
reply_text.append(charac)
await vpr.edit("".join(reply_text))
@register(outgoing=True, pattern="^.str(?: |$)(.*)")
async def stretch(stret):
"""Stretch it."""
textx = await stret.get_reply_message()
message = stret.text
message = stret.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await stret.edit("`GiiiiiiiB sooooooomeeeeeee teeeeeeext!`")
return
count = randint(3, 10)
reply_text = sub(r"([aeiouAEIOUaeiouAEIOUаеиоуюяыэё])", (r"\1" * count),
message)
await stret.edit(reply_text)
@register(outgoing=True, pattern="^.zal(?: |$)(.*)")
async def zal(zgfy):
"""Invoke the feeling of chaos."""
reply_text = []
textx = await zgfy.get_reply_message()
message = zgfy.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await zgfy.edit(
"`gͫ ̆ i̛ ̺ v͇̆ ȅͅ a̢ͦ s̴̪ c̸̢ ä̸ rͩͣ y͖͞ t̨͚ é̠ x̢͖ t͔͛`"
)
return
for charac in message:
if not charac.isalpha():
reply_text.append(charac)
continue
for _ in range(3):
textz = randint(0, 2)
if textz == 0:
charac = charac.strip() + choice(ZALG_LIST[0]).strip()
elif textz == 1:
charac = charac.strip() + choice(ZALG_LIST[1]).strip()
else:
charac = charac.strip() + choice(ZALG_LIST[2]).strip()
reply_text.append(charac)
await zgfy.edit("".join(reply_text))
@register(outgoing=True, pattern="^.hi$")
async def hoi(hello):
"""Greet everyone!"""
await hello.edit(choice(HELLOSTR))
@register(outgoing=True, pattern="^.owo(?: |$)(.*)")
async def faces(owo):
"""UwU"""
textx = await owo.get_reply_message()
message = owo.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await owo.edit("` UwU no text given! `")
return
reply_text = sub(r"(r|l)", "w", message)
reply_text = sub(r"(R|L)", "W", reply_text)
reply_text = sub(r"n([aeiou])", r"ny\1", reply_text)
reply_text = sub(r"N([aeiouAEIOU])", r"Ny\1", reply_text)
reply_text = sub(r"\!+", " " + choice(UWUS), reply_text)
reply_text = reply_text.replace("ove", "uv")
reply_text += " " + choice(UWUS)
await owo.edit(reply_text)
@register(outgoing=True, pattern="^.react$")
async def react_meme(react):
"""Make your userbot react to everything."""
await react.edit(choice(FACEREACTS))
@register(outgoing=True, pattern="^.ii(?: |$)(.*)")
async def iwi(siwis):
"""IwI"""
textx = await siwis.get_reply_message()
message = siwis.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await siwis.edit("` IwI no text given! `")
return
reply_text = sub(r"(a|i|u|e|o)", "i", message)
reply_text = sub(r"(A|I|U|E|O)", "I", reply_text)
reply_text = sub(r"\!+", " " + choice(IWIS), reply_text)
reply_text += " " + choice(IWIS)
await siwis.edit(reply_text)
@register(outgoing=True, pattern="^.shg$")
async def shrugger(shg):
r"""¯\_(ツ)_/¯"""
await shg.edit(choice(SHGS))
@register(outgoing=True, pattern="^.chase$")
async def police(chase):
"""Run boi run, i'm gonna catch you !!"""
await chase.edit(choice(CHASE_STR))
@register(outgoing=True, pattern="^.run$")
async def runner_lol(run):
"""Run, run, RUNNN!"""
await run.edit(choice(RUNS_STR))
@register(outgoing=True, pattern="^.metoo$")
async def metoo(hahayes):
"""Haha yes"""
await hahayes.edit(choice(METOOSTR))
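# .iff [text]: pays respects by replying with a ready-made ASCII "F" banner, or by building a large F out of the supplied text.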
@register(outgoing=True, pattern="^.iff$")
async def pressf(f):
"""Pays respects"""
args = f.text.split()
arg = (f.text.split(" ", 1))[1] if len(args) > 1 else None
if len(args) == 1:
r = randint(0, 3)
LOGS.info(r)
if r == 0:
await f.edit("┏━━━┓\n┃┏━━┛\n┃┗━━┓\n┃┏━━┛\n┃┃\n┗┛")
elif r == 1:
await f.edit("╭━━━╮\n┃╭━━╯\n┃╰━━╮\n┃╭━━╯\n┃┃\n╰╯")
else:
arg = "F"
if arg is not None:
out = ""
F_LENGTHS = [5, 1, 1, 4, 1, 1, 1]
for line in F_LENGTHS:
c = max(round(line / len(arg)), 1)
out += (arg * c) + "\n"
await f.edit("`" + out + "`")
@register(outgoing=True, pattern="^Oof$")
async def Oof(e):
t = "Oof"
for _ in range(15):
t = t[:-1] + "of"
await e.edit(t)
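# The animation commands below (.moon, .earth, .clock, .rain, .love) rotate a deque of emoji and edit the message 32 times at 0.1 s intervals.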
@register(outgoing=True, pattern="^.moon$")
async def moon(event):
deq = deque(list("🌗🌘🌑🌒🌓🌔🌕🌖"))
try:
for _ in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.earth$")
async def earth(event):
deq = deque(list("🌏🌍🌎🌎🌍🌏🌍🌎"))
try:
for _ in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.clock$")
async def clock(event):
deq = deque(list("🕙🕘🕗🕖🕕🕔🕓🕒🕑🕐🕛"))
try:
for _ in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.rain$")
async def rain(event):
deq = deque(list("☀️🌤⛅️🌥☁️🌧⛈"))
try:
for _ in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.love$")
async def love(event):
deq = deque(list("❤️🧡💛💚💙💜🖤💕💞💓💗💖💘💝"))
try:
for _ in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.mock(?: |$)(.*)")
async def spongemocktext(mock):
"""Do it and find the real fun."""
reply_text = []
textx = await mock.get_reply_message()
message = mock.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await mock.edit("`gIvE sOMEtHInG tO MoCk!`")
return
for charac in message:
if charac.isalpha() and randint(0, 1):
to_app = charac.upper() if charac.islower() else charac.lower()
reply_text.append(to_app)
else:
reply_text.append(charac)
await mock.edit("".join(reply_text))
@register(outgoing=True, pattern="^.clap(?: |$)(.*)")
async def claptext(memereview):
"""Praise people!"""
textx = await memereview.get_reply_message()
message = memereview.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await memereview.edit("`Hah, I don't clap pointlessly!`")
return
reply_text = "👏 "
reply_text += message.replace(" ", " 👏 ")
reply_text += " 👏"
await memereview.edit(reply_text)
@register(outgoing=True, pattern="^.bt$")
async def bluetext(bt_e):
"""Believe me, you will find this useful."""
if await bt_e.get_reply_message() and bt_e.is_group:
await bt_e.edit(
"/BLUETEXT /MUST /CLICK.\n"
"/ARE /YOU /A /STUPID /ANIMAL /WHICH /IS /ATTRACTED /TO /COLOURS?")
@register(outgoing=True, pattern=r"^.f (.*)")
async def payf(event):
paytext = event.pattern_match.group(1)
pay = "{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}".format(
paytext * 8,
paytext * 8,
paytext * 2,
paytext * 2,
paytext * 2,
paytext * 6,
paytext * 6,
paytext * 2,
paytext * 2,
paytext * 2,
paytext * 2,
paytext * 2,
)
await event.edit(pay)
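# .lfy <query>: builds a "let me google that for you" (lmgtfy.com) link for the query and shortens it via is.gd.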
@register(outgoing=True, pattern="^.lfy (.*)")
async def let_me_google_that_for_you(lmgtfy_q):
textx = await lmgtfy_q.get_reply_message()
qry = lmgtfy_q.pattern_match.group(1)
if qry:
query = str(qry)
elif textx:
query = textx
query = query.message
query_encoded = query.replace(" ", "+")
lfy_url = f"http://lmgtfy.com/?s=g&iie=1&q={query_encoded}"
payload = {"format": "json", "url": lfy_url}
r = requests.get("http://is.gd/create.php", params=payload)
await lmgtfy_q.edit(f"Here you are, help yourself.\
\n[{query}]({r.json()['shorturl']})")
@register(pattern=r".scam(?: |$)(.*)", outgoing=True)
async def scam(event):
"""Just a small command to fake chat actions for fun !!"""
options = [
"typing",
"contact",
"game",
"location",
"voice",
"round",
"video",
"photo",
"document",
"cancel",
]
input_str = event.pattern_match.group(1)
args = input_str.split()
if len(args) == 0: # Let bot decide action and time
scam_action = choice(options)
scam_time = randint(30, 60)
elif len(args) == 1: # User decides time/action, bot decides the other.
try:
scam_action = str(args[0]).lower()
scam_time = randint(30, 60)
except ValueError:
scam_action = choice(options)
scam_time = int(args[0])
elif len(args) == 2: # User decides both action and time
scam_action = str(args[0]).lower()
scam_time = int(args[1])
else:
await event.edit("`Invalid Syntax !!`")
return
try:
if scam_time > 0:
await event.delete()
async with event.client.action(event.chat_id, scam_action):
await sleep(scam_time)
except BaseException:
return
@register(pattern=r".type(?: |$)(.*)", outgoing=True)
async def typewriter(typew):
"""Just a small command to make your keyboard become a typewriter!"""
textx = await typew.get_reply_message()
message = typew.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await typew.edit("`Give a text to type!`")
return
sleep_time = 0.03
typing_symbol = "|"
old_text = ""
await typew.edit(typing_symbol)
await sleep(sleep_time)
for character in message:
old_text = old_text + "" + character
typing_text = old_text + "" + typing_symbol
await typew.edit(typing_text)
await sleep(sleep_time)
await typew.edit(old_text)
await sleep(sleep_time)
@register(outgoing=True, pattern="^.fail$")
async def fail(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ `"
"`\n████▌▄▌▄▐▐▌█████ `"
"`\n████▌▄▌▄▐▐▌▀████ `"
"`\n▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ `")
@register(outgoing=True, pattern="^.lol$")
async def lol(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n╱┏┓╱╱╱╭━━━╮┏┓╱╱╱╱ `"
"`\n╱┃┃╱╱╱┃╭━╮┃┃┃╱╱╱╱ `"
"`\n╱┃┗━━┓┃╰━╯┃┃┗━━┓╱ `"
"`\n╱┗━━━┛╰━━━╯┗━━━┛╱ `")
@register(outgoing=True, pattern="^.lool$")
async def lool(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit(
"`\n╭╭━━━╮╮┈┈┈┈┈┈┈┈┈┈\n┈┃╭━━╯┈┈┈┈▕╲▂▂╱▏┈\n┈┃┃╱▔▔▔▔▔▔▔▏╱▋▋╮┈`"
"`\n┈┃╰▏┃╱╭╮┃╱╱▏╱╱▆┃┈\n┈╰━▏┗━╰╯┗━╱╱╱╰┻┫┈\n┈┈┈▏┏┳━━━━▏┏┳━━╯┈`"
"`\n┈┈┈▏┃┃┈┈┈┈▏┃┃┈┈┈┈ `")
@register(outgoing=True, pattern="^.stfu$")
async def stfu(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n█████████████████████████████████`"
"`\n██▀▀▀▀████▀▀▀▀████▀▀▀▀▀███▀▀██▀▀█`"
"`\n█──────██──────██───────██──██──█`"
"`\n█──██▄▄████──████──███▄▄██──██──█`"
"`\n█▄────▀████──████────█████──██──█`"
"`\n█▀▀██──████──████──███████──██──█`"
"`\n█──────████──████──███████──────█`"
"`\n██▄▄▄▄█████▄▄████▄▄████████▄▄▄▄██`"
"`\n█████████████████████████████████`")
@register(outgoing=True, pattern="^.gtfo$")
async def gtfo(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n███████████████████████████████ `"
"`\n█▀▀▀▀▀▀▀█▀▀▀▀▀▀█▀▀▀▀▀▀▀█▀▀▀▀▀▀█ `"
"`\n█───────█──────█───────█──────█ `"
"`\n█──███──███──███──███▄▄█──██──█ `"
"`\n█──███▄▄███──███─────███──██──█ `"
"`\n█──██───███──███──██████──██──█ `"
"`\n█──▀▀▀──███──███──██████──────█ `"
"`\n█▄▄▄▄▄▄▄███▄▄███▄▄██████▄▄▄▄▄▄█ `"
"`\n███████████████████████████████ `")
@register(outgoing=True, pattern="^.nih$")
async def nih(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n(\\_/)`"
"`\n(●_●)`"
"`\n />🌹 *ini buat kamu`"
"`\n `"
"`\n(\\_/)`"
"`\n(●_●)`"
"`\n🌹<\\ *tapi boong`")
@register(outgoing=True, pattern="^.fag$")
async def fag(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n█████████`"
"`\n█▄█████▄█`"
"`\n█▼▼▼▼▼`"
"`\n█ STFU FAGGOT'S`"
"`\n█▲▲▲▲▲`"
"`\n█████████`"
"`\n ██ ██`")
@register(outgoing=True, pattern="^.taco$")
async def taco(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("\n{\\__/}" "\n(●_●)" "\n( >🌮 Want a taco?")
@register(outgoing=True, pattern="^.gey$")
async def gey(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
"`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
"`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈NIGGA U GEY`"
"\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")
@register(outgoing=True, pattern="^.gay$")
async def gay(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
"`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
"`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈BAPAQ U GAY`"
"\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")
@register(outgoing=True, pattern="^.bot$")
async def bot(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("` \n ╲╲╭━━━━╮ \n╭╮┃▆┈┈▆┃╭╮ \n┃╰┫▽▽▽┣╯┃ \n╰━┫△△△┣━╯`"
"`\n╲╲┃┈┈┈┈┃ \n╲╲┃┈┏┓┈┃ `")
@register(outgoing=True, pattern="^.hey$")
async def hey(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit(
"\n┈┈┈╱▔▔▔▔╲┈╭━━━━━\n┈┈▕▂▂▂▂▂▂▏┃HEY!┊😀`"
"`\n┈┈▕▔▇▔▔┳▔▏╰┳╮HEY!┊\n┈┈▕╭━╰╯━╮▏━╯╰━━━\n╱▔▔▏▅▅▅▅▕▔▔╲┈┈┈┈`"
"`\n▏┈┈╲▂▂▂▂╱┈┈┈▏┈┈┈`")
@register(outgoing=True, pattern="^.nou$")
async def nou(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈╭╮╭╮\n┈┃┃┃┃\n╭┻┗┻┗╮`"
"`\n┃┈▋┈▋┃\n┃┈╭▋━╮━╮\n┃┈┈╭╰╯╰╯╮`"
"`\n┫┈┈ NoU\n┃┈╰╰━━━━╯`"
"`\n┗━━┻━┛`")
@register(outgoing=True, pattern="^.tolol$")
async def tolol(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n░▀█▀░▄▀▄░█▒░░▄▀▄░█▒░`" "`\n░▒█▒░▀▄▀▒█▄▄░▀▄▀▒█▄▄`")
CMD_HELP.update({
"memes":
">`.cowsay`\
\nUsage: cow which says things.\
\n\n>`:/`\
\nUsage: Check yourself ;)\
\n\n>`-_-`\
\nUsage: Ok...\
\n\n>`;_;`\
\nUsage: Like `-_-` but crying.\
\n\n>`.cp`\
\nUsage: Copypasta the famous meme\
\n\n>`.vapor`\
\nUsage: Vaporize everything!\
\n\n>`.str`\
\nUsage: Stretch it.\
\n\n>`.zal`\
\nUsage: Invoke the feeling of chaos.\
\n\n>`Oof`\
\nUsage: Ooooof\
\n\n>`.moon`\
\nUsage: kensar moon animation.\
\n\n>`.clock`\
\nUsage: kensar clock animation.\
\n\n>`.hi`\
\nUsage: Greet everyone!\
\n\n>`.coinflip` <heads/tails>\
\nUsage: Flip a coin !!\
\n\n>`.owo`\
\nUsage: UwU\
\n\n>`.react`\
\nUsage: Make your userbot react to everything.\
\n\n>`.slap`\
\nUsage: reply to slap them with random objects !!\
\n\n>`.cry`\
\nUsage: y u du dis, i cri.\
\n\n>`.shg`\
\nUsage: Shrug at it !!\
\n\n>`.run`\
\nUsage: Let Me Run, run, RUNNN!\
\n\n>`.chase`\
\nUsage: You better start running\
\n\n>`.metoo`\
\nUsage: Haha yes\
\n\n>`.mock`\
\nUsage: Do it and find the real fun.\
\n\n>`.clap`\
\nUsage: Praise people!\
\n\n>`.f` <emoji/character>\
\nUsage: Pay Respects.\
\n\n>`.bt`\
\nUsage: Believe me, you will find this useful.\
\n\n>`.type`\
\nUsage: Just a small command to make your keyboard become a typewriter!\
\n\n>`.lfy` <query>\
\nUsage: Let me Google that for you real quick !!\
\n\n>`.boobs`\
\nUsage: Get b00bs imej\
\n\n>`.butts`\
\nUsage: Get 🅱️utts imej\
\n\n>`.decide` [Alternates: (`.yes`, `.no`, `.maybe`)]\
\nUsage: Make a quick decision.\
\n\n>`.scam` <action> <time>\
\n[Available Actions: (`typing`, `contact`, `game`, `location`, `voice`, `round`, `video`, `photo`, `document`, `cancel`)]\
\nUsage: Create fake chat actions, for fun. (Default action: typing)\
\n\nAnd many more\
\n| `.nou` | `.bot` | `.gay` | `.gey` | `.tf` | `.paw` | `.taco` | `.nih` |\
\n| `.fag` | `.gtfo` | `.stfu` | `.lol` | `.lool` | `.fail` | `.love` |\
\n| `.rain` | `.earth` | `.ii` | `.tolol` |\
\n\n\nThanks to 🅱️ottom🅱️ext🅱️ot (@NotAMemeBot) for some of these."
})
| 27.331013
| 157
| 0.492925
|
244babbf3623feea9ad9d1cd5fef7f49ecba161f
| 2,226
|
py
|
Python
|
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DeleteNatGatewayRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | 1
|
2021-03-08T02:59:17.000Z
|
2021-03-08T02:59:17.000Z
|
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DeleteNatGatewayRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | 1
|
2020-05-31T14:51:47.000Z
|
2020-05-31T14:51:47.000Z
|
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DeleteNatGatewayRequest.py
|
jia-jerry/aliyun-openapi-python-sdk
|
e90f3683a250cfec5b681b5f1d73a68f0dc9970d
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DeleteNatGatewayRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DeleteNatGateway','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_NatGatewayId(self):
return self.get_query_params().get('NatGatewayId')
def set_NatGatewayId(self,NatGatewayId):
self.add_query_param('NatGatewayId',NatGatewayId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
| 35.903226
| 75
| 0.774483
|
dc6b3a6418366530343586e5dec8c35e03595883
| 991
|
py
|
Python
|
Demo_6/libscipy_leastsq_demo.py
|
AlexYu-beta/Python_Study
|
6d90b605e47d9341b20a41e7384be269243b14ba
|
[
"Unlicense"
] | null | null | null |
Demo_6/libscipy_leastsq_demo.py
|
AlexYu-beta/Python_Study
|
6d90b605e47d9341b20a41e7384be269243b14ba
|
[
"Unlicense"
] | null | null | null |
Demo_6/libscipy_leastsq_demo.py
|
AlexYu-beta/Python_Study
|
6d90b605e47d9341b20a41e7384be269243b14ba
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
an example from http://old.sebug.net/paper/books/scipydoc/scipy_intro.html
a simple demo using python scientific stack numpy and scipy and more
"""
import numpy as np
from scipy.optimize import leastsq
import pylab as pl
def func(x, p):
"""
    Function used to fit the data: A*sin(2*pi*k*x + theta)
"""
A, k, theta = p
return A*np.sin(2*np.pi*k*x+theta)
def residuals(p, y, x):
"""
    Difference between the experimental data x, y and the fitted function; p holds the coefficients the fit needs to find
"""
return y - func(x, p)
x = np.linspace(0, -2*np.pi, 100)
A, k, theta = 10, 0.34, np.pi/6 # parameters of the true underlying function
y0 = func(x, [A, k, theta]) # true data
y1 = y0 + 2 * np.random.randn(len(x)) # experimental data with added noise
p0 = [7, 0.2, 0] # initial guess for the fit parameters
# call leastsq to fit the data:
# residuals is the function that computes the error,
# p0 is the initial value of the fit parameters,
# args is the experimental data to be fitted
plsq = leastsq(residuals, p0, args=(y1, x))
print u"true parameters:", [A, k, theta]
print u"fitted parameters", plsq[0] # parameters obtained by fitting the experimental data
pl.plot(x, y0, label=u"true data")
pl.plot(x, y1, label=u"noisy experimental data")
pl.plot(x, func(x, plsq[0]), label=u"fitted data")
pl.legend()
pl.show()
| 23.046512
| 74
| 0.63774
|
d7aa5ff4450f4209596552c357d120b178d4be73
| 1,221
|
py
|
Python
|
punkin/_value.py
|
artPlusPlus/punkin
|
0265b06d67463a5aaf27e0b2a771220d41cae5ee
|
[
"MIT"
] | 1
|
2017-10-24T10:02:29.000Z
|
2017-10-24T10:02:29.000Z
|
punkin/_value.py
|
artPlusPlus/punkin
|
0265b06d67463a5aaf27e0b2a771220d41cae5ee
|
[
"MIT"
] | null | null | null |
punkin/_value.py
|
artPlusPlus/punkin
|
0265b06d67463a5aaf27e0b2a771220d41cae5ee
|
[
"MIT"
] | null | null | null |
from . import UNSET
class Value(object):
def __init__(self, context_key, setter=None):
self._context_key = context_key
self._setter = setter
self._data = {}
def __getitem__(self, context):
try:
return self._data[context[self._context_key]]
except KeyError:
return UNSET
def __setitem__(self, context, value):
if value is UNSET:
try:
del self[context]
except KeyError:
pass
return
if self._setter:
value = self._setter(value)
self._data[context[self._context_key]] = value
def __delitem__(self, context):
del self._data[context[self._context_key]]
def merge(self, source_value, context):
source_value = source_value[context]
target_value = self[context]
if source_value is UNSET:
self[context] = target_value
elif target_value is UNSET:
self[context] = source_value
else:
# TODO: Handle merge operations (add, subtract, multiply, etc)
# Example: self[context] = target_value * source_value
self[context] = source_value
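# Illustrative usage sketch (editorial addition, not part of the original
# module); the context key 'user' and the sample contexts below are made up.
if __name__ == '__main__':
    v = Value('user', setter=str.strip)
    ctx_alice, ctx_bob = {'user': 'alice'}, {'user': 'bob'}
    v[ctx_alice] = '  hello '
    assert v[ctx_alice] == 'hello'  # the setter normalised the stored value
    assert v[ctx_bob] is UNSET      # unknown contexts resolve to UNSET
    v[ctx_alice] = UNSET            # assigning UNSET removes the entry
    assert v[ctx_alice] is UNSET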
| 29.071429
| 74
| 0.588043
|
5d6996d7ef313609fa9aed2286c4fe6b702c871a
| 14,563
|
py
|
Python
|
pytoshop/path.py
|
andrewvaliente/pytoshop
|
42f88f69906e6fe8e765ca2ebce4477ae6a203f1
|
[
"BSD-3-Clause"
] | 106
|
2016-11-22T18:52:22.000Z
|
2022-02-22T03:01:16.000Z
|
pytoshop/path.py
|
andrewvaliente/pytoshop
|
42f88f69906e6fe8e765ca2ebce4477ae6a203f1
|
[
"BSD-3-Clause"
] | 28
|
2017-05-26T00:54:59.000Z
|
2021-08-06T04:56:03.000Z
|
pytoshop/path.py
|
andrewvaliente/pytoshop
|
42f88f69906e6fe8e765ca2ebce4477ae6a203f1
|
[
"BSD-3-Clause"
] | 20
|
2017-06-05T21:12:18.000Z
|
2022-01-24T18:41:37.000Z
|
# -*- coding: utf-8 -*-
"""
Handle Bézier paths.
"""
from __future__ import unicode_literals, absolute_import
import struct
import six
from . import docs
from . import enums
from . import util
from typing import Any, BinaryIO, Dict, List, Optional, TYPE_CHECKING, Union # NOQA
if TYPE_CHECKING:
from . import core # NOQA
def _to_float(value): # type: (Union[float, int]) -> float
if isinstance(value, int):
value = float(value)
if not isinstance(value, float):
raise ValueError("Must be a float")
return value
def _read_point(x, size):
# type: (Union[float, int], Union[float, int]) -> float
return (float(x) / (1 << 24)) * float(size)
def _write_point(x, size):
# type: (Union[float, int], Union[float, int]) -> float
return int((x / float(size)) * (1 << 24))
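# Editorial note (not from the original source): the two helpers above convert
# between pixel coordinates and the 24-bit fixed-point fractions of the image
# size stored in Photoshop path records.  For a hypothetical 100-pixel-wide
# image, a knot at x = 50 px round-trips exactly:
#     _write_point(50, 100) == 1 << 23        # 0.5 * 2**24
#     _read_point(1 << 23, 100) == 50.0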
class _PathRecordMeta(type):
"""
A metaclass that builds a mapping of subclasses.
"""
mapping = {} # type: Dict[int, PathRecord]
def __new__(cls, name, parents, dct):
new_cls = type.__new__(cls, name, parents, dct)
if '_type' in dct:
if dct['_type'] in cls.mapping:
raise ValueError("Duplicate type '{}'".format(dct['_type']))
cls.mapping[dct['_type']] = new_cls
return new_cls
@six.add_metaclass(_PathRecordMeta)
class PathRecord(object):
_type = -1
@property
def type(self):
# type: (...) -> int
return self._type
def length(self, header):
# type: (core.Header) -> int
return 26
@classmethod
@util.trace_read
def read(cls, fd, header):
# type: (BinaryIO, core.Header) -> PathRecord
type = util.read_value(fd, 'H')
new_cls = _PathRecordMeta.mapping[type]
return new_cls.read_data(fd, header)
@classmethod
def read_data(cls, fd, header):
# type: (BinaryIO, core.Header) -> PathRecord
raise NotImplementedError()
def write(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(fd, 'H', self.type)
self.write_data(fd, header)
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
raise NotImplementedError()
class PathFillRuleRecord(PathRecord):
_type = enums.PathRecordType.path_fill_rule_record
@classmethod
def read_data(cls, fd, header):
# type: (BinaryIO, core.Header) -> PathRecord
padding = fd.read(24)
if padding != b'\0' * 24: # pragma: no cover
raise ValueError(
"Invalid padding in path fill rule record")
return cls()
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
fd.write(b'\0' * 24)
class InitialFillRuleRecord(PathRecord):
def __init__(self,
all_pixels=False # type: bool
): # type: (...) -> None
self.all_pixels = all_pixels
_type = enums.PathRecordType.initial_fill_rule_record
@property
def all_pixels(self): # type: (...) -> bool
'Fill starts with all pixels'
return self._all_pixels
@all_pixels.setter
def all_pixels(self, value): # type: (Any) -> None
self._all_pixels = bool(value)
@classmethod
def read_data(cls, fd, header):
# type: (BinaryIO, core.Header) -> PathRecord
all_pixels = bool(util.read_value(fd, 'H'))
padding = fd.read(22)
if padding != b'\0' * 22: # pragma: no cover
raise ValueError(
"Invalid padding in initial fill rule record")
util.log("all_pixels: {}", all_pixels)
return cls(all_pixels=all_pixels)
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(fd, 'H', self.all_pixels)
fd.write(b'\0' * 22)
class _LengthRecord(PathRecord):
def __init__(self,
num_knots=0 # type: int
): # type: (...) -> None
self.num_knots = num_knots
@property
def num_knots(self): # type: (...) -> int
"Number of Bezier knots"
return self._num_knots
@num_knots.setter
def num_knots(self, value): # type: (int) -> None
if (not isinstance(value, int) or
value < 0 or value > 65535):
raise ValueError("num_knots must be a 16-bit integer")
self._num_knots = value
@classmethod
def read_data(cls, fd, header):
# type: (BinaryIO, core.Header) -> PathRecord
num_knots = util.read_value(fd, 'H')
fd.read(22)
util.log('num_knots: {}', num_knots)
return cls(num_knots=num_knots)
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
util.write_value(fd, 'H', self.num_knots)
fd.write(b'\0' * 22)
class ClosedSubpathLengthRecord(_LengthRecord):
_type = enums.PathRecordType.closed_subpath_length
class OpenSubpathLengthRecord(_LengthRecord):
_type = enums.PathRecordType.open_subpath_length
class _PointRecord(PathRecord):
def __init__(self,
y0=0.0, # type: float
x0=0.0, # type: float
y1=None, # type: Optional[float]
x1=None, # type: Optional[float]
y2=None, # type: Optional[float]
x2=None # type: Optional[float]
): # type: (...) -> None
self.y0 = y0
self.x0 = x0
self.y1 = y1
self.x1 = x1
self.y2 = y2
self.x2 = x2
@property
def y0(self): # type: (...) -> Union[int, float]
'y of control point preceding the knot, in pixels'
return self._y0
@y0.setter
def y0(self, value): # type: (Union[int, float]) -> None
self._y0 = _to_float(value)
@property
def x0(self): # type: (...) -> Union[int, float]
'x of control point preceding the knot, in pixels'
return self._x0
@x0.setter
def x0(self, value): # type: (Union[int, float]) -> None
self._x0 = _to_float(value)
@property
def y1(self): # type: (...) -> Optional[Union[int, float]]
'y of anchor point of the knot'
return self._y1
@y1.setter
def y1(self, value): # type: (Optional[Union[int, float]]) -> None
if value is None:
self._y1 = value
else:
self._y1 = _to_float(value)
@property
def x1(self): # type: (...) -> Optional[Union[int, float]]
'x of anchor point of the knot'
return self._x1
@x1.setter
def x1(self, value): # type: (Optional[Union[int, float]]) -> None
if value is None:
self._x1 = value
else:
self._x1 = _to_float(value)
@property
def y2(self): # type: (...) -> Optional[Union[int, float]]
'y of control point for the segment leaving the knot, in pixels'
return self._y2
@y2.setter
def y2(self, value): # type: (Optional[Union[int, float]]) -> None
if value is None:
self._y2 = value
else:
self._y2 = _to_float(value)
@property
def x2(self): # type: (...) -> Optional[Union[int, float]]
'x of control point for the segment leaving the knot, in pixels'
return self._x2
@x2.setter
def x2(self, value): # type: (Optional[Union[int, float]]) -> None
if value is None:
self._x2 = value
else:
self._x2 = _to_float(value)
@classmethod
def read_data(cls, fd, header):
# type: (BinaryIO, core.Header) -> PathRecord
data = fd.read(24)
y0, x0, y1, x1, y2, x2 = struct.unpack(str('>iiiiii'), data)
y0 = _read_point(y0, header.height)
x0 = _read_point(x0, header.width)
y1 = _read_point(y1, header.height)
x1 = _read_point(x1, header.width)
y2 = _read_point(y2, header.height)
x2 = _read_point(x2, header.width)
util.log(
'({}, {}) ({}, {}), ({}, {})',
y0, x0, y1, x1, y2, x2)
return cls(y0=y0, x0=x0, y1=y1, x1=x1, y2=y2, x2=x2)
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
y0 = _write_point(self.y0, header.height)
x0 = _write_point(self.x0, header.width)
if self.y1 is None:
y1 = y0
else:
y1 = _write_point(self.y1, header.height)
if self.x1 is None:
x1 = x0
else:
x1 = _write_point(self.x1, header.width)
if self.y2 is None:
y2 = y0
else:
y2 = _write_point(self.y2, header.height)
if self.x2 is None:
x2 = x0
else:
x2 = _write_point(self.x2, header.width)
util.write_value(fd, 'iiiiii', y0, x0, y1, x1, y2, x2)
class ClosedSubpathBezierKnotLinked(_PointRecord):
_type = enums.PathRecordType.closed_subpath_bezier_knot_linked
class ClosedSubpathBezierKnotUnlinked(_PointRecord):
_type = enums.PathRecordType.closed_subpath_bezier_knot_unlinked
class OpenSubpathBezierKnotLinked(_PointRecord):
_type = enums.PathRecordType.open_subpath_bezier_knot_linked
class OpenSubpathBezierKnotUnlinked(_PointRecord):
_type = enums.PathRecordType.open_subpath_bezier_knot_unlinked
class ClipboardRecord(PathRecord):
def __init__(self,
top=0.0, # type: float
left=0.0, # type: float
bottom=0.0, # type: float
right=0.0, # type: float
resolution=0 # type: int
): # type: (...) -> None
self.top = top
self.left = left
self.bottom = bottom
self.right = right
self.resolution = resolution
_type = enums.PathRecordType.clipboard_record
@property
def top(self): # type: (...) -> Union[int, float]
"top, in pixels"
return self._top
@top.setter
def top(self, value): # type: (Union[int, float]) -> None
self._top = _to_float(value)
@property
def left(self): # type: (...) -> Union[int, float]
"left, in pixels"
return self._left
@left.setter
def left(self, value): # type: (Union[int, float]) -> None
self._left = _to_float(value)
@property
def bottom(self): # type: (...) -> Union[int, float]
"bottom, in pixels"
return self._bottom
@bottom.setter
def bottom(self, value): # type: (Union[int, float]) -> None
self._bottom = _to_float(value)
@property
def right(self): # type: (...) -> Union[int, float]
"right, in pixels"
return self._right
@right.setter
def right(self, value): # type: (Union[int, float]) -> None
self._right = _to_float(value)
@property
def resolution(self): # type: (...) -> int
"resolution"
return self._resolution
@resolution.setter
def resolution(self, value): # type: (int) -> None
if not isinstance(value, int):
raise TypeError("resolution must be an int")
self._resolution = value
@classmethod
def read_data(cls, fd, header):
# type: (BinaryIO, core.Header) -> PathRecord
data = fd.read(24)
top, left, bottom, right, resolution, _ = struct.unpack(
str('>iiiiii'), data)
top = _read_point(top, header.height)
left = _read_point(left, header.width)
bottom = _read_point(bottom, header.height)
right = _read_point(right, header.width)
util.log(
'position: ({}, {}, {}, {}), resolution: {}',
top, left, bottom, right, resolution)
return cls(top=top, left=left, bottom=bottom, right=right,
resolution=resolution)
def write_data(self, fd, header):
# type: (BinaryIO, core.Header) -> None
top = _write_point(self.top, header.height)
left = _write_point(self.left, header.width)
bottom = _write_point(self.bottom, header.height)
right = _write_point(self.right, header.width)
util.write_value(
fd, 'iiiiii', top, left, bottom, right, self.resolution, 0
)
class PathResource(object):
def __init__(self,
path_records=[] # List[PathRecord]
):
self.path_records = path_records
@property
def path_records(self): # type: (...) -> List[PathRecord]
"List of `PathRecord` instances."
return self._path_records
@path_records.setter
def path_records(self, value): # type: (List[PathRecord]) -> None
util.assert_is_list_of(value, PathRecord)
self._path_records = value
def length(self, header): # type: (core.Header) -> int
return sum(x.length(header) for x in self.path_records)
length.__doc__ = docs.length # type: ignore
@classmethod
def read(cls, fd, length, header):
# type: (BinaryIO, int, core.Header) -> PathResource
end = fd.tell() + length
path_records = []
while fd.tell() + 26 <= end:
path_records.append(
PathRecord.read(fd, header))
if (path_records[0].type !=
enums.PathRecordType.path_fill_rule_record):
raise ValueError(
'Path resource must start with path_fill_rule_record')
fd.seek(end - fd.tell(), 1)
return cls(path_records=path_records)
@classmethod
def from_rect(cls,
top, # type: Union[int, float]
left, # type: Union[int, float]
bottom, # type: Union[int, float]
right, # type: Union[int, float]
all_pixels=True # type: bool
): # type: (...) -> PathResource
return cls(path_records=[
PathFillRuleRecord(),
InitialFillRuleRecord(all_pixels=False),
OpenSubpathLengthRecord(num_knots=5),
OpenSubpathBezierKnotLinked(y0=top, x0=left),
OpenSubpathBezierKnotLinked(y0=top, x0=right),
OpenSubpathBezierKnotLinked(y0=bottom, x0=right),
OpenSubpathBezierKnotLinked(y0=bottom, x0=left),
OpenSubpathBezierKnotLinked(y0=top, x0=left),
]
)
def write(self, fd, header):
# type: (BinaryIO, core.Header) -> None
for path_record in self.path_records:
path_record.write(fd, header)
| 29.360887
| 84
| 0.572684
|
69754571cb5e1dc924e000b3492fa83dee6711a2
| 17
|
py
|
Python
|
samplepkg/__init__.py
|
schahal/sample
|
61e5e76598011e64d859eaeceb9f724feb319e07
|
[
"MIT"
] | null | null | null |
samplepkg/__init__.py
|
schahal/sample
|
61e5e76598011e64d859eaeceb9f724feb319e07
|
[
"MIT"
] | null | null | null |
samplepkg/__init__.py
|
schahal/sample
|
61e5e76598011e64d859eaeceb9f724feb319e07
|
[
"MIT"
] | null | null | null |
# Copyright 2020
| 8.5
| 16
| 0.764706
|
5971110ebc5c8bc9d4d9f1e47c09bb9348b439fb
| 538
|
py
|
Python
|
src/spyglass/decoding/__init__.py
|
LorenFrankLab/SpyGlass
|
5c2764b6ba5f7e9e47ddad5d2b9ce5039a0d3f41
|
[
"MIT"
] | 1
|
2022-03-22T12:13:18.000Z
|
2022-03-22T12:13:18.000Z
|
src/spyglass/decoding/__init__.py
|
LorenFrankLab/SpyGlass
|
5c2764b6ba5f7e9e47ddad5d2b9ce5039a0d3f41
|
[
"MIT"
] | 17
|
2022-03-22T14:42:04.000Z
|
2022-03-31T23:58:39.000Z
|
src/spyglass/decoding/__init__.py
|
LorenFrankLab/SpyGlass
|
5c2764b6ba5f7e9e47ddad5d2b9ce5039a0d3f41
|
[
"MIT"
] | 1
|
2022-03-23T20:04:25.000Z
|
2022-03-23T20:04:25.000Z
|
# flake8: noqa
from .clusterless import (ClusterlessClassifierParameters, MarkParameters,
MultiunitFiringRate, MultiunitHighSynchronyEvents,
MultiunitHighSynchronyEventsParameters,
UnitMarkParameters, UnitMarks, UnitMarksIndicator,
UnitMarksIndicatorSelection)
from .sorted_spikes import (SortedSpikesClassifierParameters,
SortedSpikesIndicator,
SortedSpikesIndicatorSelection)
| 53.8
| 76
| 0.630112
|
ca4eb6e28f84452d292d520db510e4773d86a486
| 3,845
|
py
|
Python
|
tests/collections/tts/test_waveglow.py
|
karpnv/NeMo
|
0572fccde6e33ccd692683d2e549bb08b0e2b100
|
[
"Apache-2.0"
] | null | null | null |
tests/collections/tts/test_waveglow.py
|
karpnv/NeMo
|
0572fccde6e33ccd692683d2e549bb08b0e2b100
|
[
"Apache-2.0"
] | null | null | null |
tests/collections/tts/test_waveglow.py
|
karpnv/NeMo
|
0572fccde6e33ccd692683d2e549bb08b0e2b100
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from unittest import TestCase
import onnx
import pytest
import torch
from omegaconf import DictConfig
from nemo.collections.tts.models import WaveGlowModel
from nemo.collections.tts.modules import WaveGlowModule
from nemo.core.classes import typecheck
mcfg = DictConfig(
{
"_target_": "nemo.collections.tts.modules.waveglow.WaveGlowModule",
"n_flows": 12,
"n_group": 8,
"n_mel_channels": 80,
"n_early_every": 4,
"n_early_size": 2,
"n_wn_channels": 512,
"n_wn_layers": 8,
"wn_kernel_size": 3,
}
)
pcfg = DictConfig(
{
"_target_": "nemo.collections.asr.parts.features.FilterbankFeatures",
"dither": 0.0,
"nfilt": 80,
"stft_conv": True,
}
)
wcfg = DictConfig({"waveglow": mcfg, "sigma": 1.0, "preprocessor": pcfg,})
def input_example(sz):
mel = torch.randn(1, 1, 80, sz).cuda().half()
z = torch.randn(1, 8, sz * 256 // 8, 1).cuda().half()
return (
mel,
z,
)
def taco2wg(spec, z):
spec = spec.permute(0, 3, 2, 1).contiguous()
return spec.view(spec.size(0), spec.size(1), -1), z.view(z.size(0), z.size(1), -1)
# Wrapper method to convert Jasper's Taco2 output to WG input and call inference
def forward_wrapper(self, spec, z=None):
spec, z = taco2wg(spec, z)
audio = self.waveglow.norm_dist_to_audio(spec=spec, sigma=1.0, z=z)
return audio
class TestWaveGlow:
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_export_to_onnx(self):
model = WaveGlowModel(wcfg)
model = model.cuda().half()
typecheck.set_typecheck_enabled(enabled=False)
with tempfile.TemporaryDirectory() as tmpdir, model.nemo_infer():
# Generate filename in the temporary directory.
tmp_file_name = os.path.join("waveglow.onnx")
n_mels = 80
# Test export.
inp = input_example(n_mels)
inp1 = taco2wg(*inp)
inp2 = inp1
res1 = model.waveglow(*inp1)
res2 = model.waveglow(*inp2)
assert torch.allclose(res1, res2, rtol=0.01, atol=0.1)
model.export(
tmp_file_name,
verbose=True,
input_example=inp,
output_example=res1,
try_script=False,
check_trace=False,
do_constant_folding=True,
dynamic_axes={"spec": [0], "z": [0], "audio": [0]},
forward_method=forward_wrapper,
)
try:
test_runtime = True
import onnxruntime
except (ImportError, ModuleNotFoundError):
test_runtime = False
if test_runtime:
omodel = onnx.load(tmp_file_name)
output_names = ['audio']
sess = onnxruntime.InferenceSession(omodel.SerializeToString())
output = sess.run(None, {"spec": inp[0].cpu().numpy(), "z": inp[1].cpu().numpy()})[0]
assert torch.allclose(torch.from_numpy(output), res2.cpu(), rtol=1, atol=100)
if __name__ == "__main__":
t = TestWaveGlow()
t.test_export_to_onnx()
| 31.260163
| 101
| 0.610403
|
b4454f59ac310df61dccc7b2c05be51e63305802
| 3,385
|
py
|
Python
|
maps/util.py
|
WPRDC/neighborhood-simulacrum
|
46892dfdbc8bc3201e31fee4ee991c49b208753e
|
[
"MIT"
] | null | null | null |
maps/util.py
|
WPRDC/neighborhood-simulacrum
|
46892dfdbc8bc3201e31fee4ee991c49b208753e
|
[
"MIT"
] | null | null | null |
maps/util.py
|
WPRDC/neighborhood-simulacrum
|
46892dfdbc8bc3201e31fee4ee991c49b208753e
|
[
"MIT"
] | null | null | null |
import json
import urllib.request
from typing import TYPE_CHECKING, Type
from urllib.error import URLError
from django.db import connection
from indicators.data import GeogCollection
if TYPE_CHECKING:
from django.db.models.sql import Query
from indicators.models import TimeAxis, Variable
from geo.models import AdminRegion
def refresh_tile_index():
""" Signal tile server to register new view """
try:
urllib.request.urlopen("http://127.0.0.1:3000/index.json")
except URLError:
pass
def as_tile_server_query(query: 'Query'):
import re
query_str = as_geometry_query(query)
return re.sub(r',\s*\"geo_adminregion\".\"mini_geom\"\s*,', ',',
re.sub(r',\s*\"geo_adminregion\".\"geom\"\s*,', ',', query_str))
def as_geometry_query(query: 'Query'):
"""
Removes casting of geometries to byte arrays as necessary for what django does
but not useful as a raw postgis query
:param query: QuerySet Query object to modify
"""
return str(query).replace('::bytea', '')
def menu_view_name(geog_type: Type['AdminRegion']):
return f'maps.v_{geog_type.geog_type_id.lower()}'
# noinspection SqlAmbiguousColumn ,SqlResolve
def store_map_data(
map_slug: str,
geog_collection: 'GeogCollection',
time_axis: 'TimeAxis',
variable: 'Variable',
use_percent: bool
):
table_name = f'maps.t_{map_slug}'
view_name = f'maps.v_{map_slug}'
data = variable.get_values(geog_collection, time_axis)
number_format_options = {'style': 'percent'} if use_percent else variable.locale_options
with connection.cursor() as cursor:
cursor.execute(f"""DROP VIEW IF EXISTS {view_name}""")
cursor.execute(f"""DROP TABLE IF EXISTS {table_name}""")
cursor.execute(f"""CREATE TABLE {table_name} (geoid varchar(63), value numeric)""")
sql_rows = [f"""('{datum.geog}', {datum.percent if use_percent else datum.value})""" for datum in data]
cursor.execute(f"""INSERT INTO {table_name} (geoid, value) VALUES """ + ", ".join(sql_rows))
base_geography_subquery = as_geometry_query(geog_collection.geog_type.objects.filter(in_extent=True).query)
cursor.execute(
f"""CREATE VIEW {view_name} AS
SELECT
geo.name as geo_name,
geo.geom as the_geom,
geo.geom_webmercator as the_geom_webmercator,
%(title)s as title,
dat.value::float as value,
%(number_format_options)s as number_format_options
FROM {table_name} dat
JOIN ({base_geography_subquery}) geo ON dat.geoid = geo.global_geoid""",
{'title': variable.name, 'number_format_options': json.dumps(number_format_options)}
)
refresh_tile_index()
def store_menu_layer(geog_type: Type['AdminRegion']):
print('Adding menu view for', geog_type.geog_type_id)
view_name = menu_view_name(geog_type)
view_query = as_tile_server_query(geog_type.objects.filter(in_extent=True).query)
with connection.cursor() as cursor:
cursor.execute(f"""DROP VIEW IF EXISTS {view_name}""")
cursor.execute(f"""CREATE VIEW {view_name} AS {view_query}""")
print('Menu view added.')
refresh_tile_index()
| 35.631579
| 115
| 0.651699
|
8dd10d6c3bd224f77b9055fe1522afd09f723631
| 2,536
|
py
|
Python
|
tests/test_console.py
|
SoheilSalmani/wikipedia-cli
|
ab9c65cf694a4ac120c4bc3b256233a976b293c4
|
[
"MIT"
] | null | null | null |
tests/test_console.py
|
SoheilSalmani/wikipedia-cli
|
ab9c65cf694a4ac120c4bc3b256233a976b293c4
|
[
"MIT"
] | 3
|
2021-09-20T04:59:17.000Z
|
2021-09-21T02:49:13.000Z
|
tests/test_console.py
|
SoheilSalmani/wikipedia-cli
|
ab9c65cf694a4ac120c4bc3b256233a976b293c4
|
[
"MIT"
] | null | null | null |
"""Test cases for the console module."""
from unittest.mock import Mock
from click.testing import CliRunner
import marshmallow
import pytest
from pytest_mock import MockFixture
import requests
from wikipedia_cli import console
from wikipedia_cli.wikipedia import page_schema
@pytest.fixture
def cli_runner() -> CliRunner:
"""Fixture for invoking command-line interfaces."""
return CliRunner()
@pytest.fixture
def mock_wikipedia_get_random(mocker: MockFixture) -> Mock:
"""Fixture for mocking wikipedia.get_random."""
mock = mocker.patch("wikipedia_cli.wikipedia.get_random")
mock.return_value = page_schema.load(
{
"title": "Title of the article",
"extract": "Extract of the article.",
}
)
return mock
@pytest.mark.unit
def test_main_returns_0(cli_runner: CliRunner, mock_wikipedia_get_random: Mock) -> None:
"""It exits with a status code of zero."""
result = cli_runner.invoke(console.main)
assert result.exit_code == 0
@pytest.mark.unit
def test_main_prints_title(
cli_runner: CliRunner, mock_wikipedia_get_random: Mock
) -> None:
"""It prints the title of the Wikipedia page."""
result = cli_runner.invoke(console.main)
assert "Title of the article" in result.output
@pytest.mark.unit
def test_main_uses_specified_lang(
cli_runner: CliRunner, mock_wikipedia_get_random: Mock
) -> None:
"""It uses the specified language edition of Wikipedia."""
cli_runner.invoke(console.main, ["--lang=pl"])
mock_wikipedia_get_random.assert_called_with(lang="pl")
@pytest.mark.unit
def test_main_handles_request_exceptions(
cli_runner: CliRunner, mock_wikipedia_get_random: Mock
) -> None:
"""It exists with a non-zero status code if the request fails."""
mock_wikipedia_get_random.side_effect = requests.RequestException("request failed")
result = cli_runner.invoke(console.main)
assert "Error" in result.output
@pytest.mark.unit
def test_main_handles_validation_errors(
cli_runner: CliRunner, mock_wikipedia_get_random: Mock
) -> None:
"""It raises `ClickException` when validation fails."""
mock_wikipedia_get_random.side_effect = marshmallow.ValidationError("invalid data")
result = cli_runner.invoke(console.main)
assert "Error" in result.output
@pytest.mark.e2e
def test_main_returns_0_in_production(cli_runner: CliRunner) -> None:
"""It exists with a status code of zero (end-to-end)."""
result = cli_runner.invoke(console.main)
assert result.exit_code == 0
| 30.190476
| 88
| 0.736987
|
983e0d51ea008796d91364116949316f1b29ee7d
| 1,785
|
py
|
Python
|
home/migrations/0002_create_homepage.py
|
digitaloxford/do-wagtail
|
49dd75b95109ebb38bf66aca13d3fdeb8e25d319
|
[
"MIT"
] | 2
|
2021-04-11T11:59:51.000Z
|
2021-04-12T06:56:23.000Z
|
home/migrations/0002_create_homepage.py
|
digitaloxford/do-wagtail
|
49dd75b95109ebb38bf66aca13d3fdeb8e25d319
|
[
"MIT"
] | 8
|
2021-04-10T10:40:27.000Z
|
2022-01-25T16:32:22.000Z
|
home/migrations/0002_create_homepage.py
|
digitaloxford/do-wagtail
|
49dd75b95109ebb38bf66aca13d3fdeb8e25d319
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import migrations
def create_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# If migration is run multiple times, it may have already been deleted
Page.objects.filter(id=2).delete()
# Create content type for homepage model
homepage_content_type, __ = ContentType.objects.get_or_create(
model='homepage', app_label='home')
# Create a new homepage
homepage = HomePage.objects.create(
title="Home",
draft_title="Home",
slug='home',
content_type=homepage_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.create(
hostname='localhost', root_page=homepage, is_default_site=True)
def remove_homepage(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
HomePage = apps.get_model('home.HomePage')
# Delete the default homepage
# Page and Site objects CASCADE
HomePage.objects.filter(slug='home', depth=2).delete()
# Delete content type for homepage model
ContentType.objects.filter(model='homepage', app_label='home').delete()
class Migration(migrations.Migration):
run_before = [
("wagtailcore", "0053_locale_model"), # added for Wagtail 2.11 compatibility
]
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RunPython(create_homepage, remove_homepage),
]
| 28.333333
| 85
| 0.671709
|
54b37333ce41942056b0ef2896d2fae33b82176e
| 4,985
|
py
|
Python
|
openpeerpower/components/vera/climate.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
openpeerpower/components/vera/climate.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
openpeerpower/components/vera/climate.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Support for Vera thermostats."""
from __future__ import annotations
from typing import Any
import pyvera as veraApi
from openpeerpower.components.climate import (
DOMAIN as PLATFORM_DOMAIN,
ENTITY_ID_FORMAT,
ClimateEntity,
)
from openpeerpower.components.climate.const import (
FAN_AUTO,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from openpeerpower.config_entries import ConfigEntry
from openpeerpower.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from openpeerpower.core import OpenPeerPower
from openpeerpower.helpers.entity_platform import AddEntitiesCallback
from openpeerpower.util import convert
from . import VeraDevice
from .common import ControllerData, get_controller_data
FAN_OPERATION_LIST = [FAN_ON, FAN_AUTO]
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
SUPPORT_HVAC = [HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF]
async def async_setup_entry(
opp: OpenPeerPower,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the sensor config entry."""
controller_data = get_controller_data(opp, entry)
async_add_entities(
[
VeraThermostat(device, controller_data)
for device in controller_data.devices.get(PLATFORM_DOMAIN)
],
True,
)
class VeraThermostat(VeraDevice[veraApi.VeraThermostat], ClimateEntity):
"""Representation of a Vera Thermostat."""
def __init__(
self, vera_device: veraApi.VeraThermostat, controller_data: ControllerData
) -> None:
"""Initialize the Vera device."""
VeraDevice.__init__(self, vera_device, controller_data)
self.entity_id = ENTITY_ID_FORMAT.format(self.vera_id)
@property
def supported_features(self) -> int | None:
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
mode = self.vera_device.get_hvac_mode()
if mode == "HeatOn":
return HVAC_MODE_HEAT
if mode == "CoolOn":
return HVAC_MODE_COOL
if mode == "AutoChangeOver":
return HVAC_MODE_HEAT_COOL
return HVAC_MODE_OFF
@property
def hvac_modes(self) -> list[str]:
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return SUPPORT_HVAC
@property
def fan_mode(self) -> str | None:
"""Return the fan setting."""
mode = self.vera_device.get_fan_mode()
if mode == "ContinuousOn":
return FAN_ON
return FAN_AUTO
@property
def fan_modes(self) -> list[str] | None:
"""Return a list of available fan modes."""
return FAN_OPERATION_LIST
def set_fan_mode(self, fan_mode) -> None:
"""Set new target temperature."""
if fan_mode == FAN_ON:
self.vera_device.fan_on()
else:
self.vera_device.fan_auto()
self.schedule_update_op_state()
@property
def current_power_w(self) -> float | None:
"""Return the current power usage in W."""
power = self.vera_device.power
if power:
return convert(power, float, 0.0)
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
vera_temp_units = self.vera_device.vera_controller.temperature_units
if vera_temp_units == "F":
return TEMP_FAHRENHEIT
return TEMP_CELSIUS
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
return self.vera_device.get_current_temperature()
@property
def operation(self) -> str:
"""Return current operation ie. heat, cool, idle."""
return self.vera_device.get_hvac_mode()
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
return self.vera_device.get_current_goal_temperature()
def set_temperature(self, **kwargs: Any) -> None:
"""Set new target temperatures."""
if kwargs.get(ATTR_TEMPERATURE) is not None:
self.vera_device.set_temperature(kwargs.get(ATTR_TEMPERATURE))
self.schedule_update_op_state()
def set_hvac_mode(self, hvac_mode) -> None:
"""Set new target hvac mode."""
if hvac_mode == HVAC_MODE_OFF:
self.vera_device.turn_off()
elif hvac_mode == HVAC_MODE_HEAT_COOL:
self.vera_device.turn_auto_on()
elif hvac_mode == HVAC_MODE_COOL:
self.vera_device.turn_cool_on()
elif hvac_mode == HVAC_MODE_HEAT:
self.vera_device.turn_heat_on()
self.schedule_update_op_state()
| 30.212121
| 83
| 0.666199
|
ff3281864fea8b6702d3f149ee891e505b61e118
| 3,288
|
py
|
Python
|
mooquant_formula/helpers/symbol.py
|
bopo/mooquant_formula
|
8a7b5030f5bc53fe35850a8d0825f34e7d9d8660
|
[
"MIT"
] | null | null | null |
mooquant_formula/helpers/symbol.py
|
bopo/mooquant_formula
|
8a7b5030f5bc53fe35850a8d0825f34e7d9d8660
|
[
"MIT"
] | 1
|
2021-03-25T21:47:29.000Z
|
2021-03-25T21:47:29.000Z
|
mooquant_formula/helpers/symbol.py
|
bopo/mooquant_formula
|
8a7b5030f5bc53fe35850a8d0825f34e7d9d8660
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import os
import re
import requests
STOCK_CODE_PATH = 'symbol.json'
def round_price_by_code(price, code):
"""
根据代码类型[股票,基金] 截取制定位数的价格
:param price: 证券价格
:param code: 证券代码
:return: str 截断后的价格的字符串表示
"""
if isinstance(price, str):
return price
typ = get_code_type(code)
if typ == 'fund':
return '{:.3f}'.format(price)
return '{:.2f}'.format(price)
def get_ipo_info(only_today=False):
import pyquery
response = requests.get('http://vip.stock.finance.sina.com.cn/corp/go.php/vRPD_NewStockIssue/page/1.phtml', headers={'accept-encoding': 'gzip, deflate, sdch'})
html = response.content.decode('gbk')
html_obj = pyquery.PyQuery(html)
table_html = html_obj('#con02-0').html()
import pandas as pd
df = pd.read_html(io.StringIO(table_html), skiprows=3,
converters={
'证券代码': str,
'申购代码': str
}
)[0]
if only_today:
today = datetime.datetime.now().strftime('%Y-%m-%d')
df = df[df['上网发行日期↓'] == today]
return df
def update_stock_codes():
"""获取所有股票 ID 到 all_stock_code 目录下"""
all_stock_codes_url = 'http://www.shdjt.com/js/lib/astock.js'
grep_stock_codes = re.compile('~(\d+)`')
response = requests.get(all_stock_codes_url)
all_stock_codes = grep_stock_codes.findall(response.text)
with open(stock_code_path(), 'w') as f:
f.write(json.dumps(dict(stock=all_stock_codes)))
def get_all_stock_codes(realtime=False):
"""获取所有股票 ID"""
    return get_stock_codes(realtime=realtime)
def get_stock_codes(symbol_path=None, realtime=False):
"""获取所有股票 ID 到 all_stock_code 目录下"""
if realtime:
all_stock_codes_url = 'http://www.shdjt.com/js/lib/astock.js'
grep_stock_codes = re.compile('~(\d+)`')
response = requests.get(all_stock_codes_url)
symbol_code = grep_stock_codes.findall(response.text)
symbol_path = symbol_path if symbol_path else stock_code_path()
with open(symbol_path, 'w') as f:
json.dump(symbol_code, f)
return symbol_code
    else:
        symbol_path = symbol_path if symbol_path else stock_code_path()
        with open(symbol_path) as f:
            return json.load(f)
def get_stock_type(stock_code):
"""判断股票ID对应的证券市场
匹配规则
['50', '51', '60', '90', '110'] 为 sh
['00', '13', '18', '15', '16', '18', '20', '30', '39', '115'] 为 sz
['5', '6', '9'] 开头的为 sh, 其余为 sz
:param stock_code:股票ID, 若以 'sz', 'sh' 开头直接返回对应类型,否则使用内置规则判断
:return 'sh' or 'sz'"""
assert type(stock_code) is str, 'stock code need str type'
if stock_code.startswith(('sh', 'sz')):
return stock_code[:2]
if stock_code.startswith(('50', '51', '60', '90', '110', '113', '132', '204')):
return 'sh'
if stock_code.startswith(('00', '13', '18', '15', '16', '18', '20', '30', '39', '115', '1318')):
return 'sz'
if stock_code.startswith(('5', '6', '9')):
return 'sh'
return 'sz'
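# Illustrative examples (editorial addition, not in the original module):
#     get_stock_type('600519')    -> 'sh'  (starts with '60')
#     get_stock_type('000001')    -> 'sz'  (starts with '00')
#     get_stock_type('sz300750')  -> 'sz'  (an explicit 'sh'/'sz' prefix is returned as-is)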
def get_code_type(code):
"""
    Determine which type a code belongs to; currently only ['fund', 'stock'] are supported.
    :return: str, the code type: 'fund' for funds, 'stock' for stocks
"""
if code.startswith(('00', '30', '60')):
return 'stock'
return 'fund'
def stock_code_path():
return os.path.join(os.path.dirname(__file__), STOCK_CODE_PATH)
| 26.95082
| 163
| 0.604015
|
d4ba002d7be92f6b1896af5919811305d4bd01cf
| 13,335
|
py
|
Python
|
train_mdfd.py
|
rchopinw/Tree-regularization-project
|
afc477d6bf76d0b226c97beac499fc56db36f269
|
[
"MIT"
] | null | null | null |
train_mdfd.py
|
rchopinw/Tree-regularization-project
|
afc477d6bf76d0b226c97beac499fc56db36f269
|
[
"MIT"
] | null | null | null |
train_mdfd.py
|
rchopinw/Tree-regularization-project
|
afc477d6bf76d0b226c97beac499fc56db36f269
|
[
"MIT"
] | null | null | null |
"""Run this file to train demo GRU-Tree."""
from __future__ import print_function
from __future__ import absolute_import
import pydotplus
from copy import deepcopy
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.optimizers import sgd, adam
from autograd import grad
import _pickle as cPickle
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from model_mdfd import build_gru, build_mlp
from model_mdfd import softplus
from model_mdfd import save_obj, load_obj
import pickle as pk
class GRUTree(object):
def __init__(self, in_count, state_count, hidden_sizes, out_count, strength=200.0):
self.gru = GRU(in_count, state_count, out_count)
self.mlp = MLP(self.gru.num_weights, hidden_sizes, 1)
self.mlp.weights = self.mlp.init_weights(0.1) # init for 1st run
self.strength = strength
def objective(self, W, X, F, y):
path_length = self.mlp.pred_fun(self.mlp.weights,
W[:self.gru.num_weights][:, np.newaxis]).ravel()[0]
return -self.gru.loglike_fun(W, X, F, y) + self.strength * path_length
def average_path_length_batch(self, weights_batch, X_train, F_train, y_train):
apl_batch = np.zeros((1, weights_batch.shape[1]))
for i in range(weights_batch.shape[1]):
apl_batch[0, i] = self.gru.average_path_length(weights_batch[:, i],
X_train, F_train, y_train)
return apl_batch
def train(self, X_train, F_train, y_train, iters_retrain=25, num_iters=1000,
batch_size=32, lr=1e-3, param_scale=0.01, log_every=10):
npr.seed(42)
num_retrains = num_iters // iters_retrain
for i in range(num_retrains):
self.gru.objective = self.objective
# carry over weights from last training
# init_weights = self.gru.weights if i > 0 else None
# print('training deep net... [%d/%d], learning rate: %.4f' % (i + 1, num_retrains, lr))
# self.gru.train(X_train, F_train, y_train, num_iters=iters_retrain,
# batch_size=batch_size, lr=lr, param_scale=param_scale,
# log_every=log_every, init_weights=init_weights)
# print('building surrogate dataset...')
# W_train = deepcopy(self.gru.saved_weights.T)
# save_obj({'a': W_train}, 'W_saved_intermid')
W_train = load_obj('W_saved_intermid')['a']
# APL_train = self.average_path_length_batch(W_train, X_train, F_train, y_train)
# save_obj({'a': APL_train}, 'APL_train')
APL_train = np.reshape(np.random.randint(3, 6, 200), (1, 200))
print('training surrogate net... [%d/%d]' % (i + 1, num_retrains))
print(self.gru.num_weights)
self.mlp.train(W_train[:self.gru.num_weights, :], APL_train, num_iters=3000,
lr=1e-3, param_scale=0.1, log_every=250)
self.pred_fun = self.gru.pred_fun
self.weights = self.gru.weights
# save final decision tree
self.tree = self.gru.fit_tree(self.weights, X_train, F_train, y_train)
return self.weights
class GRU(object):
def __init__(self, in_count, state_count, out_count):
super(GRU, self).__init__()
pred_fun, loglike_fun, parser = build_gru(in_count, state_count, out_count)
self.num_weights = parser.num_weights
self.in_count = in_count
self.out_count = out_count
self.state_count = state_count
self.pred_fun = pred_fun
self.loglike_fun = loglike_fun
self.saved_weights = None
def objective(self, W, X, F, y):
return -self.loglike_fun(W, X, F, y)
def init_weights(self, param_scale):
return npr.randn(self.num_weights) * param_scale
def fit_tree(self, weights, X_train, F_train, y_train):
"""Train decision tree to track path length."""
interval = F_train[1] - F_train[0]
y_train_hat = self.pred_fun(weights, X_train, F_train)
y_train_hat_int = np.argmax(y_train_hat, axis=0)
X_train = np.array([X_train[0][x:(x+interval)] for x in F_train[:-1]]).T
tree = DecisionTreeClassifier(min_samples_leaf=25)
tree.fit(X_train.T, y_train_hat_int.T)
return tree
def average_path_length(self, weights, X_train, F_train, y_train):
interval = F_train[1] - F_train[0]
tree = self.fit_tree(weights, X_train, F_train, y_train)
X_train = np.array([X_train[0][x:(x + interval)] for x in F_train[:-1]]).T
path_length = average_path_length(tree, X_train.T)
print(path_length)
return path_length
def train(self, X_train, F_train, y_train, batch_size=32, num_iters=1000,
lr=1e-3, param_scale=0.01, log_every=100, init_weights=None):
grad_fun = build_batched_grad_fences(grad(self.objective), batch_size,
X_train, F_train, y_train)
print('Batched gradient fences building completed')
if init_weights is None:
init_weights = self.init_weights(param_scale)
saved_weights = np.zeros((num_iters, self.num_weights))
        def callback(weights, i, gradients):  # computing the log-likelihood over the full sample is too slow, so it was dropped
apl = self.average_path_length(weights, X_train, F_train, y_train)
saved_weights[i, :] = weights
loss_train = self.objective(weights, X_train, F_train, y_train)
if i % log_every == 0:
print('model: gru | iter: {} | loss: {:.2f} | apl: {:.2f}'.format(i, loss_train, apl))
print('Optimization started.')
print(self.num_weights)
optimized_weights = adam(grad_fun, init_weights, num_iters=num_iters,
step_size=lr, callback=callback)
self.saved_weights = saved_weights
self.weights = optimized_weights
return optimized_weights
class MLP(object):
def __init__(self, in_count, hidden_sizes, out_count):
super(MLP, self).__init__()
layer_specs = [in_count] + hidden_sizes + [out_count]
pred_fun, loglike_fun, parser = build_mlp(layer_specs, output_activation=softplus)
self.num_weights = parser.num_weights
self.in_count = in_count
self.out_count = out_count
self.hidden_sizes = hidden_sizes
self.pred_fun = pred_fun
self.loglike_fun = loglike_fun
def objective(self, W, X, y):
return self.loglike_fun(W, X, y) + np.linalg.norm(W[:self.num_weights], ord=2)
def init_weights(self, param_scale):
return npr.randn(self.num_weights) * param_scale
def train(self, X_train, y_train, batch_size=32, num_iters=1000,
lr=1e-3, param_scale=0.01, log_every=100, init_weights=None):
grad_fun = build_batched_grad(grad(self.objective), batch_size,
X_train, y_train)
if init_weights is None:
init_weights = self.init_weights(param_scale)
def callback(weights, i, gradients):
loss_train = self.objective(weights, X_train, y_train)
if i % log_every == 0:
print('model: mlp | iter: {} | loss: {:.2f}'.format(i, loss_train))
optimized_weights = adam(grad_fun, init_weights, num_iters=num_iters,
step_size=lr, callback=callback)
self.weights = optimized_weights
return optimized_weights
def average_path_length(tree, X):
"""Compute average path length: cost of simulating the average
example; this is used in the objective function.
@param tree: DecisionTreeClassifier instance
@param X: NumPy array (D x N)
D := number of dimensions
N := number of examples
@return path_length: float
average path length
"""
leaf_indices = tree.apply(X)
leaf_counts = np.bincount(leaf_indices)
leaf_i = np.arange(tree.tree_.node_count)
path_length = np.dot(leaf_i, leaf_counts) / float(X.shape[0])
return path_length
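# Minimal usage sketch (editorial addition; it mirrors how GRU.fit_tree and
# GRU.average_path_length call this helper, with rows as examples):
#     tree = DecisionTreeClassifier(min_samples_leaf=25).fit(X_samples, y_hat)
#     apl = average_path_length(tree, X_samples)
# The returned scalar is the tree-complexity proxy that the surrogate MLP is
# trained to predict from the GRU weight vectors.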
def get_ith_minibatch_ixs(i, num_data, batch_size):
"""Split data into minibatches.
@param i: integer
iteration index
@param num_data: integer
number of data points
@param batch_size: integer
number of data points in a batch
@return batch_slice: slice object
"""
    num_minibatches = num_data // batch_size + ((num_data % batch_size) > 0)
i = i % num_minibatches
start = i * batch_size
stop = start + batch_size
return slice(start, stop)
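# Example (editorial addition): with num_data=10 and batch_size=4 there are 3
# minibatches, so i=0 -> slice(0, 4), i=1 -> slice(4, 8), i=2 -> slice(8, 12)
# (slicing past the end just yields the remaining 2 examples), and i=3 wraps
# back to slice(0, 4) because i is reduced modulo the number of minibatches.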
def build_batched_grad(grad, batch_size, inputs, targets):
"""Return grad on batched gradient.
@param grad: gradient function.
@param batch_size: integer
batch size
@param inputs: NumPy Array
size D x N
@param targets: NumPy Array
size O x N
@return batched_grad: function
function to compute gradients on inputs.
"""
def batched_grad(weights, i):
cur_idxs = get_ith_minibatch_ixs(i, targets.shape[1], batch_size)
return grad(weights, inputs[:, cur_idxs], targets[:, cur_idxs])
return batched_grad
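# Usage sketch (editorial addition; this is exactly how MLP.train wires it up):
#     grad_fun = build_batched_grad(grad(objective), batch_size, X_train, y_train)
#     weights = adam(grad_fun, init_weights, num_iters=num_iters, step_size=lr)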
def get_ith_minibatch_ixs_fences(b_i, batch_size, fences):
"""Split timeseries data of uneven sequence lengths into batches.
This is how we handle different sized sequences.
@param b_i: integer
iteration index
@param batch_size: integer
size of batch
@param fences: list of integers
sequence of cutoff array
@return idx: integer
@return batch_slice: slice object
"""
num_data = len(fences) - 1
num_minibatches = num_data / batch_size + ((num_data % batch_size) > 0)
b_i = b_i % num_minibatches
idx = slice(int(b_i * batch_size), int((b_i+1) * batch_size))
batch_i = np.arange(num_data)[idx]
batch_slice = np.concatenate([range(i, j) for i, j in
zip(fences[batch_i], fences[batch_i+1])])
print(batch_slice, batch_i, idx)
return idx, batch_slice
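# Example (editorial addition): with fences = np.array([0, 3, 7, 12]) there are
# three sequences of lengths 3, 4 and 5; batch_size=2 and b_i=0 selects the
# first two sequences, giving idx = slice(0, 2) and
# batch_slice = [0, 1, 2, 3, 4, 5, 6], i.e. their concatenated time indices.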
def build_batched_grad_fences(grad, batch_size, inputs, fences, targets):
"""Return grad on batched gradient.
@param grad: gradient function.
@param batch_size: integer
batch size
@param inputs: NumPy Array
size D x N
@param fences: NumPy Array
@param targets: NumPy Array
size O x N
@return batched_grad: function
function to compute gradients on inputs with fenceposts.
"""
def batched_grad(weights, i):
cur_idxs, cur_slice = get_ith_minibatch_ixs_fences(i, batch_size, fences)
batched_inputs = inputs[:, cur_slice]
batched_targets = None if targets is None else targets[:, cur_slice]
batched_fences = fences[cur_idxs.start:cur_idxs.stop+1] - fences[cur_idxs.start]
return grad(weights, batched_inputs, batched_fences, batched_targets)
return batched_grad
def visualize(tree, save_path):
"""Generate PDF of a decision tree.
@param tree: DecisionTreeClassifier instance
@param save_path: string
where to save tree PDF
"""
dot_data = export_graphviz(tree, out_file=None,
filled=True, rounded=True)
graph = pydotplus.graph_from_dot_data(dot_data)
graph = make_graph_minimal(graph) # remove extra text
if not save_path is None:
graph.write_pdf(save_path)
def make_graph_minimal(graph):
nodes = graph.get_nodes()
for node in nodes:
old_label = node.get_label()
label = prune_label(old_label)
if label is not None:
node.set_label(label)
return graph
def prune_label(label):
if label is None:
return None
if len(label) == 0:
return None
label = label[1:-1]
parts = [part for part in label.split('\\n')
if 'gini =' not in part and 'samples =' not in part]
return '"' + '\\n'.join(parts) + '"'
if __name__ == "__main__":
import os
import _pickle as cPickle
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--strength', type=float, default=1000.0,
help='how much to weigh tree-regularization term.')
args = parser.parse_args(args=[])
with open('./data/train.pkl', 'rb') as fp:
data_train = cPickle.load(fp)
X_train = data_train['X']
F_train = data_train['F']
y_train = data_train['y']
gru = GRUTree(1, 2, [10], 4, strength=args.strength)
gru.train(X_train, F_train, y_train, iters_retrain=200, num_iters=400,
batch_size=100, lr=1e-2, param_scale=0.1, log_every=4)
if not os.path.isdir('./trained_models'):
os.mkdir('./trained_models')
with open('./trained_models/trained_weights.pkl', 'wb') as fp:
cPickle.dump({'gru': gru.gru.weights, 'mlp': gru.mlp.weights}, fp)
print('saved trained model to ./trained_models')
visualize(gru.tree, './trained_models/tree.pdf')
print('saved final decision tree to ./trained_models')
| 39.569733
| 102
| 0.631796
|
0348444cb65ab6717235355c8bc90f3045bbc964
| 1,553
|
py
|
Python
|
samples/test/reused_component_test.py
|
rahulsmehta/pipelines
|
a0a8f1da8cb7ca53cde7717aa78e666b634fec75
|
[
"Apache-2.0"
] | 2,860
|
2018-05-24T04:55:01.000Z
|
2022-03-31T13:49:56.000Z
|
samples/test/reused_component_test.py
|
rahulsmehta/pipelines
|
a0a8f1da8cb7ca53cde7717aa78e666b634fec75
|
[
"Apache-2.0"
] | 7,331
|
2018-05-16T09:03:26.000Z
|
2022-03-31T23:22:04.000Z
|
samples/test/reused_component_test.py
|
rahulsmehta/pipelines
|
a0a8f1da8cb7ca53cde7717aa78e666b634fec75
|
[
"Apache-2.0"
] | 1,359
|
2018-05-15T11:05:41.000Z
|
2022-03-31T09:42:09.000Z
|
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import kfp
from .reused_component import my_pipeline
from kfp.samples.test.utils import run_pipeline_func, TestCase, KfpMlmdClient
def verify(run, mlmd_connection_config, **kwargs):
t = unittest.TestCase()
t.maxDiff = None # we always want to see full diff
t.assertEqual(run.status, 'Succeeded')
client = KfpMlmdClient(mlmd_connection_config=mlmd_connection_config)
tasks = client.get_tasks(run_id=run.id)
t.assertEqual(17, tasks['add-3'].outputs.parameters['sum'],
'add result should be 17')
run_pipeline_func([
# Cannot test V2_ENGINE and V1_LEGACY using the same code.
# V2_ENGINE requires importing everything from v2 namespace.
# TestCase(
# pipeline_func=my_pipeline,
# verify_func=verify,
# mode=kfp.dsl.PipelineExecutionMode.V2_ENGINE
# ),
TestCase(
pipeline_func=my_pipeline,
mode=kfp.dsl.PipelineExecutionMode.V1_LEGACY,
),
])
| 33.042553
| 77
| 0.727624
|
eb19280c81e86f20c07301cee666403868c2e2df
| 204,891
|
py
|
Python
|
patches_tool/aws_patch/aws_deps/libcloud/compute/drivers/ec2.py
|
Hybrid-Cloud/badam
|
390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb
|
[
"Apache-2.0"
] | 2
|
2015-06-15T02:16:33.000Z
|
2022-02-23T07:10:38.000Z
|
patches_tool/aws_patch/aws_deps/libcloud/compute/drivers/ec2.py
|
Hybrid-Cloud/badam
|
390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb
|
[
"Apache-2.0"
] | 7
|
2016-05-13T06:39:45.000Z
|
2016-05-20T02:55:31.000Z
|
patches_tool/aws_patch/aws_deps/libcloud/compute/drivers/ec2.py
|
Hybrid-Cloud/badam
|
390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb
|
[
"Apache-2.0"
] | 4
|
2015-11-02T04:02:50.000Z
|
2021-05-13T17:06:00.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Amazon EC2, Eucalyptus, Nimbus and Outscale drivers.
"""
import re
import sys
import base64
import copy
import warnings
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import b, basestring, ensure_string
from libcloud.utils.xml import fixxpath, findtext, findattr, findall
from libcloud.utils.publickey import get_pubkey_ssh2_fingerprint
from libcloud.utils.publickey import get_pubkey_comment
from libcloud.utils.iso8601 import parse_date
from libcloud.common.aws import AWSBaseResponse, SignedAWSConnection
from libcloud.common.aws import DEFAULT_SIGNATURE_VERSION
from libcloud.common.types import (InvalidCredsError, MalformedResponseError,
LibcloudError)
from libcloud.compute.providers import Provider
from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize
from libcloud.compute.base import NodeImage, StorageVolume, VolumeSnapshot
from libcloud.compute.base import KeyPair
from libcloud.compute.types import NodeState, KeyPairDoesNotExistError, \
StorageVolumeState
__all__ = [
'API_VERSION',
'NAMESPACE',
'INSTANCE_TYPES',
'OUTSCALE_INSTANCE_TYPES',
'OUTSCALE_SAS_REGION_DETAILS',
'OUTSCALE_INC_REGION_DETAILS',
'DEFAULT_EUCA_API_VERSION',
'EUCA_NAMESPACE',
'EC2NodeDriver',
'BaseEC2NodeDriver',
'NimbusNodeDriver',
'EucNodeDriver',
'OutscaleSASNodeDriver',
'OutscaleINCNodeDriver',
'EC2NodeLocation',
'EC2ReservedNode',
'EC2SecurityGroup',
'EC2PlacementGroup',
'EC2Network',
'EC2NetworkSubnet',
'EC2NetworkInterface',
'EC2RouteTable',
'EC2Route',
'EC2SubnetAssociation',
'ExEC2AvailabilityZone',
'IdempotentParamError'
]
API_VERSION = '2013-10-15'
NAMESPACE = 'http://ec2.amazonaws.com/doc/%s/' % (API_VERSION)
# Eucalyptus Constants
DEFAULT_EUCA_API_VERSION = '3.3.0'
EUCA_NAMESPACE = 'http://msgs.eucalyptus.com/%s' % (DEFAULT_EUCA_API_VERSION)
"""
Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them.
From http://aws.amazon.com/ec2/instance-types/
"""
INSTANCE_TYPES = {
't1.micro': {
'id': 't1.micro',
'name': 'Micro Instance',
'ram': 613,
'disk': 15,
'bandwidth': None
},
'm1.small': {
'id': 'm1.small',
'name': 'Small Instance',
'ram': 1740,
'disk': 160,
'bandwidth': None
},
'm1.medium': {
'id': 'm1.medium',
'name': 'Medium Instance',
'ram': 3700,
'disk': 410,
'bandwidth': None
},
'm1.large': {
'id': 'm1.large',
'name': 'Large Instance',
'ram': 7680,
'disk': 850,
'bandwidth': None
},
'm1.xlarge': {
'id': 'm1.xlarge',
'name': 'Extra Large Instance',
'ram': 15360,
'disk': 1690,
'bandwidth': None
},
'c1.medium': {
'id': 'c1.medium',
'name': 'High-CPU Medium Instance',
'ram': 1740,
'disk': 350,
'bandwidth': None
},
'c1.xlarge': {
'id': 'c1.xlarge',
'name': 'High-CPU Extra Large Instance',
'ram': 7680,
'disk': 1690,
'bandwidth': None
},
'm2.xlarge': {
'id': 'm2.xlarge',
'name': 'High-Memory Extra Large Instance',
'ram': 17510,
'disk': 420,
'bandwidth': None
},
'm2.2xlarge': {
'id': 'm2.2xlarge',
'name': 'High-Memory Double Extra Large Instance',
'ram': 35021,
'disk': 850,
'bandwidth': None
},
'm2.4xlarge': {
'id': 'm2.4xlarge',
'name': 'High-Memory Quadruple Extra Large Instance',
'ram': 70042,
'disk': 1690,
'bandwidth': None
},
'm3.medium': {
'id': 'm3.medium',
'name': 'Medium Instance',
'ram': 3840,
'disk': 4000,
'bandwidth': None
},
'm3.large': {
'id': 'm3.large',
'name': 'Large Instance',
'ram': 7168,
'disk': 32000,
'bandwidth': None
},
'm3.xlarge': {
'id': 'm3.xlarge',
'name': 'Extra Large Instance',
'ram': 15360,
'disk': 80000,
'bandwidth': None
},
'm3.2xlarge': {
'id': 'm3.2xlarge',
'name': 'Double Extra Large Instance',
'ram': 30720,
'disk': 160000,
'bandwidth': None
},
'cg1.4xlarge': {
'id': 'cg1.4xlarge',
'name': 'Cluster GPU Quadruple Extra Large Instance',
'ram': 22528,
'disk': 1690,
'bandwidth': None
},
'g2.2xlarge': {
'id': 'g2.2xlarge',
'name': 'Cluster GPU G2 Double Extra Large Instance',
'ram': 15000,
'disk': 60,
'bandwidth': None,
},
'cc1.4xlarge': {
'id': 'cc1.4xlarge',
'name': 'Cluster Compute Quadruple Extra Large Instance',
'ram': 23552,
'disk': 1690,
'bandwidth': None
},
'cc2.8xlarge': {
'id': 'cc2.8xlarge',
'name': 'Cluster Compute Eight Extra Large Instance',
'ram': 63488,
'disk': 3370,
'bandwidth': None
},
# c3 instances have 2 SSDs of the specified disk size
'c3.large': {
'id': 'c3.large',
'name': 'Compute Optimized Large Instance',
'ram': 3750,
'disk': 32, # x2
'bandwidth': None
},
'c3.xlarge': {
'id': 'c3.xlarge',
'name': 'Compute Optimized Extra Large Instance',
'ram': 7500,
'disk': 80, # x2
'bandwidth': None
},
'c3.2xlarge': {
'id': 'c3.2xlarge',
'name': 'Compute Optimized Double Extra Large Instance',
'ram': 15000,
'disk': 160, # x2
'bandwidth': None
},
'c3.4xlarge': {
'id': 'c3.4xlarge',
'name': 'Compute Optimized Quadruple Extra Large Instance',
'ram': 30000,
'disk': 320, # x2
'bandwidth': None
},
'c3.8xlarge': {
'id': 'c3.8xlarge',
'name': 'Compute Optimized Eight Extra Large Instance',
'ram': 60000,
'disk': 640, # x2
'bandwidth': None
},
'cr1.8xlarge': {
'id': 'cr1.8xlarge',
'name': 'High Memory Cluster Eight Extra Large',
'ram': 244000,
'disk': 240,
'bandwidth': None
},
'hs1.4xlarge': {
'id': 'hs1.4xlarge',
'name': 'High Storage Quadruple Extra Large Instance',
'ram': 61952,
'disk': 2048,
'bandwidth': None
},
'hs1.8xlarge': {
'id': 'hs1.8xlarge',
'name': 'High Storage Eight Extra Large Instance',
'ram': 119808,
'disk': 48000,
'bandwidth': None
},
# i2 instances have up to eight SSD drives
'i2.xlarge': {
'id': 'i2.xlarge',
'name': 'High Storage Optimized Extra Large Instance',
'ram': 31232,
'disk': 800,
'bandwidth': None
},
'i2.2xlarge': {
'id': 'i2.2xlarge',
'name': 'High Storage Optimized Double Extra Large Instance',
'ram': 62464,
'disk': 1600,
'bandwidth': None
},
'i2.4xlarge': {
'id': 'i2.4xlarge',
'name': 'High Storage Optimized Quadruple Large Instance',
'ram': 124928,
'disk': 3200,
'bandwidth': None
},
'i2.8xlarge': {
'id': 'i2.8xlarge',
'name': 'High Storage Optimized Eight Extra Large Instance',
'ram': 249856,
'disk': 6400,
'bandwidth': None
},
'd2.xlarge': {
'id': 'd2.xlarge',
'name': 'High Storage Optimized Extra Large Instance',
'ram': 30050,
'disk': 6000, # 3 x 2 TB
'bandwidth': None
},
'd2.2xlarge': {
'id': 'd2.2xlarge',
'name': 'High Storage Optimized Double Extra Large Instance',
'ram': 61952,
'disk': 12000, # 6 x 2 TB
'bandwidth': None
},
'd2.4xlarge': {
'id': 'd2.4xlarge',
'name': 'High Storage Optimized Quadruple Extra Large Instance',
'ram': 122000,
'disk': 24000, # 12 x 2 TB
'bandwidth': None
},
'd2.8xlarge': {
'id': 'd2.8xlarge',
'name': 'High Storage Optimized Eight Extra Large Instance',
'ram': 244000,
'disk': 48000, # 24 x 2 TB
'bandwidth': None
},
# 1x SSD
'r3.large': {
'id': 'r3.large',
'name': 'Memory Optimized Large instance',
'ram': 15000,
'disk': 32,
'bandwidth': None
},
'r3.xlarge': {
'id': 'r3.xlarge',
'name': 'Memory Optimized Extra Large instance',
'ram': 30500,
'disk': 80,
'bandwidth': None
},
'r3.2xlarge': {
'id': 'r3.2xlarge',
'name': 'Memory Optimized Double Extra Large instance',
'ram': 61000,
'disk': 160,
'bandwidth': None
},
'r3.4xlarge': {
'id': 'r3.4xlarge',
'name': 'Memory Optimized Quadruple Extra Large instance',
'ram': 122000,
'disk': 320,
'bandwidth': None
},
'r3.8xlarge': {
'id': 'r3.8xlarge',
'name': 'Memory Optimized Eight Extra Large instance',
'ram': 244000,
'disk': 320, # x2
'bandwidth': None
},
't2.micro': {
'id': 't2.micro',
'name': 'Burstable Performance Micro Instance',
'ram': 1024,
'disk': 0, # EBS Only
'bandwidth': None,
'extra': {
'cpu': 1
}
},
# Burstable Performance General Purpose
't2.small': {
'id': 't2.small',
'name': 'Burstable Performance Small Instance',
'ram': 2048,
'disk': 0, # EBS Only
'bandwidth': None,
'extra': {
            'cpu': 1
}
},
't2.medium': {
'id': 't2.medium',
'name': 'Burstable Performance Medium Instance',
        'ram': 4096,
'disk': 0, # EBS Only
'bandwidth': None,
'extra': {
'cpu': 2
}
}
}
REGION_DETAILS = {
# US East (Northern Virginia) Region
'us-east-1': {
'endpoint': 'ec2.us-east-1.amazonaws.com',
'api_name': 'ec2_us_east',
'country': 'USA',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'cc2.8xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'cg1.4xlarge',
'g2.2xlarge',
'cr1.8xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
't2.micro',
't2.small',
't2.medium'
]
},
# US West (Northern California) Region
'us-west-1': {
'endpoint': 'ec2.us-west-1.amazonaws.com',
'api_name': 'ec2_us_west',
'country': 'USA',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'g2.2xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
't2.micro',
't2.small',
't2.medium'
]
},
# US West (Oregon) Region
'us-west-2': {
'endpoint': 'ec2.us-west-2.amazonaws.com',
'api_name': 'ec2_us_west_oregon',
'country': 'US',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'g2.2xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'hs1.8xlarge',
'cc2.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
't2.micro',
't2.small',
't2.medium'
]
},
# EU (Ireland) Region
'eu-west-1': {
'endpoint': 'ec2.eu-west-1.amazonaws.com',
'api_name': 'ec2_eu_west',
'country': 'Ireland',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'g2.2xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'hs1.8xlarge',
'cc2.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
't2.micro',
't2.small',
't2.medium'
]
},
# EU (Frankfurt) Region
'eu-central-1': {
'endpoint': 'ec2.eu-central-1.amazonaws.com',
'api_name': 'ec2_eu_central',
'country': 'Frankfurt',
'signature_version': '4',
'instance_types': [
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
't2.micro',
't2.small',
't2.medium'
]
},
# Asia Pacific (Singapore) Region
'ap-southeast-1': {
'endpoint': 'ec2.ap-southeast-1.amazonaws.com',
'api_name': 'ec2_ap_southeast',
'country': 'Singapore',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
't2.micro',
't2.small',
't2.medium'
]
},
# Asia Pacific (Tokyo) Region
'ap-northeast-1': {
'endpoint': 'ec2.ap-northeast-1.amazonaws.com',
'api_name': 'ec2_ap_northeast',
'country': 'Japan',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'g2.2xlarge',
'c1.xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
't2.micro',
't2.small',
't2.medium'
]
},
# South America (Sao Paulo) Region
'sa-east-1': {
'endpoint': 'ec2.sa-east-1.amazonaws.com',
'api_name': 'ec2_sa_east',
'country': 'Brazil',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
't2.micro',
't2.small',
't2.medium'
]
},
# Asia Pacific (Sydney) Region
'ap-southeast-2': {
'endpoint': 'ec2.ap-southeast-2.amazonaws.com',
'api_name': 'ec2_ap_southeast_2',
'country': 'Australia',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
't2.micro',
't2.small',
't2.medium'
]
},
'us-gov-west-1': {
'endpoint': 'ec2.us-gov-west-1.amazonaws.com',
'api_name': 'ec2_us_govwest',
'country': 'US',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'g2.2xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'hs1.4xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
't2.micro',
't2.small',
't2.medium'
]
},
'nimbus': {
# Nimbus clouds have 3 EC2-style instance types but their particular
# RAM allocations are configured by the admin
'country': 'custom',
'signature_version': '2',
'instance_types': [
'm1.small',
'm1.large',
'm1.xlarge'
]
}
}
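# The REGION_DETAILS mapping above ties a region name to an API endpoint,
# a signature version and the subset of INSTANCE_TYPES offered there;
# list_sizes() further down walks the region's 'instance_types' list and
# turns each name into a NodeSize. A minimal sketch of that relationship
# (commented out so module import is unaffected; the driver instantiation
# is an illustrative assumption, since the EC2NodeDriver constructor is
# not shown in this excerpt):
#
#     region = REGION_DETAILS['us-west-2']
#     specs = [INSTANCE_TYPES[name] for name in region['instance_types']]
#     # Equivalent, through the driver API (placeholder credentials):
#     # driver = EC2NodeDriver('access key', 'secret key', region='us-west-2')
#     # sizes = driver.list_sizes()  # NodeSize objects built from INSTANCE_TYPES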
"""
Sizes must be hardcoded because Outscale doesn't provide an API to fetch them.
Outscale cloud instances share some names with EC2 but have different
specifications so declare them in another constant.
"""
OUTSCALE_INSTANCE_TYPES = {
't1.micro': {
'id': 't1.micro',
'name': 'Micro Instance',
'ram': 615,
'disk': 0,
'bandwidth': None
},
'm1.small': {
'id': 'm1.small',
'name': 'Standard Small Instance',
'ram': 1740,
'disk': 150,
'bandwidth': None
},
'm1.medium': {
'id': 'm1.medium',
'name': 'Standard Medium Instance',
'ram': 3840,
'disk': 420,
'bandwidth': None
},
'm1.large': {
'id': 'm1.large',
'name': 'Standard Large Instance',
'ram': 7680,
'disk': 840,
'bandwidth': None
},
'm1.xlarge': {
'id': 'm1.xlarge',
'name': 'Standard Extra Large Instance',
'ram': 15360,
'disk': 1680,
'bandwidth': None
},
'c1.medium': {
'id': 'c1.medium',
'name': 'Compute Optimized Medium Instance',
'ram': 1740,
'disk': 340,
'bandwidth': None
},
'c1.xlarge': {
'id': 'c1.xlarge',
'name': 'Compute Optimized Extra Large Instance',
'ram': 7168,
'disk': 1680,
'bandwidth': None
},
'c3.large': {
'id': 'c3.large',
'name': 'Compute Optimized Large Instance',
'ram': 3840,
'disk': 32,
'bandwidth': None
},
'c3.xlarge': {
'id': 'c3.xlarge',
'name': 'Compute Optimized Extra Large Instance',
'ram': 7168,
'disk': 80,
'bandwidth': None
},
'c3.2xlarge': {
'id': 'c3.2xlarge',
'name': 'Compute Optimized Double Extra Large Instance',
'ram': 15359,
'disk': 160,
'bandwidth': None
},
'c3.4xlarge': {
'id': 'c3.4xlarge',
'name': 'Compute Optimized Quadruple Extra Large Instance',
'ram': 30720,
'disk': 320,
'bandwidth': None
},
'c3.8xlarge': {
'id': 'c3.8xlarge',
'name': 'Compute Optimized Eight Extra Large Instance',
'ram': 61440,
'disk': 640,
'bandwidth': None
},
'm2.xlarge': {
'id': 'm2.xlarge',
'name': 'High Memory Extra Large Instance',
'ram': 17510,
'disk': 420,
'bandwidth': None
},
'm2.2xlarge': {
'id': 'm2.2xlarge',
'name': 'High Memory Double Extra Large Instance',
'ram': 35020,
'disk': 840,
'bandwidth': None
},
'm2.4xlarge': {
'id': 'm2.4xlarge',
'name': 'High Memory Quadruple Extra Large Instance',
'ram': 70042,
'disk': 1680,
'bandwidth': None
},
'nv1.small': {
'id': 'nv1.small',
'name': 'GPU Small Instance',
'ram': 1739,
'disk': 150,
'bandwidth': None
},
'nv1.medium': {
'id': 'nv1.medium',
'name': 'GPU Medium Instance',
'ram': 3839,
'disk': 420,
'bandwidth': None
},
'nv1.large': {
'id': 'nv1.large',
'name': 'GPU Large Instance',
'ram': 7679,
'disk': 840,
'bandwidth': None
},
'nv1.xlarge': {
'id': 'nv1.xlarge',
'name': 'GPU Extra Large Instance',
'ram': 15358,
'disk': 1680,
'bandwidth': None
},
'g2.2xlarge': {
'id': 'g2.2xlarge',
'name': 'GPU Double Extra Large Instance',
'ram': 15360,
'disk': 60,
'bandwidth': None
},
'cc1.4xlarge': {
'id': 'cc1.4xlarge',
'name': 'Cluster Compute Quadruple Extra Large Instance',
'ram': 24576,
'disk': 1680,
'bandwidth': None
},
'cc2.8xlarge': {
'id': 'cc2.8xlarge',
'name': 'Cluster Compute Eight Extra Large Instance',
'ram': 65536,
'disk': 3360,
'bandwidth': None
},
'hi1.xlarge': {
'id': 'hi1.xlarge',
'name': 'High Storage Extra Large Instance',
'ram': 15361,
'disk': 1680,
'bandwidth': None
},
'm3.xlarge': {
'id': 'm3.xlarge',
'name': 'High Storage Optimized Extra Large Instance',
'ram': 15357,
'disk': 0,
'bandwidth': None
},
'm3.2xlarge': {
'id': 'm3.2xlarge',
'name': 'High Storage Optimized Double Extra Large Instance',
'ram': 30720,
'disk': 0,
'bandwidth': None
},
'm3s.xlarge': {
'id': 'm3s.xlarge',
'name': 'High Storage Optimized Extra Large Instance',
'ram': 15359,
'disk': 0,
'bandwidth': None
},
'm3s.2xlarge': {
'id': 'm3s.2xlarge',
'name': 'High Storage Optimized Double Extra Large Instance',
'ram': 30719,
'disk': 0,
'bandwidth': None
},
'cr1.8xlarge': {
'id': 'cr1.8xlarge',
'name': 'Memory Optimized Eight Extra Large Instance',
'ram': 249855,
'disk': 240,
'bandwidth': None
},
'os1.2xlarge': {
'id': 'os1.2xlarge',
'name': 'Memory Optimized, High Storage, Passthrough NIC Double Extra '
'Large Instance',
'ram': 65536,
'disk': 60,
'bandwidth': None
},
'os1.4xlarge': {
'id': 'os1.4xlarge',
'name': 'Memory Optimized, High Storage, Passthrough NIC Quadruple Ext'
'ra Large Instance',
'ram': 131072,
'disk': 120,
'bandwidth': None
},
'os1.8xlarge': {
'id': 'os1.8xlarge',
'name': 'Memory Optimized, High Storage, Passthrough NIC Eight Extra L'
'arge Instance',
'ram': 249856,
'disk': 500,
'bandwidth': None
},
'oc1.4xlarge': {
'id': 'oc1.4xlarge',
'name': 'Outscale Quadruple Extra Large Instance',
'ram': 24575,
'disk': 1680,
'bandwidth': None
},
'oc2.8xlarge': {
'id': 'oc2.8xlarge',
'name': 'Outscale Eight Extra Large Instance',
'ram': 65535,
'disk': 3360,
'bandwidth': None
}
}
"""
The functions that handle Outscale cloud regions are overridden because
Outscale instance types are kept in a separate dict, so Outscale cloud
regions are also declared in separate constants.
"""
OUTSCALE_SAS_REGION_DETAILS = {
'eu-west-3': {
'endpoint': 'api-ppd.outscale.com',
'api_name': 'osc_sas_eu_west_3',
'country': 'FRANCE',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
},
'eu-west-1': {
'endpoint': 'api.eu-west-1.outscale.com',
'api_name': 'osc_sas_eu_west_1',
'country': 'FRANCE',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
},
'us-east-1': {
'endpoint': 'api.us-east-1.outscale.com',
'api_name': 'osc_sas_us_east_1',
'country': 'USA',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
}
}
OUTSCALE_INC_REGION_DETAILS = {
'eu-west-1': {
'endpoint': 'api.eu-west-1.outscale.com',
'api_name': 'osc_inc_eu_west_1',
'country': 'FRANCE',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
},
'eu-west-3': {
'endpoint': 'api-ppd.outscale.com',
'api_name': 'osc_inc_eu_west_3',
'country': 'FRANCE',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
},
'us-east-1': {
'endpoint': 'api.us-east-1.outscale.com',
'api_name': 'osc_inc_us_east_1',
'country': 'USA',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
}
}
"""
Define the extra dictionary for specific resources
"""
RESOURCE_EXTRA_ATTRIBUTES_MAP = {
'ebs_volume': {
'snapshot_id': {
'xpath': 'ebs/snapshotId',
'transform_func': str
},
'volume_id': {
'xpath': 'ebs/volumeId',
'transform_func': str
},
'volume_size': {
'xpath': 'ebs/volumeSize',
'transform_func': int
},
'delete': {
'xpath': 'ebs/deleteOnTermination',
'transform_func': str
},
'volume_type': {
'xpath': 'ebs/volumeType',
'transform_func': str
},
'iops': {
'xpath': 'ebs/iops',
'transform_func': int
}
},
'elastic_ip': {
'allocation_id': {
'xpath': 'allocationId',
'transform_func': str,
},
'association_id': {
'xpath': 'associationId',
'transform_func': str,
},
'interface_id': {
'xpath': 'networkInterfaceId',
'transform_func': str,
},
'owner_id': {
'xpath': 'networkInterfaceOwnerId',
'transform_func': str,
},
'private_ip': {
'xpath': 'privateIp',
'transform_func': str,
}
},
'image': {
'state': {
'xpath': 'imageState',
'transform_func': str
},
'owner_id': {
'xpath': 'imageOwnerId',
'transform_func': str
},
'owner_alias': {
'xpath': 'imageOwnerAlias',
'transform_func': str
},
'is_public': {
'xpath': 'isPublic',
'transform_func': str
},
'architecture': {
'xpath': 'architecture',
'transform_func': str
},
'image_type': {
'xpath': 'imageType',
'transform_func': str
},
'image_location': {
'xpath': 'imageLocation',
'transform_func': str
},
'platform': {
'xpath': 'platform',
'transform_func': str
},
'description': {
'xpath': 'description',
'transform_func': str
},
'root_device_type': {
'xpath': 'rootDeviceType',
'transform_func': str
},
'virtualization_type': {
'xpath': 'virtualizationType',
'transform_func': str
},
'hypervisor': {
'xpath': 'hypervisor',
'transform_func': str
},
'kernel_id': {
'xpath': 'kernelId',
'transform_func': str
},
'ramdisk_id': {
'xpath': 'ramdiskId',
'transform_func': str
}
},
'network': {
'state': {
'xpath': 'state',
'transform_func': str
},
'dhcp_options_id': {
'xpath': 'dhcpOptionsId',
'transform_func': str
},
'instance_tenancy': {
'xpath': 'instanceTenancy',
'transform_func': str
},
'is_default': {
'xpath': 'isDefault',
'transform_func': str
}
},
'network_interface': {
'subnet_id': {
'xpath': 'subnetId',
'transform_func': str
},
'vpc_id': {
'xpath': 'vpcId',
'transform_func': str
},
'zone': {
'xpath': 'availabilityZone',
'transform_func': str
},
'description': {
'xpath': 'description',
'transform_func': str
},
'owner_id': {
'xpath': 'ownerId',
'transform_func': str
},
'mac_address': {
'xpath': 'macAddress',
'transform_func': str
},
'private_dns_name': {
'xpath': 'privateIpAddressesSet/privateDnsName',
'transform_func': str
},
'source_dest_check': {
'xpath': 'sourceDestCheck',
'transform_func': str
}
},
'network_interface_attachment': {
'attachment_id': {
'xpath': 'attachment/attachmentId',
'transform_func': str
},
'instance_id': {
'xpath': 'attachment/instanceId',
'transform_func': str
},
'owner_id': {
'xpath': 'attachment/instanceOwnerId',
'transform_func': str
},
'device_index': {
'xpath': 'attachment/deviceIndex',
'transform_func': int
},
'status': {
'xpath': 'attachment/status',
'transform_func': str
},
'attach_time': {
'xpath': 'attachment/attachTime',
'transform_func': parse_date
},
'delete': {
'xpath': 'attachment/deleteOnTermination',
'transform_func': str
}
},
'node': {
'availability': {
'xpath': 'placement/availabilityZone',
'transform_func': str
},
'architecture': {
'xpath': 'architecture',
'transform_func': str
},
'client_token': {
'xpath': 'clientToken',
'transform_func': str
},
'dns_name': {
'xpath': 'dnsName',
'transform_func': str
},
'hypervisor': {
'xpath': 'hypervisor',
'transform_func': str
},
'iam_profile': {
'xpath': 'iamInstanceProfile/id',
'transform_func': str
},
'image_id': {
'xpath': 'imageId',
'transform_func': str
},
'instance_id': {
'xpath': 'instanceId',
'transform_func': str
},
'instance_lifecycle': {
'xpath': 'instanceLifecycle',
'transform_func': str
},
'instance_tenancy': {
'xpath': 'placement/tenancy',
'transform_func': str
},
'instance_type': {
'xpath': 'instanceType',
'transform_func': str
},
'key_name': {
'xpath': 'keyName',
'transform_func': str
},
'launch_index': {
'xpath': 'amiLaunchIndex',
'transform_func': int
},
'launch_time': {
'xpath': 'launchTime',
'transform_func': str
},
'kernel_id': {
'xpath': 'kernelId',
'transform_func': str
},
'monitoring': {
'xpath': 'monitoring/state',
'transform_func': str
},
'platform': {
'xpath': 'platform',
'transform_func': str
},
'private_dns': {
'xpath': 'privateDnsName',
'transform_func': str
},
'ramdisk_id': {
'xpath': 'ramdiskId',
'transform_func': str
},
'root_device_type': {
'xpath': 'rootDeviceType',
'transform_func': str
},
'root_device_name': {
'xpath': 'rootDeviceName',
'transform_func': str
},
'reason': {
'xpath': 'reason',
'transform_func': str
},
'source_dest_check': {
'xpath': 'sourceDestCheck',
'transform_func': str
},
'status': {
'xpath': 'instanceState/name',
'transform_func': str
},
'subnet_id': {
'xpath': 'subnetId',
'transform_func': str
},
'virtualization_type': {
'xpath': 'virtualizationType',
'transform_func': str
},
'ebs_optimized': {
'xpath': 'ebsOptimized',
'transform_func': str
},
'vpc_id': {
'xpath': 'vpcId',
'transform_func': str
}
},
'reserved_node': {
'instance_type': {
'xpath': 'instanceType',
'transform_func': str
},
'availability': {
'xpath': 'availabilityZone',
'transform_func': str
},
'start': {
'xpath': 'start',
'transform_func': str
},
'duration': {
'xpath': 'duration',
'transform_func': int
},
'usage_price': {
'xpath': 'usagePrice',
'transform_func': float
},
'fixed_price': {
'xpath': 'fixedPrice',
'transform_func': float
},
'instance_count': {
'xpath': 'instanceCount',
'transform_func': int
},
'description': {
'xpath': 'productDescription',
'transform_func': str
},
'instance_tenancy': {
'xpath': 'instanceTenancy',
'transform_func': str
},
'currency_code': {
'xpath': 'currencyCode',
'transform_func': str
},
'offering_type': {
'xpath': 'offeringType',
'transform_func': str
}
},
'security_group': {
'vpc_id': {
'xpath': 'vpcId',
'transform_func': str
},
'description': {
'xpath': 'groupDescription',
'transform_func': str
},
'owner_id': {
'xpath': 'ownerId',
'transform_func': str
}
},
'snapshot': {
'volume_id': {
'xpath': 'volumeId',
'transform_func': str
},
'state': {
'xpath': 'status',
'transform_func': str
},
'description': {
'xpath': 'description',
'transform_func': str
},
'progress': {
'xpath': 'progress',
'transform_func': str
},
'start_time': {
'xpath': 'startTime',
'transform_func': parse_date
}
},
'subnet': {
'cidr_block': {
'xpath': 'cidrBlock',
'transform_func': str
},
'available_ips': {
'xpath': 'availableIpAddressCount',
'transform_func': int
},
'zone': {
'xpath': 'availabilityZone',
'transform_func': str
},
'vpc_id': {
'xpath': 'vpcId',
'transform_func': str
}
},
'volume': {
'device': {
'xpath': 'attachmentSet/item/device',
'transform_func': str
},
'snapshot_id': {
'xpath': 'snapshotId',
'transform_func': lambda v: str(v) or None
},
'iops': {
'xpath': 'iops',
'transform_func': int
},
'zone': {
'xpath': 'availabilityZone',
'transform_func': str
},
'create_time': {
'xpath': 'createTime',
'transform_func': parse_date
},
'state': {
'xpath': 'status',
'transform_func': str
},
'attach_time': {
'xpath': 'attachmentSet/item/attachTime',
'transform_func': parse_date
},
'attachment_status': {
'xpath': 'attachmentSet/item/status',
'transform_func': str
},
'instance_id': {
'xpath': 'attachmentSet/item/instanceId',
'transform_func': str
},
'delete': {
'xpath': 'attachmentSet/item/deleteOnTermination',
'transform_func': str
}
},
'route_table': {
'vpc_id': {
'xpath': 'vpcId',
'transform_func': str
}
}
}
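# Each entry of RESOURCE_EXTRA_ATTRIBUTES_MAP pairs an attribute name with an
# XPath (relative to the resource's XML element) and a transform function that
# coerces the text value. The commented sketch below shows how such a map
# would be applied to a parsed element; the helper name is illustrative only
# (the driver's real parsing helpers appear later in this file).
#
#     def _extract_extra(element, attribute_map):
#         extra = {}
#         for name, spec in attribute_map.items():
#             value = findtext(element=element, xpath=spec['xpath'],
#                              namespace=NAMESPACE)
#             if value is not None:
#                 extra[name] = spec['transform_func'](value)
#         return extra
#
#     # extra = _extract_extra(volume_elem, RESOURCE_EXTRA_ATTRIBUTES_MAP['volume'])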
VALID_EC2_REGIONS = REGION_DETAILS.keys()
VALID_EC2_REGIONS = [r for r in VALID_EC2_REGIONS if r != 'nimbus']
class EC2NodeLocation(NodeLocation):
def __init__(self, id, name, country, driver, availability_zone):
super(EC2NodeLocation, self).__init__(id, name, country, driver)
self.availability_zone = availability_zone
def __repr__(self):
return (('<EC2NodeLocation: id=%s, name=%s, country=%s, '
'availability_zone=%s driver=%s>')
% (self.id, self.name, self.country,
self.availability_zone, self.driver.name))
class EC2Response(AWSBaseResponse):
"""
EC2 specific response parsing and error handling.
"""
def parse_error(self):
err_list = []
# Okay, so for Eucalyptus, you can get a 403, with no body,
# if you are using the wrong user/password.
msg = "Failure: 403 Forbidden"
if self.status == 403 and self.body[:len(msg)] == msg:
raise InvalidCredsError(msg)
try:
body = ET.XML(self.body)
        except Exception:
raise MalformedResponseError("Failed to parse XML",
body=self.body, driver=EC2NodeDriver)
for err in body.findall('Errors/Error'):
code, message = err.getchildren()
err_list.append('%s: %s' % (code.text, message.text))
if code.text == 'InvalidClientTokenId':
raise InvalidCredsError(err_list[-1])
if code.text == 'SignatureDoesNotMatch':
raise InvalidCredsError(err_list[-1])
if code.text == 'AuthFailure':
raise InvalidCredsError(err_list[-1])
if code.text == 'OptInRequired':
raise InvalidCredsError(err_list[-1])
if code.text == 'IdempotentParameterMismatch':
raise IdempotentParamError(err_list[-1])
if code.text == 'InvalidKeyPair.NotFound':
# TODO: Use connection context instead
match = re.match(r'.*\'(.+?)\'.*', message.text)
if match:
name = match.groups()[0]
else:
name = None
raise KeyPairDoesNotExistError(name=name,
driver=self.connection.driver)
return '\n'.join(err_list)
class EC2Connection(SignedAWSConnection):
"""
Represents a single connection to the EC2 Endpoint.
"""
version = API_VERSION
host = REGION_DETAILS['us-east-1']['endpoint']
responseCls = EC2Response
service_name = 'ec2'
class ExEC2AvailabilityZone(object):
"""
Extension class which stores information about an EC2 availability zone.
Note: This class is EC2 specific.
"""
def __init__(self, name, zone_state, region_name):
self.name = name
self.zone_state = zone_state
self.region_name = region_name
def __repr__(self):
return (('<ExEC2AvailabilityZone: name=%s, zone_state=%s, '
'region_name=%s>')
% (self.name, self.zone_state, self.region_name))
class EC2ReservedNode(Node):
"""
Class which stores information about EC2 reserved instances/nodes
Inherits from Node and passes in None for name and private/public IPs
Note: This class is EC2 specific.
"""
def __init__(self, id, state, driver, size=None, image=None, extra=None):
super(EC2ReservedNode, self).__init__(id=id, name=None, state=state,
public_ips=None,
private_ips=None,
driver=driver, extra=extra)
def __repr__(self):
return (('<EC2ReservedNode: id=%s>') % (self.id))
class EC2SecurityGroup(object):
"""
Represents information about a Security group
Note: This class is EC2 specific.
"""
def __init__(self, id, name, ingress_rules, egress_rules, extra=None):
self.id = id
self.name = name
self.ingress_rules = ingress_rules
self.egress_rules = egress_rules
self.extra = extra or {}
def __repr__(self):
return (('<EC2SecurityGroup: id=%s, name=%s')
% (self.id, self.name))
class EC2PlacementGroup(object):
"""
    Represents information about a Placement Group
Note: This class is EC2 specific.
"""
    def __init__(self, name, state, strategy='cluster', extra=None):
        self.name = name
        self.state = state
        self.strategy = strategy
        self.extra = extra or {}
    def __repr__(self):
        return '<EC2PlacementGroup: name=%s, state=%s>' % (self.name,
                                                           self.state)
class EC2Network(object):
"""
Represents information about a VPC (Virtual Private Cloud) network
Note: This class is EC2 specific.
"""
def __init__(self, id, name, cidr_block, extra=None):
self.id = id
self.name = name
self.cidr_block = cidr_block
self.extra = extra or {}
def __repr__(self):
return (('<EC2Network: id=%s, name=%s')
% (self.id, self.name))
class EC2NetworkSubnet(object):
"""
Represents information about a VPC (Virtual Private Cloud) subnet
Note: This class is EC2 specific.
"""
def __init__(self, id, name, state, extra=None):
self.id = id
self.name = name
self.state = state
self.extra = extra or {}
def __repr__(self):
return (('<EC2NetworkSubnet: id=%s, name=%s') % (self.id, self.name))
class EC2NetworkInterface(object):
"""
Represents information about a VPC network interface
Note: This class is EC2 specific. The state parameter denotes the current
status of the interface. Valid values for state are attaching, attached,
detaching and detached.
"""
def __init__(self, id, name, state, extra=None):
self.id = id
self.name = name
self.state = state
self.extra = extra or {}
def __repr__(self):
return (('<EC2NetworkInterface: id=%s, name=%s')
% (self.id, self.name))
class ElasticIP(object):
"""
Represents information about an elastic IP address
:param ip: The elastic IP address
:type ip: ``str``
:param domain: The domain that the IP resides in (EC2-Classic/VPC).
EC2 classic is represented with standard and VPC
is represented with vpc.
:type domain: ``str``
:param instance_id: The identifier of the instance which currently
has the IP associated.
:type instance_id: ``str``
Note: This class is used to support both EC2 and VPC IPs.
    For VPC addresses, the VPC-specific attributes are stored in the
    extra dict to make promotion to the base API easier.
"""
def __init__(self, ip, domain, instance_id, extra=None):
self.ip = ip
self.domain = domain
self.instance_id = instance_id
self.extra = extra or {}
def __repr__(self):
return (('<ElasticIP: ip=%s, domain=%s, instance_id=%s>')
% (self.ip, self.domain, self.instance_id))
class VPCInternetGateway(object):
"""
Class which stores information about VPC Internet Gateways.
Note: This class is VPC specific.
"""
def __init__(self, id, name, vpc_id, state, driver, extra=None):
self.id = id
self.name = name
self.vpc_id = vpc_id
self.state = state
self.extra = extra or {}
def __repr__(self):
return (('<VPCInternetGateway: id=%s>') % (self.id))
class EC2RouteTable(object):
"""
Class which stores information about VPC Route Tables.
Note: This class is VPC specific.
"""
def __init__(self, id, name, routes, subnet_associations,
propagating_gateway_ids, extra=None):
"""
:param id: The ID of the route table.
:type id: ``str``
:param name: The name of the route table.
:type name: ``str``
:param routes: A list of routes in the route table.
:type routes: ``list`` of :class:`EC2Route`
:param subnet_associations: A list of associations between the
route table and one or more subnets.
:type subnet_associations: ``list`` of
:class:`EC2SubnetAssociation`
:param propagating_gateway_ids: The list of IDs of any virtual
private gateways propagating the
routes.
:type propagating_gateway_ids: ``list``
"""
self.id = id
self.name = name
self.routes = routes
self.subnet_associations = subnet_associations
self.propagating_gateway_ids = propagating_gateway_ids
self.extra = extra or {}
def __repr__(self):
return (('<EC2RouteTable: id=%s>') % (self.id))
class EC2Route(object):
"""
Class which stores information about a Route.
Note: This class is VPC specific.
"""
def __init__(self, cidr, gateway_id, instance_id, owner_id,
interface_id, state, origin, vpc_peering_connection_id):
"""
:param cidr: The CIDR block used for the destination match.
:type cidr: ``str``
:param gateway_id: The ID of a gateway attached to the VPC.
:type gateway_id: ``str``
:param instance_id: The ID of a NAT instance in the VPC.
:type instance_id: ``str``
:param owner_id: The AWS account ID of the owner of the instance.
:type owner_id: ``str``
:param interface_id: The ID of the network interface.
:type interface_id: ``str``
:param state: The state of the route (active | blackhole).
:type state: ``str``
:param origin: Describes how the route was created.
:type origin: ``str``
:param vpc_peering_connection_id: The ID of the VPC
peering connection.
:type vpc_peering_connection_id: ``str``
"""
self.cidr = cidr
self.gateway_id = gateway_id
self.instance_id = instance_id
self.owner_id = owner_id
self.interface_id = interface_id
self.state = state
self.origin = origin
self.vpc_peering_connection_id = vpc_peering_connection_id
def __repr__(self):
return (('<EC2Route: cidr=%s>') % (self.cidr))
class EC2SubnetAssociation(object):
"""
Class which stores information about Route Table associated with
a given Subnet in a VPC
Note: This class is VPC specific.
"""
def __init__(self, id, route_table_id, subnet_id, main=False):
"""
:param id: The ID of the subnet association in the VPC.
:type id: ``str``
:param route_table_id: The ID of a route table in the VPC.
:type route_table_id: ``str``
:param subnet_id: The ID of a subnet in the VPC.
:type subnet_id: ``str``
:param main: If true, means this is a main VPC route table.
:type main: ``bool``
"""
self.id = id
self.route_table_id = route_table_id
self.subnet_id = subnet_id
self.main = main
def __repr__(self):
return (('<EC2SubnetAssociation: id=%s>') % (self.id))
class BaseEC2NodeDriver(NodeDriver):
"""
Base Amazon EC2 node driver.
    Used as a base for the main EC2 driver and other derivative driver classes.
"""
connectionCls = EC2Connection
features = {'create_node': ['ssh_key']}
path = '/'
signature_version = DEFAULT_SIGNATURE_VERSION
NODE_STATE_MAP = {
'pending': NodeState.PENDING,
'running': NodeState.RUNNING,
'shutting-down': NodeState.UNKNOWN,
'terminated': NodeState.TERMINATED
}
# http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Volume.html
VOLUME_STATE_MAP = {
'available': StorageVolumeState.AVAILABLE,
'in-use': StorageVolumeState.INUSE,
'error': StorageVolumeState.ERROR,
'creating': StorageVolumeState.CREATING,
'deleting': StorageVolumeState.DELETING,
'deleted': StorageVolumeState.DELETED,
'error_deleting': StorageVolumeState.ERROR
}
def list_nodes(self, ex_node_ids=None, ex_filters=None):
"""
List all nodes
Ex_node_ids parameter is used to filter the list of
nodes that should be returned. Only the nodes
with the corresponding node ids will be returned.
:param ex_node_ids: List of ``node.id``
:type ex_node_ids: ``list`` of ``str``
:param ex_filters: The filters so that the response includes
information for only certain nodes.
:type ex_filters: ``dict``
:rtype: ``list`` of :class:`Node`
"""
params = {'Action': 'DescribeInstances'}
if ex_node_ids:
params.update(self._pathlist('InstanceId', ex_node_ids))
if ex_filters:
params.update(self._build_filters(ex_filters))
elem = self.connection.request(self.path, params=params).object
nodes = []
for rs in findall(element=elem, xpath='reservationSet/item',
namespace=NAMESPACE):
nodes += self._to_nodes(rs, 'instancesSet/item')
nodes_elastic_ips_mappings = self.ex_describe_addresses(nodes)
for node in nodes:
ips = nodes_elastic_ips_mappings[node.id]
node.public_ips.extend(ips)
return nodes
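    # A minimal usage sketch for list_nodes (commented out; the driver
    # instantiation is an illustrative assumption, since the EC2NodeDriver
    # constructor is not shown in this excerpt). ex_filters takes the
    # EC2 DescribeInstances filter names.
    #
    #     driver = EC2NodeDriver('access key', 'secret key', region='us-east-1')
    #     running = driver.list_nodes(
    #         ex_filters={'instance-state-name': 'running'})
    #     subset = driver.list_nodes(ex_node_ids=['i-0123456789abcdef0'])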
def list_sizes(self, location=None):
available_types = REGION_DETAILS[self.region_name]['instance_types']
sizes = []
for instance_type in available_types:
attributes = INSTANCE_TYPES[instance_type]
attributes = copy.deepcopy(attributes)
price = self._get_size_price(size_id=instance_type)
attributes.update({'price': price})
sizes.append(NodeSize(driver=self, **attributes))
return sizes
def list_images(self, location=None, ex_image_ids=None, ex_owner=None,
ex_executableby=None, ex_filters=None):
"""
List all images
@inherits: :class:`NodeDriver.list_images`
Ex_image_ids parameter is used to filter the list of
images that should be returned. Only the images
with the corresponding image ids will be returned.
Ex_owner parameter is used to filter the list of
images that should be returned. Only the images
with the corresponding owner will be returned.
Valid values: amazon|aws-marketplace|self|all|aws id
Ex_executableby parameter describes images for which
the specified user has explicit launch permissions.
The user can be an AWS account ID, self to return
images for which the sender of the request has
explicit launch permissions, or all to return
images with public launch permissions.
Valid values: all|self|aws id
Ex_filters parameter is used to filter the list of
        images that should be returned. Only images matching
the filter will be returned.
:param ex_image_ids: List of ``NodeImage.id``
:type ex_image_ids: ``list`` of ``str``
:param ex_owner: Owner name
:type ex_owner: ``str``
:param ex_executableby: Executable by
:type ex_executableby: ``str``
:param ex_filters: Filter by
:type ex_filters: ``dict``
:rtype: ``list`` of :class:`NodeImage`
"""
params = {'Action': 'DescribeImages'}
if ex_owner:
params.update({'Owner.1': ex_owner})
if ex_executableby:
params.update({'ExecutableBy.1': ex_executableby})
if ex_image_ids:
for index, image_id in enumerate(ex_image_ids):
index += 1
params.update({'ImageId.%s' % (index): image_id})
if ex_filters:
params.update(self._build_filters(ex_filters))
images = self._to_images(
self.connection.request(self.path, params=params).object
)
return images
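    # Usage sketch for list_images (commented out, illustrative only).
    # Restrict the listing with ex_owner / ex_image_ids / ex_filters, since an
    # unfiltered DescribeImages call returns a very large result set.
    #
    #     amazon_images = driver.list_images(ex_owner='amazon')
    #     one_image = driver.list_images(ex_image_ids=['ami-12345678'])[0]
    #     hvm_images = driver.list_images(
    #         ex_owner='self', ex_filters={'virtualization-type': 'hvm'})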
def get_image(self, image_id):
"""
Get an image based on an image_id
:param image_id: Image identifier
:type image_id: ``str``
:return: A NodeImage object
:rtype: :class:`NodeImage`
"""
images = self.list_images(ex_image_ids=[image_id])
image = images[0]
return image
def list_locations(self):
locations = []
for index, availability_zone in \
enumerate(self.ex_list_availability_zones()):
locations.append(EC2NodeLocation(
index, availability_zone.name, self.country, self,
availability_zone)
)
return locations
def list_volumes(self, node=None):
params = {
'Action': 'DescribeVolumes',
}
if node:
filters = {'attachment.instance-id': node.id}
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params).object
volumes = [self._to_volume(el) for el in response.findall(
fixxpath(xpath='volumeSet/item', namespace=NAMESPACE))
]
return volumes
def create_node(self, **kwargs):
"""
Create a new EC2 node.
Reference: http://bit.ly/8ZyPSy [docs.amazonwebservices.com]
@inherits: :class:`NodeDriver.create_node`
:keyword ex_keyname: The name of the key pair
:type ex_keyname: ``str``
:keyword ex_userdata: User data
:type ex_userdata: ``str``
:keyword ex_security_groups: A list of names of security groups to
assign to the node.
:type ex_security_groups: ``list``
:keyword ex_security_group_ids: A list of ids of security groups to
assign to the node.[for VPC nodes only]
:type ex_security_group_ids: ``list``
:keyword ex_metadata: Key/Value metadata to associate with a node
:type ex_metadata: ``dict``
:keyword ex_mincount: Minimum number of instances to launch
:type ex_mincount: ``int``
:keyword ex_maxcount: Maximum number of instances to launch
:type ex_maxcount: ``int``
:keyword ex_clienttoken: Unique identifier to ensure idempotency
:type ex_clienttoken: ``str``
:keyword ex_blockdevicemappings: ``list`` of ``dict`` block device
mappings.
:type ex_blockdevicemappings: ``list`` of ``dict``
:keyword ex_iamprofile: Name or ARN of IAM profile
:type ex_iamprofile: ``str``
:keyword ex_ebs_optimized: EBS-Optimized if True
:type ex_ebs_optimized: ``bool``
:keyword ex_subnet: The subnet to launch the instance into.
:type ex_subnet: :class:`.EC2Subnet`
:keyword ex_placement_group: The name of the placement group to
launch the instance into.
:type ex_placement_group: ``str``
"""
image = kwargs["image"]
size = kwargs["size"]
params = {
'Action': 'RunInstances',
'ImageId': image.id,
'MinCount': str(kwargs.get('ex_mincount', '1')),
'MaxCount': str(kwargs.get('ex_maxcount', '1')),
'InstanceType': size.id
}
if 'ex_security_groups' in kwargs and 'ex_securitygroup' in kwargs:
raise ValueError('You can only supply ex_security_groups or'
' ex_securitygroup')
# ex_securitygroup is here for backward compatibility
ex_security_groups = kwargs.get('ex_security_groups', None)
ex_securitygroup = kwargs.get('ex_securitygroup', None)
security_groups = ex_security_groups or ex_securitygroup
if security_groups:
if not isinstance(security_groups, (tuple, list)):
security_groups = [security_groups]
for sig in range(len(security_groups)):
params['SecurityGroup.%d' % (sig + 1,)] =\
security_groups[sig]
if 'ex_security_group_ids' in kwargs and 'ex_subnet' not in kwargs:
raise ValueError('You can only supply ex_security_group_ids'
                             ' in combination with ex_subnet')
security_group_ids = kwargs.get('ex_security_group_ids', None)
if security_group_ids:
if not isinstance(security_group_ids, (tuple, list)):
security_group_ids = [security_group_ids]
for sig in range(len(security_group_ids)):
params['SecurityGroupId.%d' % (sig + 1,)] =\
security_group_ids[sig]
if 'location' in kwargs:
availability_zone = getattr(kwargs['location'],
'availability_zone', None)
if availability_zone:
if availability_zone.region_name != self.region_name:
raise AttributeError('Invalid availability zone: %s'
% (availability_zone.name))
params['Placement.AvailabilityZone'] = availability_zone.name
if 'auth' in kwargs and 'ex_keyname' in kwargs:
raise AttributeError('Cannot specify auth and ex_keyname together')
if 'auth' in kwargs:
auth = self._get_and_check_auth(kwargs['auth'])
key = self.ex_find_or_import_keypair_by_key_material(auth.pubkey)
params['KeyName'] = key['keyName']
if 'ex_keyname' in kwargs:
params['KeyName'] = kwargs['ex_keyname']
if 'ex_userdata' in kwargs:
params['UserData'] = base64.b64encode(b(kwargs['ex_userdata']))\
.decode('utf-8')
if 'ex_clienttoken' in kwargs:
params['ClientToken'] = kwargs['ex_clienttoken']
if 'ex_blockdevicemappings' in kwargs:
params.update(self._get_block_device_mapping_params(
kwargs['ex_blockdevicemappings']))
if 'ex_iamprofile' in kwargs:
if not isinstance(kwargs['ex_iamprofile'], basestring):
raise AttributeError('ex_iamprofile not string')
if kwargs['ex_iamprofile'].startswith('arn:aws:iam:'):
params['IamInstanceProfile.Arn'] = kwargs['ex_iamprofile']
else:
params['IamInstanceProfile.Name'] = kwargs['ex_iamprofile']
if 'ex_ebs_optimized' in kwargs:
params['EbsOptimized'] = kwargs['ex_ebs_optimized']
if 'ex_subnet' in kwargs:
params['SubnetId'] = kwargs['ex_subnet'].id
if 'ex_placement_group' in kwargs and kwargs['ex_placement_group']:
params['Placement.GroupName'] = kwargs['ex_placement_group']
object = self.connection.request(self.path, params=params).object
nodes = self._to_nodes(object, 'instancesSet/item')
for node in nodes:
tags = {'Name': kwargs['name']}
if 'ex_metadata' in kwargs:
tags.update(kwargs['ex_metadata'])
try:
self.ex_create_tags(resource=node, tags=tags)
except Exception:
continue
node.name = kwargs['name']
node.extra.update({'tags': tags})
if len(nodes) == 1:
return nodes[0]
else:
return nodes
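    # Usage sketch for create_node (commented out, illustrative only; the AMI
    # id, size id and key pair name are placeholders). It combines the keyword
    # arguments documented above: a key pair, classic security groups,
    # user data and instance tags.
    #
    #     image = driver.get_image('ami-12345678')
    #     size = [s for s in driver.list_sizes() if s.id == 'm3.medium'][0]
    #     node = driver.create_node(name='web-1', image=image, size=size,
    #                               ex_keyname='my-keypair',
    #                               ex_security_groups=['default'],
    #                               ex_userdata='#!/bin/bash\necho hello',
    #                               ex_metadata={'env': 'staging'})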
def reboot_node(self, node):
params = {'Action': 'RebootInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def destroy_node(self, node):
params = {'Action': 'TerminateInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_terminate_boolean(res)
def create_volume(self, size, name, location=None, snapshot=None,
ex_volume_type='standard', ex_iops=None):
"""
Create a new volume.
:param size: Size of volume in gigabytes (required)
:type size: ``int``
:param name: Name of the volume to be created
:type name: ``str``
        :param location: The datacenter (availability zone) in which to
                         create the volume. If empty, undefined behavior
                         will be selected. (optional)
        :type location: :class:`.EC2NodeLocation`
        :param snapshot: Snapshot from which to create the new
                         volume. (optional)
        :type snapshot: :class:`.VolumeSnapshot`
        :param ex_volume_type: Type of volume to create.
        :type ex_volume_type: ``str``
        :param ex_iops: The number of I/O operations per second (IOPS)
                        that the volume supports. Only used if
                        ex_volume_type is io1.
        :type ex_iops: ``int``
:return: The newly created volume.
:rtype: :class:`StorageVolume`
"""
valid_volume_types = ['standard', 'io1', 'gp2']
params = {
'Action': 'CreateVolume',
'Size': str(size)}
if ex_volume_type and ex_volume_type not in valid_volume_types:
raise ValueError('Invalid volume type specified: %s' %
(ex_volume_type))
if snapshot:
params['SnapshotId'] = snapshot.id
if location is not None:
params['AvailabilityZone'] = location.availability_zone.name
if ex_volume_type:
params['VolumeType'] = ex_volume_type
if ex_volume_type == 'io1' and ex_iops:
params['Iops'] = ex_iops
volume = self._to_volume(
self.connection.request(self.path, params=params).object,
name=name)
if self.ex_create_tags(volume, {'Name': name}):
volume.extra['tags']['Name'] = name
return volume
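    # Usage sketch for create_volume / attach_volume (commented out,
    # illustrative only). A provisioned-IOPS volume needs ex_volume_type='io1'
    # together with ex_iops, and the location must expose an availability
    # zone (an EC2NodeLocation as returned by list_locations()).
    #
    #     location = driver.list_locations()[0]
    #     volume = driver.create_volume(size=100, name='data-vol',
    #                                   location=location,
    #                                   ex_volume_type='io1', ex_iops=1000)
    #     # node: a Node previously obtained from list_nodes()/create_node()
    #     driver.attach_volume(node, volume, device='/dev/sdf')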
def attach_volume(self, node, volume, device):
params = {
'Action': 'AttachVolume',
'VolumeId': volume.id,
'InstanceId': node.id,
'Device': device}
self.connection.request(self.path, params=params)
return True
def detach_volume(self, volume):
params = {
'Action': 'DetachVolume',
'VolumeId': volume.id}
self.connection.request(self.path, params=params)
return True
def destroy_volume(self, volume):
params = {
'Action': 'DeleteVolume',
'VolumeId': volume.id}
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
def create_volume_snapshot(self, volume, name=None):
"""
Create snapshot from volume
:param volume: Instance of ``StorageVolume``
:type volume: ``StorageVolume``
:param name: Name of snapshot (optional)
:type name: ``str``
:rtype: :class:`VolumeSnapshot`
"""
params = {
'Action': 'CreateSnapshot',
'VolumeId': volume.id,
}
if name:
params.update({
'Description': name,
})
response = self.connection.request(self.path, params=params).object
snapshot = self._to_snapshot(response, name)
if name and self.ex_create_tags(snapshot, {'Name': name}):
snapshot.extra['tags']['Name'] = name
return snapshot
def list_volume_snapshots(self, volume):
return [snapshot for snapshot in self.list_snapshots(owner='self')
if snapshot.extra["volume_id"] == volume.id]
def list_snapshots(self, snapshot=None, owner=None):
"""
Describe all snapshots.
:param snapshot: If provided, only return snapshot information for the
provided snapshot.
:param owner: Owner for snapshot: self|amazon|ID
:type owner: ``str``
:rtype: ``list`` of :class:`VolumeSnapshot`
"""
params = {
'Action': 'DescribeSnapshots',
}
if snapshot:
params.update({
'SnapshotId.1': snapshot.id,
})
if owner:
params.update({
'Owner.1': owner,
})
response = self.connection.request(self.path, params=params).object
snapshots = self._to_snapshots(response)
return snapshots
def destroy_volume_snapshot(self, snapshot):
params = {
'Action': 'DeleteSnapshot',
'SnapshotId': snapshot.id
}
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
# Key pair management methods
def list_key_pairs(self):
params = {
'Action': 'DescribeKeyPairs'
}
response = self.connection.request(self.path, params=params)
elems = findall(element=response.object, xpath='keySet/item',
namespace=NAMESPACE)
key_pairs = self._to_key_pairs(elems=elems)
return key_pairs
def get_key_pair(self, name):
params = {
'Action': 'DescribeKeyPairs',
'KeyName': name
}
response = self.connection.request(self.path, params=params)
elems = findall(element=response.object, xpath='keySet/item',
namespace=NAMESPACE)
key_pair = self._to_key_pairs(elems=elems)[0]
return key_pair
def create_key_pair(self, name):
params = {
'Action': 'CreateKeyPair',
'KeyName': name
}
response = self.connection.request(self.path, params=params)
elem = response.object
key_pair = self._to_key_pair(elem=elem)
return key_pair
def import_key_pair_from_string(self, name, key_material):
base64key = ensure_string(base64.b64encode(b(key_material)))
params = {
'Action': 'ImportKeyPair',
'KeyName': name,
'PublicKeyMaterial': base64key
}
response = self.connection.request(self.path, params=params)
elem = response.object
key_pair = self._to_key_pair(elem=elem)
return key_pair
def delete_key_pair(self, key_pair):
params = {
'Action': 'DeleteKeyPair',
'KeyName': key_pair.name
}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
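    # Key pair lifecycle sketch (commented out, illustrative only): either let
    # EC2 generate the key material with create_key_pair, or import an
    # existing public key, then look a pair up by name and delete it.
    #
    #     created = driver.create_key_pair(name='generated-key')
    #     imported = driver.import_key_pair_from_string(
    #         name='laptop-key', key_material='ssh-rsa AAAA... user@host')
    #     pair = driver.get_key_pair(name='laptop-key')
    #     driver.delete_key_pair(pair)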
def copy_image(self, image, source_region, name=None, description=None):
"""
Copy an Amazon Machine Image from the specified source region
to the current region.
@inherits: :class:`NodeDriver.copy_image`
:param source_region: The region where the image resides
:type source_region: ``str``
:param image: Instance of class NodeImage
:type image: :class:`NodeImage`
:param name: The name of the new image
:type name: ``str``
:param description: The description of the new image
:type description: ``str``
:return: Instance of class ``NodeImage``
:rtype: :class:`NodeImage`
"""
params = {'Action': 'CopyImage',
'SourceRegion': source_region,
'SourceImageId': image.id}
if name is not None:
params['Name'] = name
if description is not None:
params['Description'] = description
image = self._to_image(
self.connection.request(self.path, params=params).object)
return image
def create_image(self, node, name, description=None, reboot=False,
block_device_mapping=None):
"""
Create an Amazon Machine Image based off of an EBS-backed instance.
@inherits: :class:`NodeDriver.create_image`
:param node: Instance of ``Node``
:type node: :class: `Node`
:param name: The name for the new image
:type name: ``str``
:param block_device_mapping: A dictionary of the disk layout
An example of this dict is included
below.
:type block_device_mapping: ``list`` of ``dict``
:param reboot: Whether or not to shutdown the instance before
creation. Amazon calls this NoReboot and
sets it to false by default to ensure a
clean image.
:type reboot: ``bool``
:param description: An optional description for the new image
:type description: ``str``
An example block device mapping dictionary is included:
mapping = [{'VirtualName': None,
'Ebs': {'VolumeSize': 10,
'VolumeType': 'standard',
'DeleteOnTermination': 'true'},
'DeviceName': '/dev/sda1'}]
:return: Instance of class ``NodeImage``
:rtype: :class:`NodeImage`
"""
params = {'Action': 'CreateImage',
'InstanceId': node.id,
'Name': name,
'NoReboot': not reboot}
if description is not None:
params['Description'] = description
if block_device_mapping is not None:
params.update(self._get_block_device_mapping_params(
block_device_mapping))
image = self._to_image(
self.connection.request(self.path, params=params).object)
return image
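    # Usage sketch for create_image (commented out, illustrative only), using
    # the block device mapping format shown in the docstring above.
    #
    #     mapping = [{'VirtualName': None,
    #                 'Ebs': {'VolumeSize': 10,
    #                         'VolumeType': 'standard',
    #                         'DeleteOnTermination': 'true'},
    #                 'DeviceName': '/dev/sda1'}]
    #     ami = driver.create_image(node, name='web-1-backup',
    #                               description='nightly image',
    #                               reboot=True,
    #                               block_device_mapping=mapping)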
def delete_image(self, image):
"""
Deletes an image at Amazon given a NodeImage object
@inherits: :class:`NodeDriver.delete_image`
:param image: Instance of ``NodeImage``
:type image: :class: `NodeImage`
:rtype: ``bool``
"""
params = {'Action': 'DeregisterImage',
'ImageId': image.id}
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
def ex_create_placement_group(self, name):
"""
Creates new Placement Group
:param name: Name for new placement Group
:type name: ``str``
:rtype: ``bool``
"""
params = {'Action': 'CreatePlacementGroup',
'Strategy': 'cluster',
'GroupName': name}
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
def ex_delete_placement_group(self, name):
"""
Deletes Placement Group
:param name: Placement Group name
:type name: ``str``
:rtype: ``bool``
"""
params = {'Action': 'DeletePlacementGroup',
'GroupName': name}
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
def ex_list_placement_groups(self, names=None):
"""
List Placement Groups
:param names: Placement Group names
:type names: ``list`` of ``str``
:rtype: ``list`` of :class:`.EC2PlacementGroup`
"""
names = names or []
params = {'Action': 'DescribePlacementGroups'}
for index, name in enumerate(names):
            params['GroupName.%s' % (index + 1)] = name
response = self.connection.request(self.path, params=params).object
return self._to_placement_groups(response)
def ex_register_image(self, name, description=None, architecture=None,
image_location=None, root_device_name=None,
block_device_mapping=None, kernel_id=None,
ramdisk_id=None, virtualization_type=None):
"""
Registers an Amazon Machine Image based off of an EBS-backed instance.
Can also be used to create images from snapshots. More information
can be found at http://goo.gl/hqZq0a.
:param name: The name for the AMI being registered
:type name: ``str``
:param description: The description of the AMI (optional)
:type description: ``str``
:param architecture: The architecture of the AMI (i386/x86_64)
(optional)
:type architecture: ``str``
:param image_location: The location of the AMI within Amazon S3
Required if registering an instance
store-backed AMI
:type image_location: ``str``
:param root_device_name: The device name for the root device
Required if registering an EBS-backed AMI
:type root_device_name: ``str``
:param block_device_mapping: A dictionary of the disk layout
(optional)
:type block_device_mapping: ``dict``
:param kernel_id: Kernel id for AMI (optional)
:type kernel_id: ``str``
:param ramdisk_id: RAM disk for AMI (optional)
:type ramdisk_id: ``str``
:param virtualization_type: The type of virtualization for the
AMI you are registering, paravirt
or hvm (optional)
:type virtualization_type: ``str``
:rtype: :class:`NodeImage`
"""
params = {'Action': 'RegisterImage',
'Name': name}
if description is not None:
params['Description'] = description
if architecture is not None:
params['Architecture'] = architecture
if image_location is not None:
params['ImageLocation'] = image_location
if root_device_name is not None:
params['RootDeviceName'] = root_device_name
if block_device_mapping is not None:
params.update(self._get_block_device_mapping_params(
block_device_mapping))
if kernel_id is not None:
params['KernelId'] = kernel_id
if ramdisk_id is not None:
params['RamDiskId'] = ramdisk_id
if virtualization_type is not None:
params['VirtualizationType'] = virtualization_type
image = self._to_image(
self.connection.request(self.path, params=params).object
)
return image
def ex_list_networks(self, network_ids=None, filters=None):
"""
Return a list of :class:`EC2Network` objects for the
current region.
:param network_ids: Return only networks matching the provided
network IDs. If not specified, a list of all
the networks in the corresponding region
is returned.
:type network_ids: ``list``
:param filters: The filters so that the response includes
information for only certain networks.
:type filters: ``dict``
:rtype: ``list`` of :class:`EC2Network`
"""
params = {'Action': 'DescribeVpcs'}
if network_ids:
params.update(self._pathlist('VpcId', network_ids))
if filters:
params.update(self._build_filters(filters))
return self._to_networks(
self.connection.request(self.path, params=params).object
)
def ex_create_network(self, cidr_block, name=None,
instance_tenancy='default'):
"""
Create a network/VPC
:param cidr_block: The CIDR block assigned to the network
:type cidr_block: ``str``
:param name: An optional name for the network
:type name: ``str``
:param instance_tenancy: The allowed tenancy of instances launched
into the VPC.
Valid values: default/dedicated
:type instance_tenancy: ``str``
        :return: The newly created network
        :rtype: :class:`.EC2Network`
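
        Example (illustrative sketch only; ``driver`` is assumed to be an
        already-initialised EC2 driver instance and the CIDR block is a
        placeholder)::

            vpc = driver.ex_create_network('10.0.0.0/16', name='my-vpc')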
"""
params = {'Action': 'CreateVpc',
'CidrBlock': cidr_block,
'InstanceTenancy': instance_tenancy}
response = self.connection.request(self.path, params=params).object
element = response.findall(fixxpath(xpath='vpc',
namespace=NAMESPACE))[0]
network = self._to_network(element, name)
if name and self.ex_create_tags(network, {'Name': name}):
network.extra['tags']['Name'] = name
return network
def ex_delete_network(self, vpc):
"""
Deletes a network/VPC.
:param vpc: VPC to delete.
:type vpc: :class:`.EC2Network`
:rtype: ``bool``
"""
params = {'Action': 'DeleteVpc', 'VpcId': vpc.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_list_subnets(self, subnet_ids=None, filters=None):
"""
Return a list of :class:`EC2NetworkSubnet` objects for the
current region.
:param subnet_ids: Return only subnets matching the provided
subnet IDs. If not specified, a list of all
the subnets in the corresponding region
is returned.
:type subnet_ids: ``list``
:param filters: The filters so that the response includes
information for only certain subnets.
:type filters: ``dict``
:rtype: ``list`` of :class:`EC2NetworkSubnet`
"""
params = {'Action': 'DescribeSubnets'}
if subnet_ids:
params.update(self._pathlist('SubnetId', subnet_ids))
if filters:
params.update(self._build_filters(filters))
return self._to_subnets(
self.connection.request(self.path, params=params).object
)
def ex_create_subnet(self, vpc_id, cidr_block,
availability_zone, name=None):
"""
Create a network subnet within a VPC
:param vpc_id: The ID of the VPC that the subnet should be
associated with
:type vpc_id: ``str``
:param cidr_block: The CIDR block assigned to the subnet
:type cidr_block: ``str``
:param availability_zone: The availability zone where the subnet
should reside
:type availability_zone: ``str``
:param name: An optional name for the network
:type name: ``str``
:rtype: :class: `EC2NetworkSubnet`
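
        Example (illustrative sketch only; ``driver`` is assumed to be an
        already-initialised EC2 driver instance and ``'us-east-1a'`` is a
        placeholder availability zone)::

            vpc = driver.ex_list_networks()[0]
            subnet = driver.ex_create_subnet(vpc.id, '10.0.1.0/24',
                                             'us-east-1a', name='my-subnet')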
"""
params = {'Action': 'CreateSubnet',
'VpcId': vpc_id,
'CidrBlock': cidr_block,
'AvailabilityZone': availability_zone}
response = self.connection.request(self.path, params=params).object
element = response.findall(fixxpath(xpath='subnet',
namespace=NAMESPACE))[0]
subnet = self._to_subnet(element, name)
if name and self.ex_create_tags(subnet, {'Name': name}):
subnet.extra['tags']['Name'] = name
return subnet
def ex_delete_subnet(self, subnet):
"""
Deletes a VPC subnet.
:param subnet: The subnet to delete
:type subnet: :class:`.EC2NetworkSubnet`
:rtype: ``bool``
"""
params = {'Action': 'DeleteSubnet', 'SubnetId': subnet.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_list_security_groups(self):
"""
List existing Security Groups.
@note: This is a non-standard extension API, and only works for EC2.
:rtype: ``list`` of ``str``
"""
params = {'Action': 'DescribeSecurityGroups'}
response = self.connection.request(self.path, params=params).object
groups = []
for group in findall(element=response, xpath='securityGroupInfo/item',
namespace=NAMESPACE):
name = findtext(element=group, xpath='groupName',
namespace=NAMESPACE)
groups.append(name)
return groups
def ex_get_security_groups(self, group_ids=None,
group_names=None, filters=None):
"""
Return a list of :class:`EC2SecurityGroup` objects for the
current region.
:param group_ids: Return only groups matching the provided
group IDs.
:type group_ids: ``list``
:param group_names: Return only groups matching the provided
group names.
        :type group_names: ``list``
:param filters: The filters so that the response includes
information for only specific security groups.
:type filters: ``dict``
:rtype: ``list`` of :class:`EC2SecurityGroup`
"""
params = {'Action': 'DescribeSecurityGroups'}
if group_ids:
params.update(self._pathlist('GroupId', group_ids))
if group_names:
for name_idx, group_name in enumerate(group_names):
name_idx += 1 # We want 1-based indexes
name_key = 'GroupName.%s' % (name_idx)
params[name_key] = group_name
if filters:
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params)
return self._to_security_groups(response.object)
def ex_create_security_group(self, name, description, vpc_id=None):
"""
Creates a new Security Group in EC2-Classic or a targeted VPC.
:param name: The name of the security group to Create.
This must be unique.
:type name: ``str``
:param description: Human readable description of a Security
Group.
:type description: ``str``
:param vpc_id: Optional identifier for VPC networks
:type vpc_id: ``str``
:rtype: ``dict``
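
        Example (illustrative sketch only; ``driver`` is assumed to be an
        already-initialised EC2 driver instance)::

            result = driver.ex_create_security_group(
                'my-group', 'Example security group')
            group_id = result['group_id']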
"""
params = {'Action': 'CreateSecurityGroup',
'GroupName': name,
'GroupDescription': description}
if vpc_id is not None:
params['VpcId'] = vpc_id
response = self.connection.request(self.path, params=params).object
group_id = findattr(element=response, xpath='groupId',
namespace=NAMESPACE)
return {
'group_id': group_id
}
def ex_delete_security_group_by_id(self, group_id):
"""
        Deletes a Security Group using the group ID.
:param group_id: The ID of the security group
:type group_id: ``str``
:rtype: ``bool``
"""
params = {'Action': 'DeleteSecurityGroup', 'GroupId': group_id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_delete_security_group_by_name(self, group_name):
"""
        Deletes a Security Group using the group name.
:param group_name: The name of the security group
:type group_name: ``str``
:rtype: ``bool``
"""
params = {'Action': 'DeleteSecurityGroup', 'GroupName': group_name}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_delete_security_group(self, name):
"""
Wrapper method which calls ex_delete_security_group_by_name.
:param name: The name of the security group
:type name: ``str``
:rtype: ``bool``
"""
return self.ex_delete_security_group_by_name(name)
def ex_authorize_security_group(self, name, from_port, to_port, cidr_ip,
protocol='tcp'):
"""
Edit a Security Group to allow specific traffic.
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the security group to edit
:type name: ``str``
:param from_port: The beginning of the port range to open
:type from_port: ``str``
:param to_port: The end of the port range to open
:type to_port: ``str``
:param cidr_ip: The ip to allow traffic for.
:type cidr_ip: ``str``
:param protocol: tcp/udp/icmp
:type protocol: ``str``
:rtype: ``bool``
"""
params = {'Action': 'AuthorizeSecurityGroupIngress',
'GroupName': name,
'IpProtocol': protocol,
'FromPort': str(from_port),
'ToPort': str(to_port),
'CidrIp': cidr_ip}
try:
res = self.connection.request(
self.path, params=params.copy()).object
return self._get_boolean(res)
except Exception:
e = sys.exc_info()[1]
if e.args[0].find('InvalidPermission.Duplicate') == -1:
raise e
def ex_authorize_security_group_ingress(self, id, from_port, to_port,
cidr_ips=None, group_pairs=None,
protocol='tcp'):
"""
Edit a Security Group to allow specific ingress traffic using
CIDR blocks or either a group ID, group name or user ID (account).
:param id: The id of the security group to edit
:type id: ``str``
:param from_port: The beginning of the port range to open
:type from_port: ``int``
:param to_port: The end of the port range to open
:type to_port: ``int``
:param cidr_ips: The list of ip ranges to allow traffic for.
:type cidr_ips: ``list``
:param group_pairs: Source user/group pairs to allow traffic for.
More info can be found at http://goo.gl/stBHJF
EC2 Classic Example: To allow access from any system
associated with the default group on account 1234567890
[{'group_name': 'default', 'user_id': '1234567890'}]
VPC Example: Allow access from any system associated with
security group sg-47ad482e on your own account
[{'group_id': ' sg-47ad482e'}]
:type group_pairs: ``list`` of ``dict``
:param protocol: tcp/udp/icmp
:type protocol: ``str``
:rtype: ``bool``
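
        Example (illustrative sketch only; ``driver`` is assumed to be an
        already-initialised EC2 driver instance and ``'sg-12345678'`` is a
        placeholder group id)::

            driver.ex_authorize_security_group_ingress(
                'sg-12345678', 22, 22, cidr_ips=['0.0.0.0/0'])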
"""
params = self._get_common_security_group_params(id,
protocol,
from_port,
to_port,
cidr_ips,
group_pairs)
params["Action"] = 'AuthorizeSecurityGroupIngress'
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_authorize_security_group_egress(self, id, from_port, to_port,
cidr_ips, group_pairs=None,
protocol='tcp'):
"""
Edit a Security Group to allow specific egress traffic using
CIDR blocks or either a group ID, group name or user ID (account).
This call is not supported for EC2 classic and only works for VPC
groups.
:param id: The id of the security group to edit
:type id: ``str``
:param from_port: The beginning of the port range to open
:type from_port: ``int``
:param to_port: The end of the port range to open
:type to_port: ``int``
:param cidr_ips: The list of ip ranges to allow traffic for.
:type cidr_ips: ``list``
:param group_pairs: Source user/group pairs to allow traffic for.
More info can be found at http://goo.gl/stBHJF
EC2 Classic Example: To allow access from any system
associated with the default group on account 1234567890
[{'group_name': 'default', 'user_id': '1234567890'}]
VPC Example: Allow access from any system associated with
security group sg-47ad482e on your own account
[{'group_id': ' sg-47ad482e'}]
:type group_pairs: ``list`` of ``dict``
:param protocol: tcp/udp/icmp
:type protocol: ``str``
:rtype: ``bool``
"""
params = self._get_common_security_group_params(id,
protocol,
from_port,
to_port,
cidr_ips,
group_pairs)
params["Action"] = 'AuthorizeSecurityGroupEgress'
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_revoke_security_group_ingress(self, id, from_port, to_port,
cidr_ips=None, group_pairs=None,
protocol='tcp'):
"""
Edit a Security Group to revoke specific ingress traffic using
CIDR blocks or either a group ID, group name or user ID (account).
:param id: The id of the security group to edit
:type id: ``str``
:param from_port: The beginning of the port range to open
:type from_port: ``int``
:param to_port: The end of the port range to open
:type to_port: ``int``
:param cidr_ips: The list of ip ranges to allow traffic for.
:type cidr_ips: ``list``
:param group_pairs: Source user/group pairs to allow traffic for.
More info can be found at http://goo.gl/stBHJF
EC2 Classic Example: To allow access from any system
associated with the default group on account 1234567890
[{'group_name': 'default', 'user_id': '1234567890'}]
VPC Example: Allow access from any system associated with
security group sg-47ad482e on your own account
[{'group_id': ' sg-47ad482e'}]
:type group_pairs: ``list`` of ``dict``
:param protocol: tcp/udp/icmp
:type protocol: ``str``
:rtype: ``bool``
"""
params = self._get_common_security_group_params(id,
protocol,
from_port,
to_port,
cidr_ips,
group_pairs)
params["Action"] = 'RevokeSecurityGroupIngress'
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_revoke_security_group_egress(self, id, from_port, to_port,
cidr_ips=None, group_pairs=None,
protocol='tcp'):
"""
Edit a Security Group to revoke specific egress traffic using
CIDR blocks or either a group ID, group name or user ID (account).
This call is not supported for EC2 classic and only works for
VPC groups.
:param id: The id of the security group to edit
:type id: ``str``
:param from_port: The beginning of the port range to open
:type from_port: ``int``
:param to_port: The end of the port range to open
:type to_port: ``int``
:param cidr_ips: The list of ip ranges to allow traffic for.
:type cidr_ips: ``list``
:param group_pairs: Source user/group pairs to allow traffic for.
More info can be found at http://goo.gl/stBHJF
EC2 Classic Example: To allow access from any system
associated with the default group on account 1234567890
[{'group_name': 'default', 'user_id': '1234567890'}]
VPC Example: Allow access from any system associated with
security group sg-47ad482e on your own account
[{'group_id': ' sg-47ad482e'}]
:type group_pairs: ``list`` of ``dict``
:param protocol: tcp/udp/icmp
:type protocol: ``str``
:rtype: ``bool``
"""
params = self._get_common_security_group_params(id,
protocol,
from_port,
to_port,
cidr_ips,
group_pairs)
params['Action'] = 'RevokeSecurityGroupEgress'
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_authorize_security_group_permissive(self, name):
"""
Edit a Security Group to allow all traffic.
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the security group to edit
:type name: ``str``
:rtype: ``list`` of ``str``
"""
results = []
params = {'Action': 'AuthorizeSecurityGroupIngress',
'GroupName': name,
'IpProtocol': 'tcp',
'FromPort': '0',
'ToPort': '65535',
'CidrIp': '0.0.0.0/0'}
try:
results.append(
self.connection.request(self.path, params=params.copy()).object
)
except Exception:
e = sys.exc_info()[1]
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
params['IpProtocol'] = 'udp'
try:
results.append(
self.connection.request(self.path, params=params.copy()).object
)
except Exception:
e = sys.exc_info()[1]
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
params.update({'IpProtocol': 'icmp', 'FromPort': '-1', 'ToPort': '-1'})
try:
results.append(
self.connection.request(self.path, params=params.copy()).object
)
except Exception:
e = sys.exc_info()[1]
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
return results
def ex_list_availability_zones(self, only_available=True):
"""
Return a list of :class:`ExEC2AvailabilityZone` objects for the
current region.
Note: This is an extension method and is only available for EC2
driver.
:keyword only_available: If true, return only availability zones
with state 'available'
:type only_available: ``str``
:rtype: ``list`` of :class:`ExEC2AvailabilityZone`
"""
params = {'Action': 'DescribeAvailabilityZones'}
filters = {'region-name': self.region_name}
if only_available:
filters['state'] = 'available'
params.update(self._build_filters(filters))
result = self.connection.request(self.path,
params=params.copy()).object
availability_zones = []
for element in findall(element=result,
xpath='availabilityZoneInfo/item',
namespace=NAMESPACE):
name = findtext(element=element, xpath='zoneName',
namespace=NAMESPACE)
zone_state = findtext(element=element, xpath='zoneState',
namespace=NAMESPACE)
region_name = findtext(element=element, xpath='regionName',
namespace=NAMESPACE)
availability_zone = ExEC2AvailabilityZone(
name=name,
zone_state=zone_state,
region_name=region_name
)
availability_zones.append(availability_zone)
return availability_zones
def ex_describe_tags(self, resource):
"""
Return a dictionary of tags for a resource (e.g. Node or
StorageVolume).
:param resource: resource which should be used
:type resource: any resource class, such as :class:`Node,`
:class:`StorageVolume,` or :class:NodeImage`
:return: dict Node tags
:rtype: ``dict``
"""
params = {'Action': 'DescribeTags'}
filters = {
'resource-id': resource.id
}
params.update(self._build_filters(filters))
result = self.connection.request(self.path, params=params).object
return self._get_resource_tags(result)
def ex_create_tags(self, resource, tags):
"""
Create tags for a resource (Node or StorageVolume).
:param resource: Resource to be tagged
:type resource: :class:`Node` or :class:`StorageVolume` or
:class:`VolumeSnapshot`
:param tags: A dictionary or other mapping of strings to strings,
associating tag names with tag values.
:type tags: ``dict``
:rtype: ``bool``
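
        Example (illustrative sketch only; ``driver`` is assumed to be an
        already-initialised EC2 driver instance with at least one node)::

            node = driver.list_nodes()[0]
            driver.ex_create_tags(node, {'Name': 'web-1', 'env': 'staging'})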
"""
if not tags:
return
params = {'Action': 'CreateTags',
'ResourceId.0': resource.id}
for i, key in enumerate(tags):
params['Tag.%d.Key' % i] = key
params['Tag.%d.Value' % i] = tags[key]
res = self.connection.request(self.path,
params=params.copy()).object
return self._get_boolean(res)
def ex_delete_tags(self, resource, tags):
"""
Delete tags from a resource.
:param resource: Resource to be tagged
:type resource: :class:`Node` or :class:`StorageVolume`
:param tags: A dictionary or other mapping of strings to strings,
specifying the tag names and tag values to be deleted.
:type tags: ``dict``
:rtype: ``bool``
"""
if not tags:
return
params = {'Action': 'DeleteTags',
'ResourceId.0': resource.id}
for i, key in enumerate(tags):
params['Tag.%d.Key' % i] = key
params['Tag.%d.Value' % i] = tags[key]
res = self.connection.request(self.path,
params=params.copy()).object
return self._get_boolean(res)
def ex_get_metadata_for_node(self, node):
"""
Return the metadata associated with the node.
:param node: Node instance
:type node: :class:`Node`
:return: A dictionary or other mapping of strings to strings,
associating tag names with tag values.
:rtype tags: ``dict``
"""
return node.extra['tags']
def ex_allocate_address(self, domain='standard'):
"""
Allocate a new Elastic IP address for EC2 classic or VPC
:param domain: The domain to allocate the new address in
(standard/vpc)
:type domain: ``str``
:return: Instance of ElasticIP
:rtype: :class:`ElasticIP`
"""
params = {'Action': 'AllocateAddress'}
if domain == 'vpc':
params['Domain'] = domain
response = self.connection.request(self.path, params=params).object
return self._to_address(response, only_associated=False)
def ex_release_address(self, elastic_ip, domain=None):
"""
Release an Elastic IP address using the IP (EC2-Classic) or
using the allocation ID (VPC)
:param elastic_ip: Elastic IP instance
:type elastic_ip: :class:`ElasticIP`
:param domain: The domain where the IP resides (vpc only)
:type domain: ``str``
:return: True on success, False otherwise.
:rtype: ``bool``
"""
params = {'Action': 'ReleaseAddress'}
if domain is not None and domain != 'vpc':
raise AttributeError('Domain can only be set to vpc')
if domain is None:
params['PublicIp'] = elastic_ip.ip
else:
params['AllocationId'] = elastic_ip.extra['allocation_id']
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
def ex_describe_all_addresses(self, only_associated=False):
"""
        Return all the Elastic IP addresses for this account.
        Optionally, return only addresses associated with nodes.
:param only_associated: If true, return only those addresses
that are associated with an instance.
:type only_associated: ``bool``
:return: List of ElasticIP instances.
:rtype: ``list`` of :class:`ElasticIP`
"""
params = {'Action': 'DescribeAddresses'}
response = self.connection.request(self.path, params=params).object
# We will send our only_associated boolean over to
# shape how the return data is sent back
return self._to_addresses(response, only_associated)
def ex_associate_address_with_node(self, node, elastic_ip, domain=None):
"""
Associate an Elastic IP address with a particular node.
:param node: Node instance
:type node: :class:`Node`
:param elastic_ip: Elastic IP instance
:type elastic_ip: :class:`ElasticIP`
:param domain: The domain where the IP resides (vpc only)
:type domain: ``str``
:return: A string representation of the association ID which is
required for VPC disassociation. EC2/standard
addresses return None
:rtype: ``None`` or ``str``
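
        Example (illustrative sketch only; ``driver`` is assumed to be an
        already-initialised EC2 driver instance with at least one node)::

            node = driver.list_nodes()[0]
            elastic_ip = driver.ex_allocate_address()
            driver.ex_associate_address_with_node(node, elastic_ip)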
"""
params = {'Action': 'AssociateAddress', 'InstanceId': node.id}
if domain is not None and domain != 'vpc':
raise AttributeError('Domain can only be set to vpc')
if domain is None:
params.update({'PublicIp': elastic_ip.ip})
else:
params.update({'AllocationId': elastic_ip.extra['allocation_id']})
response = self.connection.request(self.path, params=params).object
association_id = findtext(element=response,
xpath='associationId',
namespace=NAMESPACE)
return association_id
def ex_associate_addresses(self, node, elastic_ip, domain=None):
"""
Note: This method has been deprecated in favor of
the ex_associate_address_with_node method.
"""
return self.ex_associate_address_with_node(node=node,
elastic_ip=elastic_ip,
domain=domain)
def ex_disassociate_address(self, elastic_ip, domain=None):
"""
Disassociate an Elastic IP address using the IP (EC2-Classic)
or the association ID (VPC)
:param elastic_ip: ElasticIP instance
:type elastic_ip: :class:`ElasticIP`
:param domain: The domain where the IP resides (vpc only)
:type domain: ``str``
:return: True on success, False otherwise.
:rtype: ``bool``
"""
params = {'Action': 'DisassociateAddress'}
if domain is not None and domain != 'vpc':
raise AttributeError('Domain can only be set to vpc')
if domain is None:
params['PublicIp'] = elastic_ip.ip
else:
params['AssociationId'] = elastic_ip.extra['association_id']
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_describe_addresses(self, nodes):
"""
Return Elastic IP addresses for all the nodes in the provided list.
:param nodes: List of :class:`Node` instances
:type nodes: ``list`` of :class:`Node`
:return: Dictionary where a key is a node ID and the value is a
list with the Elastic IP addresses associated with
this node.
:rtype: ``dict``
"""
if not nodes:
return {}
params = {'Action': 'DescribeAddresses'}
if len(nodes) == 1:
self._add_instance_filter(params, nodes[0])
result = self.connection.request(self.path, params=params).object
node_instance_ids = [node.id for node in nodes]
nodes_elastic_ip_mappings = {}
# We will set only_associated to True so that we only get back
# IPs which are associated with instances
only_associated = True
for node_id in node_instance_ids:
nodes_elastic_ip_mappings.setdefault(node_id, [])
for addr in self._to_addresses(result,
only_associated):
instance_id = addr.instance_id
if node_id == instance_id:
nodes_elastic_ip_mappings[instance_id].append(
addr.ip)
return nodes_elastic_ip_mappings
def ex_describe_addresses_for_node(self, node):
"""
Return a list of Elastic IP addresses associated with this node.
:param node: Node instance
:type node: :class:`Node`
:return: list Elastic IP addresses attached to this node.
:rtype: ``list`` of ``str``
"""
node_elastic_ips = self.ex_describe_addresses([node])
return node_elastic_ips[node.id]
# Network interface management methods
def ex_list_network_interfaces(self):
"""
Return all network interfaces
:return: List of EC2NetworkInterface instances
:rtype: ``list`` of :class `EC2NetworkInterface`
"""
params = {'Action': 'DescribeNetworkInterfaces'}
return self._to_interfaces(
self.connection.request(self.path, params=params).object
)
def ex_create_network_interface(self, subnet, name=None,
description=None,
private_ip_address=None):
"""
Create a network interface within a VPC subnet.
:param subnet: EC2NetworkSubnet instance
:type subnet: :class:`EC2NetworkSubnet`
:param name: Optional name of the interface
:type name: ``str``
:param description: Optional description of the network interface
:type description: ``str``
:param private_ip_address: Optional address to assign as the
primary private IP address of the
interface. If one is not provided then
Amazon will automatically auto-assign
an available IP. EC2 allows assignment
of multiple IPs, but this will be
the primary.
:type private_ip_address: ``str``
:return: EC2NetworkInterface instance
:rtype: :class `EC2NetworkInterface`
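
        Example (illustrative sketch only; ``driver`` is assumed to be an
        already-initialised EC2 driver instance with at least one VPC
        subnet)::

            subnet = driver.ex_list_subnets()[0]
            iface = driver.ex_create_network_interface(subnet, name='my-eni')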
"""
params = {'Action': 'CreateNetworkInterface',
'SubnetId': subnet.id}
if description:
params['Description'] = description
if private_ip_address:
params['PrivateIpAddress'] = private_ip_address
response = self.connection.request(self.path, params=params).object
element = response.findall(fixxpath(xpath='networkInterface',
namespace=NAMESPACE))[0]
interface = self._to_interface(element, name)
if name and self.ex_create_tags(interface, {'Name': name}):
interface.extra['tags']['Name'] = name
return interface
def ex_delete_network_interface(self, network_interface):
"""
Deletes a network interface.
:param network_interface: EC2NetworkInterface instance
:type network_interface: :class:`EC2NetworkInterface`
:rtype: ``bool``
"""
params = {'Action': 'DeleteNetworkInterface',
'NetworkInterfaceId': network_interface.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_attach_network_interface_to_node(self, network_interface,
node, device_index):
"""
Attach a network interface to an instance.
:param network_interface: EC2NetworkInterface instance
:type network_interface: :class:`EC2NetworkInterface`
:param node: Node instance
:type node: :class:`Node`
:param device_index: The interface device index
:type device_index: ``int``
:return: String representation of the attachment id.
This is required to detach the interface.
:rtype: ``str``
"""
params = {'Action': 'AttachNetworkInterface',
'NetworkInterfaceId': network_interface.id,
'InstanceId': node.id,
'DeviceIndex': device_index}
response = self.connection.request(self.path, params=params).object
attachment_id = findattr(element=response, xpath='attachmentId',
namespace=NAMESPACE)
return attachment_id
def ex_detach_network_interface(self, attachment_id, force=False):
"""
Detach a network interface from an instance.
:param attachment_id: The attachment ID associated with the
interface
:type attachment_id: ``str``
:param force: Forces the detachment.
:type force: ``bool``
:return: ``True`` on successful detachment, ``False`` otherwise.
:rtype: ``bool``
"""
params = {'Action': 'DetachNetworkInterface',
'AttachmentId': attachment_id}
if force:
params['Force'] = True
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_modify_instance_attribute(self, node, attributes):
"""
Modify node attributes.
A list of valid attributes can be found at http://goo.gl/gxcj8
:param node: Node instance
:type node: :class:`Node`
:param attributes: Dictionary with node attributes
:type attributes: ``dict``
:return: True on success, False otherwise.
:rtype: ``bool``
"""
attributes = attributes or {}
attributes.update({'InstanceId': node.id})
params = {'Action': 'ModifyInstanceAttribute'}
params.update(attributes)
res = self.connection.request(self.path,
params=params.copy()).object
return self._get_boolean(res)
def ex_modify_image_attribute(self, image, attributes):
"""
Modify image attributes.
:param image: NodeImage instance
:type image: :class:`NodeImage`
:param attributes: Dictionary with node attributes
:type attributes: ``dict``
:return: True on success, False otherwise.
:rtype: ``bool``
"""
attributes = attributes or {}
attributes.update({'ImageId': image.id})
params = {'Action': 'ModifyImageAttribute'}
params.update(attributes)
res = self.connection.request(self.path,
params=params.copy()).object
return self._get_boolean(res)
def ex_change_node_size(self, node, new_size):
"""
Change the node size.
        Note: The node must be turned off before changing its size.
:param node: Node instance
:type node: :class:`Node`
        :param new_size: NodeSize instance
:type new_size: :class:`NodeSize`
:return: True on success, False otherwise.
:rtype: ``bool``
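
        Example (illustrative sketch only; ``driver`` is assumed to be an
        already-initialised EC2 driver instance and ``'m3.medium'`` is a
        placeholder size id)::

            node = driver.list_nodes()[0]
            new_size = [s for s in driver.list_sizes()
                        if s.id == 'm3.medium'][0]
            driver.ex_stop_node(node)
            driver.ex_change_node_size(node, new_size)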
"""
if 'instancetype' in node.extra:
current_instance_type = node.extra['instancetype']
if current_instance_type == new_size.id:
                raise ValueError('New instance size is the same as '
                                 'the current one')
attributes = {'InstanceType.Value': new_size.id}
return self.ex_modify_instance_attribute(node, attributes)
def ex_start_node(self, node):
"""
        Start the node by passing in the node object. This does not work
        with instance store backed instances.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'Action': 'StartInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_state_boolean(res)
def ex_stop_node(self, node):
"""
        Stop the node by passing in the node object. This does not work
        with instance store backed instances.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'Action': 'StopInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_state_boolean(res)
def ex_get_console_output(self, node):
"""
Get console output for the node.
:param node: Node which should be used
:type node: :class:`Node`
:return: Dictionary with the following keys:
- instance_id (``str``)
- timestamp (``datetime.datetime``) - ts of the last output
- output (``str``) - console output
:rtype: ``dict``
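
        Example (illustrative sketch only; ``driver`` is assumed to be an
        already-initialised EC2 driver instance with at least one node)::

            node = driver.list_nodes()[0]
            console = driver.ex_get_console_output(node)
            print(console['output'])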
"""
params = {
'Action': 'GetConsoleOutput',
'InstanceId': node.id
}
response = self.connection.request(self.path, params=params).object
timestamp = findattr(element=response,
xpath='timestamp',
namespace=NAMESPACE)
encoded_string = findattr(element=response,
xpath='output',
namespace=NAMESPACE)
timestamp = parse_date(timestamp)
if encoded_string:
output = base64.b64decode(b(encoded_string)).decode('utf-8')
else:
# No console output
output = None
return {'instance_id': node.id,
'timestamp': timestamp,
'output': output}
def ex_list_reserved_nodes(self):
"""
List all reserved instances/nodes which can be purchased from Amazon
for one or three year terms. Reservations are made at a region level
and reduce the hourly charge for instances.
More information can be found at http://goo.gl/ulXCC7.
:rtype: ``list`` of :class:`.EC2ReservedNode`
"""
params = {'Action': 'DescribeReservedInstances'}
response = self.connection.request(self.path, params=params).object
return self._to_reserved_nodes(response, 'reservedInstancesSet/item')
# Account specific methods
def ex_get_limits(self):
"""
Retrieve account resource limits.
:rtype: ``dict``
"""
attributes = ['max-instances', 'max-elastic-ips',
'vpc-max-elastic-ips']
params = {}
params['Action'] = 'DescribeAccountAttributes'
for index, attribute in enumerate(attributes):
            params['AttributeName.%s' % (index + 1)] = attribute
response = self.connection.request(self.path, params=params)
data = response.object
elems = data.findall(fixxpath(xpath='accountAttributeSet/item',
namespace=NAMESPACE))
result = {'resource': {}}
for elem in elems:
name = findtext(element=elem, xpath='attributeName',
namespace=NAMESPACE)
value = findtext(element=elem,
xpath='attributeValueSet/item/attributeValue',
namespace=NAMESPACE)
result['resource'][name] = int(value)
return result
# Deprecated extension methods
def ex_list_keypairs(self):
"""
Lists all the keypair names and fingerprints.
:rtype: ``list`` of ``dict``
"""
warnings.warn('This method has been deprecated in favor of '
'list_key_pairs method')
key_pairs = self.list_key_pairs()
result = []
for key_pair in key_pairs:
item = {
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint,
}
result.append(item)
return result
def ex_describe_all_keypairs(self):
"""
Return names for all the available key pairs.
@note: This is a non-standard extension API, and only works for EC2.
:rtype: ``list`` of ``str``
"""
names = [key_pair.name for key_pair in self.list_key_pairs()]
return names
def ex_describe_keypairs(self, name):
"""
Here for backward compatibility.
"""
return self.ex_describe_keypair(name=name)
def ex_describe_keypair(self, name):
"""
Describes a keypair by name.
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the keypair to describe.
:type name: ``str``
:rtype: ``dict``
"""
params = {
'Action': 'DescribeKeyPairs',
'KeyName.1': name
}
response = self.connection.request(self.path, params=params).object
key_name = findattr(element=response, xpath='keySet/item/keyName',
namespace=NAMESPACE)
fingerprint = findattr(element=response,
xpath='keySet/item/keyFingerprint',
namespace=NAMESPACE).strip()
return {
'keyName': key_name,
'keyFingerprint': fingerprint
}
def ex_create_keypair(self, name):
"""
Creates a new keypair
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the keypair to Create. This must be
unique, otherwise an InvalidKeyPair.Duplicate exception is raised.
:type name: ``str``
:rtype: ``dict``
"""
warnings.warn('This method has been deprecated in favor of '
'create_key_pair method')
key_pair = self.create_key_pair(name=name)
result = {
'keyMaterial': key_pair.private_key,
'keyFingerprint': key_pair.fingerprint
}
return result
def ex_delete_keypair(self, keypair):
"""
Delete a key pair by name.
@note: This is a non-standard extension API, and only works with EC2.
:param keypair: The name of the keypair to delete.
:type keypair: ``str``
:rtype: ``bool``
"""
warnings.warn('This method has been deprecated in favor of '
'delete_key_pair method')
keypair = KeyPair(name=keypair, public_key=None, fingerprint=None,
driver=self)
return self.delete_key_pair(keypair)
def ex_import_keypair_from_string(self, name, key_material):
"""
        Imports a new public key where the public key is passed in as a string.
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the public key to import. This must be
unique, otherwise an InvalidKeyPair.Duplicate exception is raised.
:type name: ``str``
:param key_material: The contents of a public key file.
:type key_material: ``str``
:rtype: ``dict``
"""
warnings.warn('This method has been deprecated in favor of '
'import_key_pair_from_string method')
key_pair = self.import_key_pair_from_string(name=name,
key_material=key_material)
result = {
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint
}
return result
def ex_import_keypair(self, name, keyfile):
"""
        Imports a new public key where the public key is passed via a filename.
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the public key to import. This must be
unique, otherwise an InvalidKeyPair.Duplicate exception is raised.
:type name: ``str``
:param keyfile: The filename with path of the public key to import.
:type keyfile: ``str``
:rtype: ``dict``
"""
warnings.warn('This method has been deprecated in favor of '
'import_key_pair_from_file method')
key_pair = self.import_key_pair_from_file(name=name,
key_file_path=keyfile)
result = {
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint
}
return result
def ex_find_or_import_keypair_by_key_material(self, pubkey):
"""
Given a public key, look it up in the EC2 KeyPair database. If it
exists, return any information we have about it. Otherwise, create it.
Keys that are created are named based on their comment and fingerprint.
:rtype: ``dict``
"""
key_fingerprint = get_pubkey_ssh2_fingerprint(pubkey)
key_comment = get_pubkey_comment(pubkey, default='unnamed')
key_name = '%s-%s' % (key_comment, key_fingerprint)
key_pairs = self.list_key_pairs()
key_pairs = [key_pair for key_pair in key_pairs if
key_pair.fingerprint == key_fingerprint]
if len(key_pairs) >= 1:
key_pair = key_pairs[0]
result = {
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint
}
else:
result = self.ex_import_keypair_from_string(key_name, pubkey)
return result
def ex_list_internet_gateways(self, gateway_ids=None, filters=None):
"""
Describes available Internet gateways and whether or not they are
attached to a VPC. These are required for VPC nodes to communicate
over the Internet.
        :param gateway_ids: Return only internet gateways matching the
provided internet gateway IDs. If not
specified, a list of all the internet
gateways in the corresponding region is
returned.
:type gateway_ids: ``list``
:param filters: The filters so that the response includes
information for only certain gateways.
:type filters: ``dict``
:rtype: ``list`` of :class:`.VPCInternetGateway`
"""
params = {'Action': 'DescribeInternetGateways'}
if gateway_ids:
params.update(self._pathlist('InternetGatewayId', gateway_ids))
if filters:
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params).object
return self._to_internet_gateways(response, 'internetGatewaySet/item')
def ex_create_internet_gateway(self, name=None):
"""
        Create a VPC Internet gateway
        :param name: An optional name for the gateway
        :type name: ``str``
        :rtype: :class:`.VPCInternetGateway`
"""
params = {'Action': 'CreateInternetGateway'}
resp = self.connection.request(self.path, params=params).object
element = resp.findall(fixxpath(xpath='internetGateway',
namespace=NAMESPACE))
gateway = self._to_internet_gateway(element[0], name)
if name and self.ex_create_tags(gateway, {'Name': name}):
gateway.extra['tags']['Name'] = name
return gateway
def ex_delete_internet_gateway(self, gateway):
"""
Delete a VPC Internet gateway
:param gateway: The gateway to delete
:type gateway: :class:`.VPCInternetGateway`
:rtype: ``bool``
"""
params = {'Action': 'DeleteInternetGateway',
'InternetGatewayId': gateway.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_attach_internet_gateway(self, gateway, network):
"""
Attach an Internet gateway to a VPC
:param gateway: The gateway to attach
:type gateway: :class:`.VPCInternetGateway`
:param network: The VPC network to attach to
:type network: :class:`.EC2Network`
:rtype: ``bool``
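
        Example (illustrative sketch only; ``driver`` is assumed to be an
        already-initialised EC2 driver instance with at least one VPC)::

            gateway = driver.ex_create_internet_gateway(name='my-igw')
            vpc = driver.ex_list_networks()[0]
            driver.ex_attach_internet_gateway(gateway, vpc)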
"""
params = {'Action': 'AttachInternetGateway',
'InternetGatewayId': gateway.id,
'VpcId': network.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_detach_internet_gateway(self, gateway, network):
"""
Detach an Internet gateway from a VPC
:param gateway: The gateway to detach
:type gateway: :class:`.VPCInternetGateway`
:param network: The VPC network to detach from
:type network: :class:`.EC2Network`
:rtype: ``bool``
"""
params = {'Action': 'DetachInternetGateway',
'InternetGatewayId': gateway.id,
'VpcId': network.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_list_route_tables(self, route_table_ids=None, filters=None):
"""
Describes one or more of a VPC's route tables.
These are used to determine where network traffic is directed.
:param route_table_ids: Return only route tables matching the
provided route table IDs. If not specified,
a list of all the route tables in the
corresponding region is returned.
:type route_table_ids: ``list``
:param filters: The filters so that the response includes
information for only certain route tables.
:type filters: ``dict``
:rtype: ``list`` of :class:`.EC2RouteTable`
"""
params = {'Action': 'DescribeRouteTables'}
if route_table_ids:
params.update(self._pathlist('RouteTableId', route_table_ids))
if filters:
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params)
return self._to_route_tables(response.object)
def ex_create_route_table(self, network, name=None):
"""
Create a route table within a VPC.
        :param network: The VPC that the route table should be created in.
        :type network: :class:`.EC2Network`
        :param name: An optional name for the route table
        :type name: ``str``
:rtype: :class: `.EC2RouteTable`
"""
params = {'Action': 'CreateRouteTable',
'VpcId': network.id}
response = self.connection.request(self.path, params=params).object
element = response.findall(fixxpath(xpath='routeTable',
namespace=NAMESPACE))[0]
route_table = self._to_route_table(element, name=name)
if name and self.ex_create_tags(route_table, {'Name': name}):
route_table.extra['tags']['Name'] = name
return route_table
def ex_delete_route_table(self, route_table):
"""
Deletes a VPC route table.
:param route_table: The route table to delete.
:type route_table: :class:`.EC2RouteTable`
:rtype: ``bool``
"""
params = {'Action': 'DeleteRouteTable',
'RouteTableId': route_table.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_associate_route_table(self, route_table, subnet):
"""
Associates a route table with a subnet within a VPC.
Note: A route table can be associated with multiple subnets.
:param route_table: The route table to associate.
:type route_table: :class:`.EC2RouteTable`
:param subnet: The subnet to associate with.
:type subnet: :class:`.EC2Subnet`
:return: Route table association ID.
:rtype: ``str``
"""
params = {'Action': 'AssociateRouteTable',
'RouteTableId': route_table.id,
'SubnetId': subnet.id}
result = self.connection.request(self.path, params=params).object
association_id = findtext(element=result,
xpath='associationId',
namespace=NAMESPACE)
return association_id
def ex_dissociate_route_table(self, subnet_association):
"""
Dissociates a subnet from a route table.
:param subnet_association: The subnet association object or
subnet association ID.
:type subnet_association: :class:`.EC2SubnetAssociation` or
``str``
:rtype: ``bool``
"""
if isinstance(subnet_association, EC2SubnetAssociation):
subnet_association_id = subnet_association.id
else:
subnet_association_id = subnet_association
params = {'Action': 'DisassociateRouteTable',
'AssociationId': subnet_association_id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_replace_route_table_association(self, subnet_association,
route_table):
"""
Changes the route table associated with a given subnet in a VPC.
Note: This method can be used to change which table is the main route
table in the VPC (Specify the main route table's association ID
and the route table to be the new main route table).
:param subnet_association: The subnet association object or
subnet association ID.
:type subnet_association: :class:`.EC2SubnetAssociation` or
``str``
:param route_table: The new route table to associate.
:type route_table: :class:`.EC2RouteTable`
:return: New route table association ID.
:rtype: ``str``
"""
if isinstance(subnet_association, EC2SubnetAssociation):
subnet_association_id = subnet_association.id
else:
subnet_association_id = subnet_association
params = {'Action': 'ReplaceRouteTableAssociation',
'AssociationId': subnet_association_id,
'RouteTableId': route_table.id}
result = self.connection.request(self.path, params=params).object
new_association_id = findtext(element=result,
xpath='newAssociationId',
namespace=NAMESPACE)
return new_association_id
def ex_create_route(self, route_table, cidr,
internet_gateway=None, node=None,
network_interface=None, vpc_peering_connection=None):
"""
Creates a route entry in the route table.
:param route_table: The route table to create the route in.
:type route_table: :class:`.EC2RouteTable`
:param cidr: The CIDR block used for the destination match.
:type cidr: ``str``
:param internet_gateway: The internet gateway to route
traffic through.
:type internet_gateway: :class:`.VPCInternetGateway`
:param node: The NAT instance to route traffic through.
:type node: :class:`Node`
:param network_interface: The network interface of the node
to route traffic through.
:type network_interface: :class:`.EC2NetworkInterface`
:param vpc_peering_connection: The VPC peering connection.
:type vpc_peering_connection: :class:`.VPCPeeringConnection`
:rtype: ``bool``
Note: You must specify one of the following: internet_gateway,
node, network_interface, vpc_peering_connection.
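
        Example (illustrative sketch only; ``driver`` is assumed to be an
        already-initialised EC2 driver instance with an existing route
        table and internet gateway)::

            route_table = driver.ex_list_route_tables()[0]
            gateway = driver.ex_list_internet_gateways()[0]
            driver.ex_create_route(route_table, '0.0.0.0/0',
                                   internet_gateway=gateway)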
"""
params = {'Action': 'CreateRoute',
'RouteTableId': route_table.id,
'DestinationCidrBlock': cidr}
if internet_gateway:
params['GatewayId'] = internet_gateway.id
if node:
params['InstanceId'] = node.id
if network_interface:
params['NetworkInterfaceId'] = network_interface.id
if vpc_peering_connection:
params['VpcPeeringConnectionId'] = vpc_peering_connection.id
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_delete_route(self, route_table, cidr):
"""
Deletes a route entry from the route table.
:param route_table: The route table to delete the route from.
:type route_table: :class:`.EC2RouteTable`
:param cidr: The CIDR block used for the destination match.
:type cidr: ``str``
:rtype: ``bool``
"""
params = {'Action': 'DeleteRoute',
'RouteTableId': route_table.id,
'DestinationCidrBlock': cidr}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_replace_route(self, route_table, cidr,
internet_gateway=None, node=None,
network_interface=None, vpc_peering_connection=None):
"""
Replaces an existing route entry within a route table in a VPC.
:param route_table: The route table to replace the route in.
:type route_table: :class:`.EC2RouteTable`
:param cidr: The CIDR block used for the destination match.
:type cidr: ``str``
:param internet_gateway: The new internet gateway to route
traffic through.
:type internet_gateway: :class:`.VPCInternetGateway`
:param node: The new NAT instance to route traffic through.
:type node: :class:`Node`
:param network_interface: The new network interface of the node
to route traffic through.
:type network_interface: :class:`.EC2NetworkInterface`
:param vpc_peering_connection: The new VPC peering connection.
:type vpc_peering_connection: :class:`.VPCPeeringConnection`
:rtype: ``bool``
Note: You must specify one of the following: internet_gateway,
node, network_interface, vpc_peering_connection.
"""
params = {'Action': 'ReplaceRoute',
'RouteTableId': route_table.id,
'DestinationCidrBlock': cidr}
if internet_gateway:
params['GatewayId'] = internet_gateway.id
if node:
params['InstanceId'] = node.id
if network_interface:
params['NetworkInterfaceId'] = network_interface.id
if vpc_peering_connection:
params['VpcPeeringConnectionId'] = vpc_peering_connection.id
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def _ex_connection_class_kwargs(self):
kwargs = super(BaseEC2NodeDriver, self)._ex_connection_class_kwargs()
kwargs['signature_version'] = self.signature_version
return kwargs
def _to_nodes(self, object, xpath):
return [self._to_node(el)
for el in object.findall(fixxpath(xpath=xpath,
namespace=NAMESPACE))]
def _to_node(self, element):
try:
state = self.NODE_STATE_MAP[findattr(element=element,
xpath="instanceState/name",
namespace=NAMESPACE)
]
except KeyError:
state = NodeState.UNKNOWN
instance_id = findtext(element=element, xpath='instanceId',
namespace=NAMESPACE)
public_ip = findtext(element=element, xpath='ipAddress',
namespace=NAMESPACE)
public_ips = [public_ip] if public_ip else []
private_ip = findtext(element=element, xpath='privateIpAddress',
namespace=NAMESPACE)
private_ips = [private_ip] if private_ip else []
product_codes = []
for p in findall(element=element,
xpath="productCodesSet/item/productCode",
namespace=NAMESPACE):
product_codes.append(p)
# Get our tags
tags = self._get_resource_tags(element)
name = tags.get('Name', instance_id)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['node'])
# Add additional properties to our extra dictionary
extra['block_device_mapping'] = self._to_device_mappings(element)
extra['groups'] = self._get_security_groups(element)
extra['network_interfaces'] = self._to_interfaces(element)
extra['product_codes'] = product_codes
extra['tags'] = tags
return Node(id=instance_id, name=name, state=state,
public_ips=public_ips, private_ips=private_ips,
driver=self.connection.driver, extra=extra)
def _to_images(self, object):
return [self._to_image(el) for el in object.findall(
fixxpath(xpath='imagesSet/item', namespace=NAMESPACE))
]
def _to_image(self, element):
id = findtext(element=element, xpath='imageId', namespace=NAMESPACE)
name = findtext(element=element, xpath='name', namespace=NAMESPACE)
# Build block device mapping
block_device_mapping = self._to_device_mappings(element)
# Get our tags
tags = self._get_resource_tags(element)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['image'])
# Add our tags and block device mapping
extra['tags'] = tags
extra['block_device_mapping'] = block_device_mapping
return NodeImage(id=id, name=name, driver=self, extra=extra)
def _to_volume(self, element, name=None):
"""
Parse the XML element and return a StorageVolume object.
:param name: An optional name for the volume. If not provided
then either tag with a key "Name" or volume ID
                     will be used (whichever is available first in that
                     order).
:type name: ``str``
:rtype: :class:`StorageVolume`
"""
volId = findtext(element=element, xpath='volumeId',
namespace=NAMESPACE)
size = findtext(element=element, xpath='size', namespace=NAMESPACE)
raw_state = findtext(element=element, xpath='status',
namespace=NAMESPACE)
state = self.VOLUME_STATE_MAP.get(raw_state,
StorageVolumeState.UNKNOWN)
# Get our tags
tags = self._get_resource_tags(element)
        # If name was not passed into the method, fall back to the
        # 'Name' tag or the volume id
name = name if name else tags.get('Name', volId)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['volume'])
extra['tags'] = tags
return StorageVolume(id=volId,
name=name,
size=int(size),
driver=self,
state=state,
extra=extra)
def _to_snapshots(self, response):
return [self._to_snapshot(el) for el in response.findall(
fixxpath(xpath='snapshotSet/item', namespace=NAMESPACE))
]
def _to_snapshot(self, element, name=None):
snapId = findtext(element=element, xpath='snapshotId',
namespace=NAMESPACE)
size = findtext(element=element, xpath='volumeSize',
namespace=NAMESPACE)
created = parse_date(findtext(element=element, xpath='startTime',
namespace=NAMESPACE))
# Get our tags
tags = self._get_resource_tags(element)
        # If name was not passed into the method, fall back to the
        # 'Name' tag or the snapshot id
name = name if name else tags.get('Name', snapId)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['snapshot'])
# Add tags and name to the extra dict
extra['tags'] = tags
extra['name'] = name
return VolumeSnapshot(snapId, size=int(size),
driver=self, extra=extra, created=created)
def _to_key_pairs(self, elems):
key_pairs = [self._to_key_pair(elem=elem) for elem in elems]
return key_pairs
def _to_key_pair(self, elem):
name = findtext(element=elem, xpath='keyName', namespace=NAMESPACE)
fingerprint = findtext(element=elem, xpath='keyFingerprint',
namespace=NAMESPACE).strip()
private_key = findtext(element=elem, xpath='keyMaterial',
namespace=NAMESPACE)
key_pair = KeyPair(name=name,
public_key=None,
fingerprint=fingerprint,
private_key=private_key,
driver=self)
return key_pair
def _to_security_groups(self, response):
return [self._to_security_group(el) for el in response.findall(
fixxpath(xpath='securityGroupInfo/item', namespace=NAMESPACE))
]
def _to_security_group(self, element):
# security group id
sg_id = findtext(element=element,
xpath='groupId',
namespace=NAMESPACE)
# security group name
name = findtext(element=element,
xpath='groupName',
namespace=NAMESPACE)
# Get our tags
tags = self._get_resource_tags(element)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['security_group'])
# Add tags to the extra dict
extra['tags'] = tags
# Get ingress rules
ingress_rules = self._to_security_group_rules(
element, 'ipPermissions/item'
)
# Get egress rules
egress_rules = self._to_security_group_rules(
element, 'ipPermissionsEgress/item'
)
return EC2SecurityGroup(sg_id, name, ingress_rules,
egress_rules, extra=extra)
def _to_security_group_rules(self, element, xpath):
return [self._to_security_group_rule(el) for el in element.findall(
fixxpath(xpath=xpath, namespace=NAMESPACE))
]
def _to_security_group_rule(self, element):
"""
Parse the XML element and return a SecurityGroup object.
:rtype: :class:`EC2SecurityGroup`
"""
rule = {}
rule['protocol'] = findtext(element=element,
xpath='ipProtocol',
namespace=NAMESPACE)
rule['from_port'] = findtext(element=element,
xpath='fromPort',
namespace=NAMESPACE)
rule['to_port'] = findtext(element=element,
xpath='toPort',
namespace=NAMESPACE)
# get security groups
elements = element.findall(fixxpath(
xpath='groups/item',
namespace=NAMESPACE
))
rule['group_pairs'] = []
for element in elements:
item = {
'user_id': findtext(
element=element,
xpath='userId',
namespace=NAMESPACE),
'group_id': findtext(
element=element,
xpath='groupId',
namespace=NAMESPACE),
'group_name': findtext(
element=element,
xpath='groupName',
namespace=NAMESPACE)
}
rule['group_pairs'].append(item)
# get ip ranges
elements = element.findall(fixxpath(
xpath='ipRanges/item',
namespace=NAMESPACE
))
rule['cidr_ips'] = [
findtext(
element=element,
xpath='cidrIp',
namespace=NAMESPACE
) for element in elements]
return rule
def _to_networks(self, response):
return [self._to_network(el) for el in response.findall(
fixxpath(xpath='vpcSet/item', namespace=NAMESPACE))
]
def _to_network(self, element, name=None):
# Get the network id
vpc_id = findtext(element=element,
xpath='vpcId',
namespace=NAMESPACE)
# Get our tags
tags = self._get_resource_tags(element)
        # Set our name from the 'Name' tag if available,
        # otherwise fall back to the vpc_id
name = name if name else tags.get('Name', vpc_id)
cidr_block = findtext(element=element,
xpath='cidrBlock',
namespace=NAMESPACE)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['network'])
# Add tags to the extra dict
extra['tags'] = tags
return EC2Network(vpc_id, name, cidr_block, extra=extra)
def _to_addresses(self, response, only_associated):
"""
Builds a list of dictionaries containing elastic IP properties.
:param only_associated: If true, return only those addresses
that are associated with an instance.
If false, return all addresses.
:type only_associated: ``bool``
:rtype: ``list`` of :class:`ElasticIP`
"""
addresses = []
for el in response.findall(fixxpath(xpath='addressesSet/item',
namespace=NAMESPACE)):
addr = self._to_address(el, only_associated)
if addr is not None:
addresses.append(addr)
return addresses
def _to_address(self, element, only_associated):
instance_id = findtext(element=element, xpath='instanceId',
namespace=NAMESPACE)
public_ip = findtext(element=element,
xpath='publicIp',
namespace=NAMESPACE)
domain = findtext(element=element,
xpath='domain',
namespace=NAMESPACE)
# Build our extra dict
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['elastic_ip'])
# Return NoneType if only associated IPs are requested
if only_associated and not instance_id:
return None
return ElasticIP(public_ip, domain, instance_id, extra=extra)
def _to_placement_groups(self, response):
return [self._to_placement_group(el)
for el in response.findall(
fixxpath(xpath='placementGroupSet/item',
namespace=NAMESPACE))]
def _to_placement_group(self, element):
name = findtext(element=element,
xpath='groupName',
namespace=NAMESPACE)
state = findtext(element=element,
xpath='state',
namespace=NAMESPACE)
strategy = findtext(element=element,
xpath='strategy',
namespace=NAMESPACE)
return EC2PlacementGroup(name, state, strategy)
def _to_subnets(self, response):
return [self._to_subnet(el) for el in response.findall(
fixxpath(xpath='subnetSet/item', namespace=NAMESPACE))
]
def _to_subnet(self, element, name=None):
# Get the subnet ID
subnet_id = findtext(element=element,
xpath='subnetId',
namespace=NAMESPACE)
# Get our tags
tags = self._get_resource_tags(element)
# If we don't get anything back then use the subnet_id
name = name if name else tags.get('Name', subnet_id)
state = findtext(element=element,
xpath='state',
namespace=NAMESPACE)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['subnet'])
# Also include our tags
extra['tags'] = tags
return EC2NetworkSubnet(subnet_id, name, state, extra=extra)
def _to_interfaces(self, response):
return [self._to_interface(el) for el in response.findall(
fixxpath(xpath='networkInterfaceSet/item', namespace=NAMESPACE))
]
def _to_interface(self, element, name=None):
"""
Parse the XML element and return an EC2NetworkInterface object.
:param name: An optional name for the interface. If not provided
then either tag with a key "Name" or the interface ID
will be used (whichever is available first in that
order).
:type name: ``str``
:rtype: :class: `EC2NetworkInterface`
"""
interface_id = findtext(element=element,
xpath='networkInterfaceId',
namespace=NAMESPACE)
state = findtext(element=element,
xpath='status',
namespace=NAMESPACE)
# Get tags
tags = self._get_resource_tags(element)
name = name if name else tags.get('Name', interface_id)
# Build security groups
groups = self._get_security_groups(element)
# Build private IPs
priv_ips = []
for item in findall(element=element,
xpath='privateIpAddressesSet/item',
namespace=NAMESPACE):
priv_ips.append({'private_ip': findtext(element=item,
xpath='privateIpAddress',
namespace=NAMESPACE),
'private_dns': findtext(element=item,
xpath='privateDnsName',
namespace=NAMESPACE),
'primary': findtext(element=item,
xpath='primary',
namespace=NAMESPACE)})
# Build our attachment dictionary which we will add into extra later
attributes_map = \
RESOURCE_EXTRA_ATTRIBUTES_MAP['network_interface_attachment']
attachment = self._get_extra_dict(element, attributes_map)
# Build our extra dict
attributes_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['network_interface']
extra = self._get_extra_dict(element, attributes_map)
# Include our previously built items as well
extra['tags'] = tags
extra['attachment'] = attachment
extra['private_ips'] = priv_ips
extra['groups'] = groups
return EC2NetworkInterface(interface_id, name, state, extra=extra)
def _to_reserved_nodes(self, object, xpath):
return [self._to_reserved_node(el)
for el in object.findall(fixxpath(xpath=xpath,
namespace=NAMESPACE))]
def _to_reserved_node(self, element):
"""
Build an EC2ReservedNode object using the reserved instance properties.
Information on these properties can be found at http://goo.gl/ulXCC7.
"""
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['reserved_node'])
try:
size = [size for size in self.list_sizes() if
size.id == extra['instance_type']][0]
except IndexError:
size = None
return EC2ReservedNode(id=findtext(element=element,
xpath='reservedInstancesId',
namespace=NAMESPACE),
state=findattr(element=element,
xpath='state',
namespace=NAMESPACE),
driver=self,
size=size,
extra=extra)
def _to_device_mappings(self, object):
return [self._to_device_mapping(el) for el in object.findall(
fixxpath(xpath='blockDeviceMapping/item', namespace=NAMESPACE))
]
def _to_device_mapping(self, element):
"""
Parse the XML element and return a dictionary of device properties.
Additional information can be found at http://goo.gl/GjWYBf.
@note: EBS volumes do not have a virtual name. Only ephemeral
disks use this property.
:rtype: ``dict``
"""
mapping = {}
mapping['device_name'] = findattr(element=element,
xpath='deviceName',
namespace=NAMESPACE)
mapping['virtual_name'] = findattr(element=element,
xpath='virtualName',
namespace=NAMESPACE)
# If virtual name does not exist then this is an EBS volume.
# Build the EBS dictionary leveraging the _get_extra_dict method.
if mapping['virtual_name'] is None:
mapping['ebs'] = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['ebs_volume'])
return mapping
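    # Illustrative shape of the dict returned above for an EBS-backed mapping
    # (example values, not taken from a live API response):
    #   {'device_name': '/dev/sda1',
    #    'virtual_name': None,
    #    'ebs': {...attributes listed in RESOURCE_EXTRA_ATTRIBUTES_MAP['ebs_volume']...}}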
def _to_internet_gateways(self, object, xpath):
return [self._to_internet_gateway(el)
for el in object.findall(fixxpath(xpath=xpath,
namespace=NAMESPACE))]
def _to_internet_gateway(self, element, name=None):
id = findtext(element=element,
xpath='internetGatewayId',
namespace=NAMESPACE)
vpc_id = findtext(element=element,
xpath='attachmentSet/item/vpcId',
namespace=NAMESPACE)
state = findtext(element=element,
xpath='attachmentSet/item/state',
namespace=NAMESPACE)
# If there's no attachment state, let's
# set it to available
if not state:
state = 'available'
# Get our tags
tags = self._get_resource_tags(element)
        # If no name was passed into the method, fall back to
        # the 'Name' tag or, failing that, the gateway id
name = name if name else tags.get('Name', id)
return VPCInternetGateway(id=id, name=name, vpc_id=vpc_id,
state=state, driver=self.connection.driver,
extra={'tags': tags})
def _to_route_tables(self, response):
return [self._to_route_table(el) for el in response.findall(
fixxpath(xpath='routeTableSet/item', namespace=NAMESPACE))
]
def _to_route_table(self, element, name=None):
# route table id
route_table_id = findtext(element=element,
xpath='routeTableId',
namespace=NAMESPACE)
# Get our tags
tags = self._get_resource_tags(element)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['route_table'])
# Add tags to the extra dict
extra['tags'] = tags
# Get routes
routes = self._to_routes(element, 'routeSet/item')
# Get subnet associations
subnet_associations = self._to_subnet_associations(
element, 'associationSet/item')
# Get propagating routes virtual private gateways (VGW) IDs
propagating_gateway_ids = []
for el in element.findall(fixxpath(xpath='propagatingVgwSet/item',
namespace=NAMESPACE)):
propagating_gateway_ids.append(findtext(element=el,
xpath='gatewayId',
namespace=NAMESPACE))
        name = name if name else tags.get('Name', route_table_id)
return EC2RouteTable(route_table_id, name, routes, subnet_associations,
propagating_gateway_ids, extra=extra)
def _to_routes(self, element, xpath):
return [self._to_route(el) for el in element.findall(
fixxpath(xpath=xpath, namespace=NAMESPACE))
]
def _to_route(self, element):
"""
Parse the XML element and return a route object
:rtype: :class: `EC2Route`
"""
destination_cidr = findtext(element=element,
xpath='destinationCidrBlock',
namespace=NAMESPACE)
gateway_id = findtext(element=element,
xpath='gatewayId',
namespace=NAMESPACE)
instance_id = findtext(element=element,
xpath='instanceId',
namespace=NAMESPACE)
owner_id = findtext(element=element,
xpath='instanceOwnerId',
namespace=NAMESPACE)
interface_id = findtext(element=element,
xpath='networkInterfaceId',
namespace=NAMESPACE)
state = findtext(element=element,
xpath='state',
namespace=NAMESPACE)
origin = findtext(element=element,
xpath='origin',
namespace=NAMESPACE)
vpc_peering_connection_id = findtext(element=element,
xpath='vpcPeeringConnectionId',
namespace=NAMESPACE)
return EC2Route(destination_cidr, gateway_id, instance_id, owner_id,
interface_id, state, origin, vpc_peering_connection_id)
def _to_subnet_associations(self, element, xpath):
return [self._to_subnet_association(el) for el in element.findall(
fixxpath(xpath=xpath, namespace=NAMESPACE))
]
def _to_subnet_association(self, element):
"""
Parse the XML element and return a route table association object
:rtype: :class: `EC2SubnetAssociation`
"""
association_id = findtext(element=element,
xpath='routeTableAssociationId',
namespace=NAMESPACE)
route_table_id = findtext(element=element,
xpath='routeTableId',
namespace=NAMESPACE)
subnet_id = findtext(element=element,
xpath='subnetId',
namespace=NAMESPACE)
main = findtext(element=element,
xpath='main',
namespace=NAMESPACE)
main = True if main else False
return EC2SubnetAssociation(association_id, route_table_id,
subnet_id, main)
def _pathlist(self, key, arr):
"""
Converts a key and an array of values into AWS query param format.
"""
params = {}
i = 0
for value in arr:
i += 1
params['%s.%s' % (key, i)] = value
return params
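    # For example (illustrative values, not taken from the original code):
    #   self._pathlist('InstanceId', ['i-1234', 'i-5678'])
    #   -> {'InstanceId.1': 'i-1234', 'InstanceId.2': 'i-5678'}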
def _get_boolean(self, element):
tag = '{%s}%s' % (NAMESPACE, 'return')
return element.findtext(tag) == 'true'
def _get_terminate_boolean(self, element):
status = element.findtext(".//{%s}%s" % (NAMESPACE, 'name'))
return any([term_status == status
for term_status
in ('shutting-down', 'terminated')])
def _add_instance_filter(self, params, node):
"""
Add instance filter to the provided params dictionary.
"""
filters = {'instance-id': node.id}
params.update(self._build_filters(filters))
return params
def _get_state_boolean(self, element):
"""
        Checks the instance's state
"""
state = findall(element=element,
xpath='instancesSet/item/currentState/name',
namespace=NAMESPACE)[0].text
return state in ('stopping', 'pending', 'starting')
def _get_extra_dict(self, element, mapping):
"""
Extract attributes from the element based on rules provided in the
mapping dictionary.
:param element: Element to parse the values from.
:type element: xml.etree.ElementTree.Element.
:param mapping: Dictionary with the extra layout
        :type mapping: ``dict``
:rtype: ``dict``
"""
extra = {}
for attribute, values in mapping.items():
transform_func = values['transform_func']
value = findattr(element=element,
xpath=values['xpath'],
namespace=NAMESPACE)
if value is not None:
extra[attribute] = transform_func(value)
else:
extra[attribute] = None
return extra
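    # A mapping entry is expected to look roughly like this (hypothetical key
    # name, shown only for illustration):
    #   {'some_attribute': {'xpath': 'someXmlTag', 'transform_func': int}}
    # i.e. the text found at 'xpath' is read from the element and passed
    # through 'transform_func' before being stored under 'some_attribute'.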
def _get_resource_tags(self, element):
"""
Parse tags from the provided element and return a dictionary with
key/value pairs.
:rtype: ``dict``
"""
tags = {}
# Get our tag set by parsing the element
tag_set = findall(element=element,
xpath='tagSet/item',
namespace=NAMESPACE)
for tag in tag_set:
key = findtext(element=tag,
xpath='key',
namespace=NAMESPACE)
value = findtext(element=tag,
xpath='value',
namespace=NAMESPACE)
tags[key] = value
return tags
def _get_block_device_mapping_params(self, block_device_mapping):
"""
        Return a dictionary of query parameters for
        a valid block device mapping.
        :param block_device_mapping: List of dictionaries with the drive layout
        :type block_device_mapping: ``list`` of ``dict``
:return: Dictionary representation of the drive mapping
:rtype: ``dict``
"""
if not isinstance(block_device_mapping, (list, tuple)):
raise AttributeError(
'block_device_mapping not list or tuple')
params = {}
for idx, mapping in enumerate(block_device_mapping):
idx += 1 # We want 1-based indexes
if not isinstance(mapping, dict):
raise AttributeError(
'mapping %s in block_device_mapping '
'not a dict' % mapping)
for k, v in mapping.items():
if not isinstance(v, dict):
params['BlockDeviceMapping.%d.%s' % (idx, k)] = str(v)
else:
for key, value in v.items():
params['BlockDeviceMapping.%d.%s.%s'
% (idx, k, key)] = str(value)
return params
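    # Example of the flattening performed above (illustrative values only):
    #   [{'DeviceName': '/dev/sda1', 'Ebs': {'VolumeSize': 10}}]
    #   -> {'BlockDeviceMapping.1.DeviceName': '/dev/sda1',
    #       'BlockDeviceMapping.1.Ebs.VolumeSize': '10'}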
def _get_common_security_group_params(self, group_id, protocol,
from_port, to_port, cidr_ips,
group_pairs):
"""
Return a dictionary with common query parameters which are used when
operating on security groups.
:rtype: ``dict``
"""
params = {'GroupId': group_id,
'IpPermissions.1.IpProtocol': protocol,
'IpPermissions.1.FromPort': from_port,
'IpPermissions.1.ToPort': to_port}
if cidr_ips is not None:
ip_ranges = {}
for index, cidr_ip in enumerate(cidr_ips):
index += 1
ip_ranges['IpPermissions.1.IpRanges.%s.CidrIp'
% (index)] = cidr_ip
params.update(ip_ranges)
if group_pairs is not None:
user_groups = {}
for index, group_pair in enumerate(group_pairs):
index += 1
if 'group_id' in group_pair.keys():
user_groups['IpPermissions.1.Groups.%s.GroupId'
% (index)] = group_pair['group_id']
if 'group_name' in group_pair.keys():
user_groups['IpPermissions.1.Groups.%s.GroupName'
% (index)] = group_pair['group_name']
if 'user_id' in group_pair.keys():
user_groups['IpPermissions.1.Groups.%s.UserId'
% (index)] = group_pair['user_id']
params.update(user_groups)
return params
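    # Sketch of the resulting params for a single CIDR-based rule
    # (illustrative values only):
    #   {'GroupId': 'sg-1234', 'IpPermissions.1.IpProtocol': 'tcp',
    #    'IpPermissions.1.FromPort': 22, 'IpPermissions.1.ToPort': 22,
    #    'IpPermissions.1.IpRanges.1.CidrIp': '0.0.0.0/0'}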
def _get_security_groups(self, element):
"""
Parse security groups from the provided element and return a
        list of security groups with the id and name key/value pairs.
:rtype: ``list`` of ``dict``
"""
groups = []
for item in findall(element=element,
xpath='groupSet/item',
namespace=NAMESPACE):
groups.append({
'group_id': findtext(element=item,
xpath='groupId',
namespace=NAMESPACE),
'group_name': findtext(element=item,
xpath='groupName',
namespace=NAMESPACE)
})
return groups
def _build_filters(self, filters):
"""
Return a dictionary with filter query parameters which are used when
listing networks, security groups, etc.
:param filters: Dict of filter names and filter values
:type filters: ``dict``
:rtype: ``dict``
"""
filter_entries = {}
for filter_idx, filter_data in enumerate(filters.items()):
filter_idx += 1 # We want 1-based indexes
filter_name, filter_values = filter_data
filter_key = 'Filter.%s.Name' % (filter_idx)
filter_entries[filter_key] = filter_name
if isinstance(filter_values, list):
for value_idx, value in enumerate(filter_values):
value_idx += 1 # We want 1-based indexes
value_key = 'Filter.%s.Value.%s' % (filter_idx,
value_idx)
filter_entries[value_key] = value
else:
value_key = 'Filter.%s.Value.1' % (filter_idx)
filter_entries[value_key] = filter_values
return filter_entries
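    # For instance (illustrative filter names; the Filter.N numbering follows
    # the iteration order of the passed dict):
    #   self._build_filters({'instance-id': 'i-1234', 'tag:Name': ['a', 'b']})
    #   might produce:
    #   {'Filter.1.Name': 'instance-id', 'Filter.1.Value.1': 'i-1234',
    #    'Filter.2.Name': 'tag:Name', 'Filter.2.Value.1': 'a',
    #    'Filter.2.Value.2': 'b'}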
class EC2NodeDriver(BaseEC2NodeDriver):
"""
Amazon EC2 node driver.
"""
connectionCls = EC2Connection
type = Provider.EC2
name = 'Amazon EC2'
website = 'http://aws.amazon.com/ec2/'
path = '/'
NODE_STATE_MAP = {
'pending': NodeState.PENDING,
'running': NodeState.RUNNING,
'shutting-down': NodeState.UNKNOWN,
'terminated': NodeState.TERMINATED,
'stopped': NodeState.STOPPED
}
def __init__(self, key, secret=None, secure=True, host=None, port=None,
region='us-east-1', **kwargs):
if hasattr(self, '_region'):
region = self._region
if region not in VALID_EC2_REGIONS:
raise ValueError('Invalid region: %s' % (region))
details = REGION_DETAILS[region]
self.region_name = region
self.api_name = details['api_name']
self.country = details['country']
self.signature_version = details.get('signature_version',
DEFAULT_SIGNATURE_VERSION)
host = host or details['endpoint']
super(EC2NodeDriver, self).__init__(key=key, secret=secret,
secure=secure, host=host,
port=port, **kwargs)
class IdempotentParamError(LibcloudError):
"""
Request used the same client token as a previous,
but non-identical request.
"""
def __str__(self):
return repr(self.value)
class EC2EUNodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the Western Europe Region.
"""
name = 'Amazon EC2 (eu-west-1)'
_region = 'eu-west-1'
class EC2USWestNodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the Western US Region
"""
name = 'Amazon EC2 (us-west-1)'
_region = 'us-west-1'
class EC2USWestOregonNodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the US West Oregon region.
"""
name = 'Amazon EC2 (us-west-2)'
_region = 'us-west-2'
class EC2APSENodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the Southeast Asia Pacific Region.
"""
name = 'Amazon EC2 (ap-southeast-1)'
_region = 'ap-southeast-1'
class EC2APNENodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the Northeast Asia Pacific Region.
"""
name = 'Amazon EC2 (ap-northeast-1)'
_region = 'ap-northeast-1'
class EC2SAEastNodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the South America (Sao Paulo) Region.
"""
name = 'Amazon EC2 (sa-east-1)'
_region = 'sa-east-1'
class EC2APSESydneyNodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the Southeast Asia Pacific (Sydney) Region.
"""
name = 'Amazon EC2 (ap-southeast-2)'
_region = 'ap-southeast-2'
class EucConnection(EC2Connection):
"""
Connection class for Eucalyptus
"""
host = None
class EucNodeDriver(BaseEC2NodeDriver):
"""
Driver class for Eucalyptus
"""
name = 'Eucalyptus'
website = 'http://www.eucalyptus.com/'
api_name = 'ec2_us_east'
region_name = 'us-east-1'
connectionCls = EucConnection
signature_version = '2'
def __init__(self, key, secret=None, secure=True, host=None,
path=None, port=None, api_version=DEFAULT_EUCA_API_VERSION):
"""
@inherits: :class:`EC2NodeDriver.__init__`
        :param path: The path at which the API can be reached.
:type path: ``str``
:param api_version: The API version to extend support for
Eucalyptus proprietary API calls
:type api_version: ``str``
"""
super(EucNodeDriver, self).__init__(key, secret, secure, host, port)
if path is None:
path = '/services/Eucalyptus'
self.path = path
self.EUCA_NAMESPACE = 'http://msgs.eucalyptus.com/%s' % (api_version)
def list_locations(self):
raise NotImplementedError(
'list_locations not implemented for this driver')
def _to_sizes(self, response):
return [self._to_size(el) for el in response.findall(
fixxpath(xpath='instanceTypeDetails/item',
namespace=self.EUCA_NAMESPACE))]
def _to_size(self, el):
name = findtext(element=el,
xpath='name',
namespace=self.EUCA_NAMESPACE)
cpu = findtext(element=el,
xpath='cpu',
namespace=self.EUCA_NAMESPACE)
disk = findtext(element=el,
xpath='disk',
namespace=self.EUCA_NAMESPACE)
memory = findtext(element=el,
xpath='memory',
namespace=self.EUCA_NAMESPACE)
return NodeSize(id=name,
name=name,
ram=int(memory),
disk=int(disk),
bandwidth=None,
price=None,
driver=EucNodeDriver,
extra={
'cpu': int(cpu)
})
def list_sizes(self):
"""
List available instance flavors/sizes
:rtype: ``list`` of :class:`NodeSize`
"""
params = {'Action': 'DescribeInstanceTypes'}
response = self.connection.request(self.path, params=params).object
return self._to_sizes(response)
def _add_instance_filter(self, params, node):
"""
Eucalyptus driver doesn't support filtering on instance id so this is a
no-op.
"""
pass
class NimbusConnection(EC2Connection):
"""
Connection class for Nimbus
"""
host = None
class NimbusNodeDriver(BaseEC2NodeDriver):
"""
Driver class for Nimbus
"""
type = Provider.NIMBUS
name = 'Nimbus'
website = 'http://www.nimbusproject.org/'
country = 'Private'
api_name = 'nimbus'
region_name = 'nimbus'
friendly_name = 'Nimbus Private Cloud'
connectionCls = NimbusConnection
signature_version = '2'
def ex_describe_addresses(self, nodes):
"""
Nimbus doesn't support elastic IPs, so this is a pass-through.
@inherits: :class:`EC2NodeDriver.ex_describe_addresses`
"""
nodes_elastic_ip_mappings = {}
for node in nodes:
# empty list per node
nodes_elastic_ip_mappings[node.id] = []
return nodes_elastic_ip_mappings
def ex_create_tags(self, resource, tags):
"""
Nimbus doesn't support creating tags, so this is a pass-through.
@inherits: :class:`EC2NodeDriver.ex_create_tags`
"""
pass
class OutscaleConnection(EC2Connection):
"""
Connection class for Outscale
"""
host = None
class OutscaleNodeDriver(BaseEC2NodeDriver):
"""
Base Outscale FCU node driver.
Outscale per provider driver classes inherit from it.
"""
connectionCls = OutscaleConnection
name = 'Outscale'
website = 'http://www.outscale.com'
path = '/'
signature_version = '2'
NODE_STATE_MAP = {
'pending': NodeState.PENDING,
'running': NodeState.RUNNING,
'shutting-down': NodeState.UNKNOWN,
'terminated': NodeState.TERMINATED,
'stopped': NodeState.STOPPED
}
def __init__(self, key, secret=None, secure=True, host=None, port=None,
region='us-east-1', region_details=None, **kwargs):
if hasattr(self, '_region'):
region = self._region
if region_details is None:
raise ValueError('Invalid region_details argument')
if region not in region_details.keys():
raise ValueError('Invalid region: %s' % (region))
self.region_name = region
self.region_details = region_details
details = self.region_details[region]
self.api_name = details['api_name']
self.country = details['country']
self.connectionCls.host = details['endpoint']
self._not_implemented_msg =\
'This method is not supported in the Outscale driver'
super(BaseEC2NodeDriver, self).__init__(key=key, secret=secret,
secure=secure, host=host,
port=port, **kwargs)
def create_node(self, **kwargs):
"""
Create a new Outscale node. The ex_iamprofile keyword is not supported.
@inherits: :class:`BaseEC2NodeDriver.create_node`
:keyword ex_keyname: The name of the key pair
:type ex_keyname: ``str``
:keyword ex_userdata: User data
:type ex_userdata: ``str``
:keyword ex_security_groups: A list of names of security groups to
assign to the node.
:type ex_security_groups: ``list``
:keyword ex_metadata: Key/Value metadata to associate with a node
:type ex_metadata: ``dict``
:keyword ex_mincount: Minimum number of instances to launch
:type ex_mincount: ``int``
:keyword ex_maxcount: Maximum number of instances to launch
:type ex_maxcount: ``int``
:keyword ex_clienttoken: Unique identifier to ensure idempotency
:type ex_clienttoken: ``str``
:keyword ex_blockdevicemappings: ``list`` of ``dict`` block device
mappings.
:type ex_blockdevicemappings: ``list`` of ``dict``
:keyword ex_ebs_optimized: EBS-Optimized if True
:type ex_ebs_optimized: ``bool``
"""
if 'ex_iamprofile' in kwargs:
raise NotImplementedError("ex_iamprofile not implemented")
return super(OutscaleNodeDriver, self).create_node(**kwargs)
def ex_create_network(self, cidr_block, name=None):
"""
Create a network/VPC. Outscale does not support instance_tenancy.
:param cidr_block: The CIDR block assigned to the network
:type cidr_block: ``str``
:param name: An optional name for the network
:type name: ``str``
:return: Dictionary of network properties
:rtype: ``dict``
"""
return super(OutscaleNodeDriver, self).ex_create_network(cidr_block,
name=name)
def ex_modify_instance_attribute(self, node, disable_api_termination=None,
ebs_optimized=None, group_id=None,
source_dest_check=None, user_data=None,
instance_type=None):
"""
Modify node attributes.
        Outscale supports the following attributes:
'DisableApiTermination.Value', 'EbsOptimized', 'GroupId.n',
'SourceDestCheck.Value', 'UserData.Value',
'InstanceType.Value'
        :param node: Node instance
        :type node: :class:`Node`
        The remaining keyword arguments correspond to the attribute names
        listed above.
:return: True on success, False otherwise.
:rtype: ``bool``
"""
attributes = {}
if disable_api_termination is not None:
attributes['DisableApiTermination.Value'] = disable_api_termination
if ebs_optimized is not None:
attributes['EbsOptimized'] = ebs_optimized
if group_id is not None:
attributes['GroupId.n'] = group_id
if source_dest_check is not None:
attributes['SourceDestCheck.Value'] = source_dest_check
if user_data is not None:
attributes['UserData.Value'] = user_data
if instance_type is not None:
attributes['InstanceType.Value'] = instance_type
return super(OutscaleNodeDriver, self).ex_modify_instance_attribute(
node, attributes)
def ex_register_image(self, name, description=None, architecture=None,
root_device_name=None, block_device_mapping=None):
"""
Registers a Machine Image based off of an EBS-backed instance.
Can also be used to create images from snapshots.
Outscale does not support image_location, kernel_id and ramdisk_id.
:param name: The name for the AMI being registered
:type name: ``str``
:param description: The description of the AMI (optional)
:type description: ``str``
:param architecture: The architecture of the AMI (i386/x86_64)
(optional)
:type architecture: ``str``
:param root_device_name: The device name for the root device
Required if registering an EBS-backed AMI
:type root_device_name: ``str``
:param block_device_mapping: A dictionary of the disk layout
(optional)
:type block_device_mapping: ``dict``
:rtype: :class:`NodeImage`
"""
return super(OutscaleNodeDriver, self).ex_register_image(
name, description=description, architecture=architecture,
root_device_name=root_device_name,
block_device_mapping=block_device_mapping)
def ex_copy_image(self, source_region, image, name=None, description=None):
"""
Outscale does not support copying images.
@inherits: :class:`EC2NodeDriver.ex_copy_image`
"""
raise NotImplementedError(self._not_implemented_msg)
def ex_get_limits(self):
"""
Outscale does not support getting limits.
@inherits: :class:`EC2NodeDriver.ex_get_limits`
"""
raise NotImplementedError(self._not_implemented_msg)
def ex_create_network_interface(self, subnet, name=None,
description=None,
private_ip_address=None):
"""
Outscale does not support creating a network interface within a VPC.
@inherits: :class:`EC2NodeDriver.ex_create_network_interface`
"""
raise NotImplementedError(self._not_implemented_msg)
def ex_delete_network_interface(self, network_interface):
"""
Outscale does not support deleting a network interface within a VPC.
@inherits: :class:`EC2NodeDriver.ex_delete_network_interface`
"""
raise NotImplementedError(self._not_implemented_msg)
def ex_attach_network_interface_to_node(self, network_interface,
node, device_index):
"""
Outscale does not support attaching a network interface.
@inherits: :class:`EC2NodeDriver.ex_attach_network_interface_to_node`
"""
raise NotImplementedError(self._not_implemented_msg)
def ex_detach_network_interface(self, attachment_id, force=False):
"""
Outscale does not support detaching a network interface
@inherits: :class:`EC2NodeDriver.ex_detach_network_interface`
"""
raise NotImplementedError(self._not_implemented_msg)
def list_sizes(self, location=None):
"""
List available instance flavors/sizes
        This overrides the default EC2 method in order to use Outscale information.
:rtype: ``list`` of :class:`NodeSize`
"""
available_types =\
self.region_details[self.region_name]['instance_types']
sizes = []
for instance_type in available_types:
attributes = OUTSCALE_INSTANCE_TYPES[instance_type]
attributes = copy.deepcopy(attributes)
price = self._get_size_price(size_id=instance_type)
attributes.update({'price': price})
sizes.append(NodeSize(driver=self, **attributes))
return sizes
class OutscaleSASNodeDriver(OutscaleNodeDriver):
"""
Outscale SAS node driver
"""
name = 'Outscale SAS'
type = Provider.OUTSCALE_SAS
def __init__(self, key, secret=None, secure=True, host=None, port=None,
region='us-east-1', region_details=None, **kwargs):
super(OutscaleSASNodeDriver, self).__init__(
key=key, secret=secret, secure=secure, host=host, port=port,
region=region, region_details=OUTSCALE_SAS_REGION_DETAILS,
**kwargs)
class OutscaleINCNodeDriver(OutscaleNodeDriver):
"""
Outscale INC node driver
"""
name = 'Outscale INC'
type = Provider.OUTSCALE_INC
def __init__(self, key, secret=None, secure=True, host=None, port=None,
region='us-east-1', region_details=None, **kwargs):
super(OutscaleINCNodeDriver, self).__init__(
key=key, secret=secret, secure=secure, host=host, port=port,
region=region, region_details=OUTSCALE_INC_REGION_DETAILS,
**kwargs)
| 33.283138
| 80
| 0.517075
|
d463f75ac362815da409ada6cf1ef513d3f08fcc
| 1,647
|
py
|
Python
|
torcharrow/benchmark/benchmark_vmap.py
|
wenleix/torcharrow-1
|
9c398a361156973e29be868520ea9278db00ea52
|
[
"BSD-3-Clause"
] | null | null | null |
torcharrow/benchmark/benchmark_vmap.py
|
wenleix/torcharrow-1
|
9c398a361156973e29be868520ea9278db00ea52
|
[
"BSD-3-Clause"
] | null | null | null |
torcharrow/benchmark/benchmark_vmap.py
|
wenleix/torcharrow-1
|
9c398a361156973e29be868520ea9278db00ea52
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
import timeit
import torcharrow as ta
import torcharrow.dtypes as dt
def prepare_list_int_col():
elements = []
for i in range(1, 101):
element = []
for j in range(1, i // 10 + 2):
element.append(i * j)
elements.append(element)
return ta.from_pylist(elements, dtype=dt.List(dt.int64), device="cpu")
def list_map_int(col: ta.IColumn):
return col.list.map(lambda val: val + 1)
def list_vmap_int(col: ta.IColumn):
return col.list.vmap(lambda col: col + 1)
def prepare_list_str_col():
elements = []
for i in range(1, 101):
element = []
for j in range(1, i // 10 + 2):
element.append(f"str{i}_{j}")
elements.append(element)
return ta.from_pylist(elements, dtype=dt.List(dt.string), device="cpu")
def list_map_str(col: ta.IColumn):
return col.list.map(lambda val: val + "_1")
def list_vmap_str(col: ta.IColumn):
return col.list.vmap(lambda col: col + "_1")
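# Note on the two variants above (an assumption about torcharrow semantics,
# not verified here): list.map is expected to call the Python lambda once per
# list element, while list.vmap passes the flattened elements to the callable
# as a whole column so the `+` runs vectorized; the benchmark below measures
# how much that buys on this data.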
if __name__ == "__main__":
print("Benchmark list[int] column:")
col1 = prepare_list_int_col()
print(
"list.map: " + str(timeit.timeit(stmt=lambda: list_map_int(col1), number=100))
)
print(
"list.vmap: " + str(timeit.timeit(stmt=lambda: list_vmap_int(col1), number=100))
)
print("Benchmark list[str] column:")
col2 = prepare_list_str_col()
print(
"list.map: " + str(timeit.timeit(stmt=lambda: list_map_str(col2), number=100))
)
print(
"list.vmap: " + str(timeit.timeit(stmt=lambda: list_vmap_str(col2), number=100))
)
| 24.954545
| 88
| 0.627201
|
fd8c96cd4dae248a63a11c360fb26b43934b97fa
| 9,261
|
py
|
Python
|
services/api-server/tests/unit/fakes/jobs_faker.py
|
colinRawlings/osparc-simcore
|
bf2f18d5bc1e574d5f4c238d08ad15156184c310
|
[
"MIT"
] | 25
|
2018-04-13T12:44:12.000Z
|
2022-03-12T15:01:17.000Z
|
services/api-server/tests/unit/fakes/jobs_faker.py
|
colinRawlings/osparc-simcore
|
bf2f18d5bc1e574d5f4c238d08ad15156184c310
|
[
"MIT"
] | 2,553
|
2018-01-18T17:11:55.000Z
|
2022-03-31T16:26:40.000Z
|
services/api-server/tests/unit/fakes/jobs_faker.py
|
mrnicegyu11/osparc-simcore
|
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
|
[
"MIT"
] | 20
|
2018-01-18T19:45:33.000Z
|
2022-03-29T07:08:47.000Z
|
"""
Fakes the implementation of some kind of queueing and
scheduling of jobs
"""
import asyncio
import hashlib
import logging
import random
from contextlib import contextmanager
from dataclasses import dataclass, field
from datetime import datetime
from typing import Callable, Dict, Iterator
from uuid import UUID
from fastapi import HTTPException
from simcore_service_api_server.models.api_resources import (
RelativeResourceName,
compose_resource_name,
)
from simcore_service_api_server.models.schemas.files import File
from simcore_service_api_server.models.schemas.jobs import (
Job,
JobInputs,
JobOutputs,
JobStatus,
TaskStates,
)
from simcore_service_api_server.models.schemas.solvers import SolverKeyId, VersionStr
from starlette import status
logger = logging.getLogger(__name__)
JobName = RelativeResourceName
@dataclass
class JobsFaker:
    # Fakes job management for a given user
#
jobs: Dict[JobName, Job] = field(default_factory=dict)
job_status: Dict[UUID, JobStatus] = field(default_factory=dict)
job_inputs: Dict[UUID, JobInputs] = field(default_factory=dict)
job_tasks: Dict[UUID, asyncio.Future] = field(default_factory=dict)
job_outputs: Dict[UUID, JobOutputs] = field(default_factory=dict)
def job_values(self, solver_name: str = None) -> Iterator[Job]:
if solver_name:
for job in self.jobs.values():
if job.runner_name == solver_name:
yield job
else:
for job in self.jobs.values():
yield job
def create_job(self, solver_name: str, inputs: JobInputs) -> Job:
        # NOTE: how can we be sure that inputs.json() will be identical every time?
        # The str representation might truncate e.g. a number, which does not
        # guarantee that the checksum is stable in all cases.
inputs_checksum = hashlib.sha256(
" ".join(inputs.json()).encode("utf-8")
).hexdigest()
job = Job.create_now(solver_name, inputs_checksum)
self.jobs[job.name] = job
self.job_inputs[job.id] = inputs
return job
def start_job(self, job_name: JobName) -> JobStatus:
# check job was created?
job = self.jobs[job_name]
# why not getting inputs from here?
inputs = self.job_inputs[job.id]
job_status = self.job_status.get(job.id)
if not job_status:
job_status = JobStatus(
job_id=job.id,
state=TaskStates.UNKNOWN,
progress=0,
submitted_at=datetime.utcnow(),
)
self.job_status[job.id] = job_status
self.job_tasks[job.id] = asyncio.ensure_future(
self._start_job_task(job.id, inputs)
)
return self.job_status[job.id]
async def _start_job_task(self, job_id, inputs):
MOCK_PULLING_TIME = 1, 2
MOCK_PENDING_TIME = 1, 3
MOCK_RUNNING_TIME = 1, 5 + len(inputs.values)
job_status = self.job_status[job_id]
# Fake results ready
outputs = JobOutputs(
job_id=job_id,
results={
"output_1": File(
filename="file_with_int.txt",
id="1460b7c8-70d7-42ee-9eb2-e2de2a9b7b37",
),
"output_2": 42,
},
)
try:
await asyncio.sleep(random.randint(*MOCK_PULLING_TIME)) # nosec
job_status.state = TaskStates.PENDING
logger.info(job_status)
await asyncio.sleep(random.randint(*MOCK_PENDING_TIME)) # nosec
# -------------------------------------------------
job_status.state = TaskStates.STARTED
job_status.take_snapshot("started")
logger.info(job_status)
job_status.progress = 0
for n in range(100):
await asyncio.sleep(random.randint(*MOCK_RUNNING_TIME) / 100.0) # nosec
job_status.progress = n + 1
# NOTE: types of solvers
# - all outputs at once or output completion
# - can pause and resume or not
#
# -------------------------------------------------
done_states = [TaskStates.SUCCESS, TaskStates.FAILED]
job_status.state = random.choice(done_states) # nosec
job_status.progress = 100
job_status.take_snapshot("stopped")
logger.info(job_status)
if job_status.state == TaskStates.FAILED:
failed = random.choice(list(outputs.results.keys()))
outputs.results[failed] = None
                # similar to ValidationError? For one field or a generic job error?
except asyncio.CancelledError:
logging.debug("Task for job %s was cancelled", job_id)
job_status.state = TaskStates.FAILED
job_status.take_snapshot("stopped")
# all fail
for name in outputs.results:
outputs.results[name] = None
except Exception: # pylint: disable=broad-except
logger.exception("Job %s failed", job_id)
job_status.state = TaskStates.FAILED
job_status.take_snapshot("stopped")
for name in outputs.results:
outputs.results[name] = None
finally:
self.job_outputs[job_id] = outputs
def stop_job(self, job_name) -> Job:
job = self.jobs[job_name]
try:
task = self.job_tasks[job.id]
if not task.cancelled():
task.cancel()
except KeyError:
            logger.warning("Stopping job %s that was never started", job.id)
return job
the_fake_impl = JobsFaker()
#####################################################
# */jobs/* API FAKE implementations
#
@contextmanager
def resource_context(**kwargs):
args = []
for key, value in kwargs.items():
args += [str(key), value]
resource_name = compose_resource_name(*args)
resource_display_name = args[-2]
try:
yield resource_name
except KeyError as err:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"{resource_display_name.capitalize().rstrip('s')} {resource_name} does not exists",
) from err
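# Assuming compose_resource_name joins its arguments with "/", a call such as
#   with resource_context(solvers="isolve", releases="1.2.3") as name:
# would yield a resource name like "solvers/isolve/releases/1.2.3", and any
# KeyError raised inside the block is converted into an HTTP 404.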
def _copy_n_update(job: Job, url_for, solver_key, version):
return job.copy(
update={
"url": url_for(
"get_job", solver_key=solver_key, version=version, job_id=job.id
),
"runner_url": url_for(
"get_solver_release",
solver_key=solver_key,
version=version,
),
"outputs_url": url_for(
"get_job_outputs",
solver_key=solver_key,
version=version,
job_id=job.id,
),
}
)
async def list_jobs_impl(
solver_key: SolverKeyId,
version: VersionStr,
url_for: Callable,
):
solver_resource_name = f"solvers/{solver_key}/releases/{version}"
return [
_copy_n_update(job, url_for, solver_key, version)
for job in the_fake_impl.job_values(solver_resource_name)
]
async def create_job_impl(
solver_key: SolverKeyId,
version: VersionStr,
inputs: JobInputs,
url_for: Callable,
):
"""Creates a job for a solver with given inputs.
NOTE: This operation does **not** start the job
"""
with resource_context(solvers=solver_key, releases=version) as solver_name:
job = the_fake_impl.create_job(solver_name, inputs)
return _copy_n_update(job, url_for, solver_key, version)
async def get_job_impl(
solver_key: SolverKeyId,
version: VersionStr,
job_id: UUID,
url_for: Callable,
):
with resource_context(
solvers=solver_key, releases=version, jobs=job_id
) as job_name:
job = the_fake_impl.jobs[job_name]
return _copy_n_update(job, url_for, solver_key, version)
async def start_job_impl(solver_key: SolverKeyId, version: VersionStr, job_id: UUID):
with resource_context(
solvers=solver_key, releases=version, jobs=job_id
) as job_name:
job_state = the_fake_impl.start_job(job_name)
return job_state
async def stop_job_impl(
solver_key: SolverKeyId,
version: VersionStr,
job_id: UUID,
url_for: Callable,
):
with resource_context(
solvers=solver_key, releases=version, jobs=job_id
) as job_name:
job = the_fake_impl.stop_job(job_name)
return _copy_n_update(job, url_for, solver_key, version)
async def inspect_job_impl(solver_key: SolverKeyId, version: VersionStr, job_id: UUID):
with resource_context(solvers=solver_key, releases=version, jobs=job_id):
# here we should use job_name ..
state = the_fake_impl.job_status[job_id]
return state
async def get_job_outputs_impl(
solver_key: SolverKeyId, version: VersionStr, job_id: UUID
):
with resource_context(
solvers=solver_key, releases=version, jobs=job_id, outputs="*"
):
outputs: JobOutputs = the_fake_impl.job_outputs[job_id]
return outputs
| 30.87
| 103
| 0.615592
|
f1442e7f936e7e4473ea9e64e24df2ec6a8f7250
| 2,996
|
py
|
Python
|
notebooks/nb066x_workshop_pandas.py
|
hoelzl/ML-Course
|
efa7ccb7c6583753675bbcda569d3184d1ca98d2
|
[
"MIT"
] | 1
|
2022-03-02T15:59:11.000Z
|
2022-03-02T15:59:11.000Z
|
notebooks/nb066z_solution_pandas.py
|
hoelzl/ML-Course
|
efa7ccb7c6583753675bbcda569d3184d1ca98d2
|
[
"MIT"
] | null | null | null |
notebooks/nb066z_solution_pandas.py
|
hoelzl/ML-Course
|
efa7ccb7c6583753675bbcda569d3184d1ca98d2
|
[
"MIT"
] | null | null | null |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# Importieren Sie Pandas und lesen Sie die Datei `population_by_county.csv` in einen Data Frame `population`
# %%
import pandas as pd
# %%
population = pd.read_csv('population_by_county.csv')
# %% [markdown]
# Display the first rows of the `population` data frame.
# %%
population.head()
# %% [markdown]
# Determine the column names of `population`.
# %%
population.columns
# %% [markdown]
# Which states are recorded in the table? How many states are there?
# %%
population['State'].unique()
# %%
population['State'].nunique()
# %% [markdown]
# What are the 10 most common county names in this dataset?
# %%
population['County'].value_counts()[0:10]
# %% [markdown]
# What are the 10 most populous states according to the 2017 estimate?
#
# *Hint:* Use helper variables to inspect intermediate results. With `Shift-Tab` you can see which parameters the functions accept.
# %%
population_per_state = population.groupby('State').sum()['2017PopEstimate']
# %%
population_per_state.sort_values(ascending=False)[0:10]
# %%
# Alternatively:
population_per_state.sort_values()[-10:]
# %% [markdown]
# How many states have more than 1 million inhabitants according to the 2017 estimate?
# %%
population_per_state[population_per_state > 1_000_000].count()
# %% [markdown]
# How many counties have the word "County" in their name?
# %%
population['County'].apply(lambda n: 'County' in n).sum()
# %% [markdown]
# As preparation for the next question, compute the proportional change (i.e., 1.0 corresponds to no change) between the 2010 census and the 2017 estimate for all counties.
# %%
(population['2017PopEstimate'] - population['2010Census']) / population['2010Census']
# %% [markdown]
# Add a column `PercentChange` to `population` that gives the percentage change between the 2010 census and the 2017 estimate. (I.e., 100 corresponds to no change.)
# %%
population['PercentChange'] = 100 * (population['2017PopEstimate'] - population['2010Census']) / population['2010Census']
# %%
population.head()
# %% [markdown]
# Which 10 states have the highest percentage change?
#
# *Hint:* Store the population grouped by state in a new table and use it for the calculation.
# %%
pop_by_state = population.groupby('State').sum()
# %%
del pop_by_state['PercentChange']
# %%
pop_by_state.head()
# %%
pop_by_state['PercentChange'] = 100 * (pop_by_state['2017PopEstimate'] - pop_by_state['2010Census']) / pop_by_state['2010Census']
# %%
pop_by_state.head()
# %%
pop_by_state.sort_values('PercentChange', ascending=False)[0:10]
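# %% [markdown]
# An equivalent, more compact way to read off the top 10 (a sketch using
# pandas' `Series.nlargest`):
# %%
pop_by_state['PercentChange'].nlargest(10)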
# %%
| 25.606838
| 203
| 0.716956
|
228cfc8532b49503f3034060ff8d7c3338a43f68
| 9,525
|
py
|
Python
|
pollbot/telegram/callback_handler/vote.py
|
3wille/ultimate-poll-bot
|
7a99659df463a891b20a1ab424665cd84d4242b4
|
[
"MIT"
] | null | null | null |
pollbot/telegram/callback_handler/vote.py
|
3wille/ultimate-poll-bot
|
7a99659df463a891b20a1ab424665cd84d4242b4
|
[
"MIT"
] | null | null | null |
pollbot/telegram/callback_handler/vote.py
|
3wille/ultimate-poll-bot
|
7a99659df463a891b20a1ab424665cd84d4242b4
|
[
"MIT"
] | null | null | null |
"""Callback functions needed during creation of a Poll."""
from sqlalchemy import func
from sqlalchemy.exc import IntegrityError
from pollbot.i18n import i18n
from pollbot.helper import poll_allows_cumulative_votes
from pollbot.helper.enums import PollType, CallbackResult
from pollbot.helper.update import update_poll_messages
from pollbot.models import PollOption, Vote
def handle_vote(session, context):
"""Handle any clicks on vote buttons."""
    # Handle the case where the poll got deleted, but we didn't manage to kill all references
option = session.query(PollOption).get(context.payload)
if option is None:
if context.query.message is not None:
context.query.message.edit_text(i18n.t('deleted.polls', locale=context.user.locale))
else:
context.bot.edit_message_text(
i18n.t('deleted.polls', locale=context.user.locale),
inline_message_id=context.query.inline_message_id,
)
return
poll = option.poll
try:
# Single vote
if poll.poll_type == PollType.single_vote.name:
update_poll = handle_single_vote(session, context, option)
# Block vote
elif poll.poll_type == PollType.block_vote.name:
update_poll = handle_block_vote(session, context, option)
# Limited vote
elif poll.poll_type == PollType.limited_vote.name:
update_poll = handle_limited_vote(session, context, option)
# Cumulative vote
elif poll.poll_type == PollType.cumulative_vote.name:
update_poll = handle_cumulative_vote(session, context, option)
elif poll.poll_type == PollType.count_vote.name:
update_poll = handle_cumulative_vote(session, context, option, unlimited=True)
elif poll.poll_type == PollType.doodle.name:
update_poll = handle_doodle_vote(session, context, option)
else:
raise Exception("Unknown poll type")
except IntegrityError:
# Double vote. Rollback the transaction and ignore the second vote
session.rollback()
return
session.commit()
if update_poll:
update_poll_messages(session, context.bot, poll)
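# Note: the per-type handlers below return True when the stored votes changed
# and the rendered poll messages therefore need to be refreshed; they return
# False (e.g. when no votes are left) so that the comparatively expensive
# update_poll_messages call above can be skipped.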
def respond_to_vote(session, line, context, poll, total_vote_count=None, limited=False):
"""Get the formatted response for a user."""
locale = poll.locale
votes = session.query(Vote) \
.filter(Vote.user == context.user) \
.filter(Vote.poll == poll) \
.all()
if limited:
line += i18n.t('callback.vote.votes_left', locale=locale, count=total_vote_count)
lines = [line]
lines.append(i18n.t('callback.vote.your_votes', locale=locale, count=total_vote_count))
for vote in votes:
if poll_allows_cumulative_votes(poll):
lines.append(f' {vote.poll_option.name} ({vote.vote_count}), ')
else:
lines.append(f' {vote.poll_option.name}')
message = ''.join(lines)
context.query.answer(message)
def handle_single_vote(session, context, option):
"""Handle a single vote."""
locale = option.poll.locale
existing_vote = session.query(Vote) \
.filter(Vote.poll == option.poll) \
.filter(Vote.user == context.user) \
.one_or_none()
# Changed vote
if existing_vote and existing_vote.poll_option != option:
existing_vote.poll_option = option
vote_changed = i18n.t('callback.vote.changed', locale=locale)
respond_to_vote(session, vote_changed, context, option.poll)
# Voted for the same thing again
elif existing_vote and existing_vote.poll_option == option:
session.delete(existing_vote)
vote_removed = i18n.t('callback.vote.removed', locale=locale)
context.query.answer(vote_removed)
# First vote on this poll
elif existing_vote is None:
vote = Vote(context.user, option)
session.add(vote)
vote_registered = i18n.t('callback.vote.registered', locale=locale)
respond_to_vote(session, vote_registered, context, option.poll)
return True
def handle_block_vote(session, context, option):
"""Handle a block vote."""
locale = option.poll.locale
existing_vote = session.query(Vote) \
.filter(Vote.poll_option == option) \
.filter(Vote.user == context.user) \
.one_or_none()
# Remove vote
if existing_vote:
session.delete(existing_vote)
vote_removed = i18n.t('callback.vote.removed', locale=locale)
respond_to_vote(session, vote_removed, context, option.poll)
# Add vote
elif existing_vote is None:
vote = Vote(context.user, option)
session.add(vote)
vote_registered = i18n.t('callback.vote.registered', locale=locale)
respond_to_vote(session, vote_registered, context, option.poll)
return True
def handle_limited_vote(session, context, option):
"""Handle a limited vote."""
locale = option.poll.locale
existing_vote = session.query(Vote) \
.filter(Vote.poll_option == option) \
.filter(Vote.user == context.user) \
.one_or_none()
vote_count = session.query(Vote) \
.filter(Vote.poll == option.poll) \
.filter(Vote.user == context.user) \
.count()
# Remove vote
if existing_vote:
session.delete(existing_vote)
vote_removed = i18n.t('callback.vote.removed', locale=locale)
respond_to_vote(session, vote_removed, context, option.poll, vote_count-1, True)
# Add vote
elif existing_vote is None and vote_count < option.poll.number_of_votes:
vote = Vote(context.user, option)
session.add(vote)
vote_registered = i18n.t('callback.vote.registered', locale=locale)
respond_to_vote(session, vote_registered, context, option.poll, vote_count+1, True)
# Max votes reached
else:
no_left = i18n.t('callback.vote.no_left', locale=locale)
respond_to_vote(session, no_left, context, option.poll)
return False
return True
def handle_cumulative_vote(session, context, option, unlimited=False):
"""Handle a cumulative vote."""
locale = option.poll.locale
existing_vote = session.query(Vote) \
.filter(Vote.poll_option == option) \
.filter(Vote.user == context.user) \
.one_or_none()
vote_count = session.query(func.sum(Vote.vote_count)) \
.filter(Vote.poll == option.poll) \
.filter(Vote.user == context.user) \
.one()
vote_count = vote_count[0]
if vote_count is None:
vote_count = 0
action = context.callback_result
allowed_votes = 10000000
if not unlimited:
allowed_votes = option.poll.number_of_votes
# Upvote, but no votes left
if not unlimited and action == CallbackResult.yes and vote_count >= allowed_votes:
no_left = i18n.t('callback.vote.no_left', locale=locale)
respond_to_vote(session, no_left, context, option.poll)
return False
# Early return if downvote on non existing vote
if existing_vote is None and action == CallbackResult.no:
respond_to_vote(session, 'Cannot downvote this option.', context, option.poll)
return False
if existing_vote:
# Add to an existing vote
if action == CallbackResult.yes:
existing_vote.vote_count += 1
session.commit()
total_vote_count = allowed_votes - (vote_count + 1)
vote_registered = i18n.t('callback.vote.registered', locale=locale)
respond_to_vote(session, vote_registered, context, option.poll, total_vote_count, True)
# Remove from existing vote
elif action == CallbackResult.no:
existing_vote.vote_count -= 1
session.commit()
total_vote_count = allowed_votes - (vote_count - 1)
vote_removed = i18n.t('callback.vote.removed', locale=locale)
respond_to_vote(session, vote_removed, context, option.poll, total_vote_count, True)
# Delete vote if necessary
if existing_vote.vote_count <= 0:
session.delete(existing_vote)
session.commit()
# Add new vote
elif existing_vote is None and action == CallbackResult.yes:
vote = Vote(context.user, option)
session.add(vote)
session.commit()
total_vote_count = allowed_votes - (vote_count + 1)
vote_registered = i18n.t('callback.vote.registered', locale=locale)
respond_to_vote(session, vote_registered, context, option.poll, total_vote_count, True)
return True
def handle_doodle_vote(session, context, option):
"""Handle a doodle vote."""
locale = option.poll.locale
vote = session.query(Vote) \
.filter(Vote.poll_option == option) \
.filter(Vote.user == context.user) \
.one_or_none()
if context.callback_result.name is None:
data = context.data # noqa
raise Exception("Unknown callback result")
# Remove vote
if vote is not None:
vote.type = context.callback_result.name
changed = i18n.t('callback.vote.doodle_changed', locale=locale, vote_type=vote.type)
context.query.answer(changed)
# Add vote
else:
vote = Vote(context.user, option)
vote.type = context.callback_result.name
session.add(vote)
registered = i18n.t('callback.vote.doodle_registered', locale=locale, vote_type=vote.type)
context.query.answer(registered)
return True
| 36.21673
| 99
| 0.663307
|
60aa8308bc22bd1f618b2d464adee4aa2a5520ed
| 23,227
|
py
|
Python
|
meiduo_mall/meiduo_mall/apps/users/views.py
|
1103928458/meiduo_drf
|
49595755f264b09ea748b4deb8a88bba5eb8557b
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/users/views.py
|
1103928458/meiduo_drf
|
49595755f264b09ea748b4deb8a88bba5eb8557b
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/users/views.py
|
1103928458/meiduo_drf
|
49595755f264b09ea748b4deb8a88bba5eb8557b
|
[
"MIT"
] | 1
|
2020-11-10T07:22:42.000Z
|
2020-11-10T07:22:42.000Z
|
from django.http import HttpResponseForbidden
from django import http
from django.shortcuts import render, redirect
import re, json
from django.contrib.auth import login, authenticate, logout, mixins
from .models import User, Address
from meiduo_mall.utils.response_code import RETCODE
from django_redis import get_redis_connection
from random import randint
from django.views import View
from celery_tasks.email.tasks import send_verify_email
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from .utils import generate_email_verify_url, check_verify_token
import logging
from goods.models import SKU
from carts.utils import merge_cart_cookie_to_redis
from .utils import get_user_by_account
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadData
from celery_tasks.sms.tasks import send_sms_code
Logger = logging.getLogger("django")
class RegisterView(View):
    def get(self, request):
        """Render the registration page."""
return render(request, "register.html")
    def post(self, request):
        """Registration logic."""
        # 1. Receive the form data sent by the frontend:
        #    username, password, mobile number, SMS code, agreement checkbox
query_dict = request.POST
username = query_dict.get("username")
password = query_dict.get("password")
password2 = query_dict.get("password2")
mobile = query_dict.get("mobile")
sms_code = query_dict.get("sms_code")
        allow = query_dict.get("allow")  # checkbox: "on" if ticked, None otherwise
        # 2. Validate the data
if all([username, password, password2, mobile, sms_code, allow]) is False:
return http.HttpResponseForbidden("缺少必要参数")
if not re.match(r"^[a-zA-Z0-9_-]{8,20}$", password):
return http.HttpResponseForbidden('请输入8-20位的密码')
if password != password2:
return http.HttpResponseForbidden('两次输入的密码不一致')
if not re.match(r"^1[3-9]\d{9}$", mobile):
return http.HttpResponseForbidden('请输入正确的手机号码')
        # Check the SMS verification code against the one stored in redis
        redis_conn = get_redis_connection("verify_code")
        sms_code_server = redis_conn.get('sms_%s' % mobile)
        # Delete the SMS code from redis so it can only be used once
        redis_conn.delete('sms_%s' % mobile)
        # Check whether the SMS code has expired
if sms_code_server is None:
return http.HttpResponseForbidden("短信验证码过期")
sms_code_server = sms_code_server.decode()
if sms_code != sms_code_server:
return http.HttpResponseForbidden('请输入正确的验证码')
        # Save the data: create the user
        user = User.objects.create_user(username=username, password=password, mobile=mobile)
        # After login, store user.id in the session and generate the session cookie
        # to record the user's login state
        login(request, user)
        # request.session["_auth_user_id"] = user.id
        # Respond: redirect to the home page
return redirect('/')
class UsernameCountView(View):
    '''Check whether a username is already registered.'''
    def get(self, request, username):
        # Query the database
count = User.objects.filter(username=username).count()
response_data = {"count": count, "code": RETCODE.OK, "errmsg": "ok"}
return http.JsonResponse(response_data)
class MobileCountView(View):
    '''Check whether a mobile number is already registered.'''
    def get(self, request, mobile):
        # Query the database
count = User.objects.filter(mobile=mobile).count()
response_data = {"count": count, "code": RETCODE.OK, "errmsg": "ok"}
return http.JsonResponse(response_data)
class LoginView(View):
    '''User login.'''
    def get(self, request):
        '''Render the login page.'''
        return render(request, 'login.html')
    def post(self, request):
        """Login logic."""
        # 1. Receive the form data
        username = request.POST.get("username")
        password = request.POST.get("password")
        remembered = request.POST.get("remembered")  # "remember me": "on" if ticked, None otherwise
        # 2. Validate
# try:
# user = User.objects.get(username=username,password=password)
# except User.DoesNotExist:
# return
#
# if user.check_password(password) is False:
# return
        # Alternative: change the configuration --- log in with a mobile number (multi-account login)
        # if re.match(r"^1[3-9]\d{9}$", username):
        #
        #     User.USERNAME_FIELD = "mobile"  # use the model class attribute USERNAME_FIELD
        # User authentication (django's built-in check)
user = authenticate(request, username=username, password=password)
if user is None:
return render(request, "login.html", {"account_errmsg": "用户名或密码错误"})
        # 3. Keep the login state
        login(request, user)
        # If the user did not tick "remember me"
        if remembered != "on":
            # make the session cookie expire when the browser is closed
            request.session.set_expiry(0)
        next = request.GET.get("next")  # try to get the "next" query parameter (where the user came from)
        response = redirect(next or "/")
        response.set_cookie("username", user.username, max_age=3600 * 24 * 14 if remembered else None)
        # Merge the cookie shopping cart into redis here
        merge_cart_cookie_to_redis(request, response)
        # 4. Redirect to the home page
return response
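# Note: the commented-out mobile-number branch above hints at multi-account
# login. The usual Django approach is a custom authentication backend (wired
# up via AUTHENTICATION_BACKENDS) that resolves either a username or a mobile
# number to a user; presumably that is what the imported get_user_by_account
# helper is for, but the backend itself lives outside this file, so this is
# an assumption.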
class LogoutView(View):
    """Log out."""
    def get(self, request):
        # 1. Clear the session state
        logout(request)
        # 2. Redirect to the login page
        response = redirect("/login/")
        # 3. Remove the username cookie
        response.delete_cookie("username")
        # 4. Respond
return response
# First way to show the user center:
# class UserInfoView(View):
#     """Show the user center"""
#     def get(self,request):
#         """Render the user center page"""
#         user = request.user
#
#         # If the user is logged in, show the user center
#         if user.is_authenticated:  # check whether the user is logged in
#             return render(request,"user_center_info.html")
#
#         # Otherwise show the login page
#         else:
#             return redirect("/login/?next=/info/")
# Second way to show the user center:
# class UserInfoView(View):
#     """Show the user center"""
#     @method_decorator(login_required)  # check whether the user is logged in
#     def get(self,request):
#         """Render the user center page"""
#
#         return render(request,"user_center_info.html")
# Third way to show the user center:
# mixins.LoginRequiredMixin checks whether the user is logged in
class UserInfoView(mixins.LoginRequiredMixin, View):
    """Show the user center."""
    def get(self, request):
        """Render the user center page."""
        return render(request, "user_center_info.html")
class EmailView(mixins.LoginRequiredMixin, View):
    '''Set the user's email address.'''
    def put(self, request):
        # 1. Receive the data
json_dict = json.loads(request.body.decode())
email = json_dict.get("email")
        # 2. Validate
if not email:
return http.HttpResponseForbidden('缺少email参数')
if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
return http.HttpResponseForbidden('参数email有误')
        # 3. Update the user's email field
user = request.user
# user.email = email
# user.save()
User.objects.filter(username=user.username, email="").update(email=email)
        # Send the user another verification email
        verify_url = generate_email_verify_url(user)  # generate the email verification url
        send_verify_email.delay(email, verify_url)  # send it via an async celery task
        # 4. Respond
return http.JsonResponse({"code": RETCODE.OK, "errmsg": "添加邮箱成功"})
class VerifyEmailView(View):
    '''Activate the email address.'''
    def get(self, request):
        # Receive the token from the query string
        token = request.GET.get("token")
        # Validate
        if token is None:
            return http.HttpResponseForbidden("缺少token")
        # Decrypt the token and get the user
        user = check_verify_token(token)
        if user is None:
            return http.HttpResponseForbidden("token无效")
        # Set the user's email_active field
        user.email_active = True
        user.save()
        # Respond
        # On success, go back to the user center
return redirect("/info/")
class AddressView(mixins.LoginRequiredMixin, View):
    '''Show the user's shipping addresses.'''
    def get(self, request):
        """Render the shipping address page."""
        # Query all shipping addresses of the current user
        user = request.user
        addresses_qs = Address.objects.filter(user=user, is_deleted=False)
        # Collect all address dicts in a list
addresses = []
for address_model in addresses_qs:
addresses.append({
"id": address_model.id,
"title": address_model.title,
"receiver": address_model.receiver,
"province": address_model.province.name,
"province_id": address_model.province.id,
"city": address_model.city.name,
"city_id": address_model.city.id,
"district": address_model.district.name,
"district_id": address_model.district.id,
"place": address_model.place,
"mobile": address_model.mobile,
"tel": address_model.tel,
"email": address_model.email
})
        # Prepare the rendering context
context = {
"addresses": addresses,
"default_address_id": user.default_address_id
}
return render(request, "user_center_site.html", context)
class CreateAddressView(mixins.LoginRequiredMixin, View):
def post(self, request):
        # Check whether the user has reached the address limit
user = request.user
count = user.addresses.filter(is_deleted=False).count()
if count >= 20:
return http.JsonResponse({"code": RETCODE.THROTTLINGERR, "errmsg": "地址已达到上限"})
        # Receive the request body data
json_dict = json.loads(request.body.decode())
title = json_dict.get("title")
receiver = json_dict.get("receiver")
province_id = json_dict.get("province_id")
city_id = json_dict.get("city_id")
district_id = json_dict.get("district_id")
place = json_dict.get("place")
mobile = json_dict.get("mobile")
tel = json_dict.get("tel")
email = json_dict.get("email")
        # Validate
if all([title, receiver, province_id, city_id, district_id, place, mobile]) is False:
return http.HttpResponseForbidden("缺少必传参数")
if not re.match(r'^1[3-9]\d{9}$', mobile):
return http.HttpResponseForbidden('参数mobile有误')
if tel:
if not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):
return http.HttpResponseForbidden('参数tel有误')
if email:
if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
return http.HttpResponseForbidden('参数email有误')
try:
            # Save the shipping address data
address_model = Address.objects.create(
user=request.user,
title=title,
receiver=receiver,
province_id=province_id,
city_id=city_id,
district_id=district_id,
place=place,
mobile=mobile,
tel=tel,
email=email,
)
except Exception as e:
Logger.error(e)
return http.HttpResponseForbidden({"code": RETCODE.PARAMERR, "errmsg": "新增收货地址失败"})
        # If the user has no default address yet, make this new address the default
if user.default_address is None:
user.default_address = address_model
user.save()
        # Convert the saved model object into a dict and return it to the frontend
address_dict = {
"id": address_model.id,
"title": address_model.title,
"receiver": address_model.receiver,
"province": address_model.province.name,
"province_id": address_model.province.id,
"city": address_model.city.name,
"city_id": address_model.city.id,
"district": address_model.district.name,
"district_id": address_model.district.id,
"place": address_model.place,
"mobile": address_model.mobile,
"tel": address_model.tel,
"email": address_model.email
}
        # Respond
return http.JsonResponse({"code": RETCODE.OK, "errmsg": "新增地址成功", "address": address_dict})
class UpdateDestroyAddressView(mixins.LoginRequiredMixin, View):
    """Update and delete a user's shipping address"""
    def put(self, request, address_id):
        # Receive the request body data
        json_dict = json.loads(request.body.decode())
        title = json_dict.get("title")
        receiver = json_dict.get("receiver")
        province_id = json_dict.get("province_id")
        city_id = json_dict.get("city_id")
        district_id = json_dict.get("district_id")
        place = json_dict.get("place")
        mobile = json_dict.get("mobile")
        tel = json_dict.get("tel")
        email = json_dict.get("email")
        # Validate
        if all([title, receiver, province_id, city_id, district_id, place, mobile]) is False:
            return http.HttpResponseForbidden("Missing required parameters")
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            return http.HttpResponseForbidden('Invalid parameter: mobile')
        if tel:
            if not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):
                return http.HttpResponseForbidden('Invalid parameter: tel')
        if email:
            if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
                return http.HttpResponseForbidden('Invalid parameter: email')
        try:
            # Update the shipping-address data
            Address.objects.filter(id=address_id).update(
                title=title,
                receiver=receiver,
                province_id=province_id,
                city_id=city_id,
                district_id=district_id,
                place=place,
                mobile=mobile,
                tel=tel,
                email=email,
            )
        except Exception as e:
            Logger.error(e)
            return http.JsonResponse({"code": RETCODE.PARAMERR, "errmsg": "Failed to update shipping address"})
        # Fetch the updated address model instance
        address_model = Address.objects.get(id=address_id)
address_dict = {
"id": address_model.id,
"title": address_model.title,
"receiver": address_model.receiver,
"province": address_model.province.name,
"province_id": address_model.province.id,
"city": address_model.city.name,
"city_id": address_model.city.id,
"district": address_model.district.name,
"district_id": address_model.district.id,
"place": address_model.place,
"mobile": address_model.mobile,
"tel": address_model.tel,
"email": address_model.email
}
        # Respond
        return http.JsonResponse({"code": RETCODE.OK, "errmsg": "Address updated successfully", "address": address_dict})
    def delete(self, request, address_id):
        """Delete an address"""
        try:
            address = Address.objects.get(id=address_id)
            address.is_deleted = True
            address.save()
            return http.JsonResponse({"code": RETCODE.OK, "errmsg": "Address deleted successfully"})
        except Address.DoesNotExist:
            return http.JsonResponse({"code": RETCODE.PARAMERR, "errmsg": "address_id does not exist"})
class DefaultAddressView(mixins.LoginRequiredMixin, View):
    """Set the default address"""
    def put(self, request, address_id):
        # Query the shipping address with the given id
        try:
            address = Address.objects.get(id=address_id)
            user = request.user
            # Assign the address to the user's default_address field
            user.default_address = address
            user.save()
            # Respond
            return http.JsonResponse({"code": RETCODE.OK, "errmsg": "Default address set successfully"})
        except Address.DoesNotExist:
            return http.JsonResponse({"code": RETCODE.PARAMERR, "errmsg": "Failed to set default address"})
class UpdateTitleAddressView(mixins.LoginRequiredMixin, View):
    """Update the title of a user's shipping address"""
    def put(self, request, address_id):
        # Receive the address title from the request body
        json_dict = json.loads(request.body.decode())
        title = json_dict.get("title")
        # Validate
        if title is None:
            return http.JsonResponse({"code": RETCODE.PARAMERR, "errmsg": "Missing required parameters"})
        # Fetch the address to be modified
        try:
            address = Address.objects.get(id=address_id)
            # Update the address title
            address.title = title
            address.save()
            # Respond
            return http.JsonResponse({"code": RETCODE.OK, "errmsg": "Address title updated successfully"})
        except Address.DoesNotExist:
            return http.JsonResponse({"code": RETCODE.PARAMERR, "errmsg": "Failed to update address title"})
class CheckPaswordView(mixins.LoginRequiredMixin, View):
    """Change the user's password"""
    def get(self, request):
        return render(request, "user_center_pass.html")
    def post(self, request):
        # Receive the form data from the request body
        query_dict = request.POST
        old_password = query_dict.get("old_pwd")
        new_password = query_dict.get("new_pwd")
        new_password2 = query_dict.get("new_cpwd")
        # Validate
        if all([old_password, new_password, new_password2]) is False:
            return http.HttpResponseForbidden("Missing required parameters")
        user = request.user
        if user.check_password(old_password) is False:
            return render(request, "user_center_pass.html", {"origin_pwd_errmsg": "The original password is incorrect"})
        # Validate the new password
        if not re.match(r'^[0-9A-Za-z]{8,20}$', new_password):
            return http.HttpResponseForbidden('The password must be 8 to 20 characters long')
        if new_password != new_password2:
            return http.HttpResponseForbidden('The two passwords do not match')
        # Change the user's password with set_password
        user.set_password(new_password)
        user.save()
        # Clear the session state
        logout(request)
        # Delete the username cookie
        response = redirect("/login")
        response.delete_cookie("username")
        # Redirect to the login page
        return response
class UserBrowseHistory(mixins.LoginRequiredMixin, View):
    """Product browsing history"""
    def post(self, request):
        # Receive the sku_id from the request body
        json_dict = json.loads(request.body.decode())
        sku_id = json_dict.get("sku_id")
        try:
            # Verify that the sku_id is real
            sku = SKU.objects.get(id=sku_id)
        except SKU.DoesNotExist:
            return http.HttpResponseForbidden('SKU does not exist')
        # Create a redis connection
        redis_conn = get_redis_connection("history")
        pl = redis_conn.pipeline()
        # Get the current user
        user = request.user
        # Build the list key
        key = 'history_%s' % user.id
        # Remove duplicates first
        pl.lrem(key, 0, sku_id)
        # Push to the head of the list
        pl.lpush(key, sku_id)
        # Keep only the first five items
        pl.ltrim(key, 0, 4)
        # Execute
        pl.execute()
        # Respond
        return http.JsonResponse({"code": RETCODE.OK, "errmsg": "ok"})
    def get(self, request):
        """Retrieve the user's browsing history"""
        # 1. Get the currently logged-in user object
        user = request.user
        # 2. Create a redis connection object
        redis_conn = get_redis_connection("history")
        # 3. Get all of the current user's browsing-history entries from redis
        sku_ids = redis_conn.lrange("history_%s" % user.id, 0, -1)
        skus = []  # holds the sku dicts
        # 4. Fetch each sku model by the sku_ids in the list
        for sku_id in sku_ids:
            # 5. Convert each sku model into a dict
            sku_model = SKU.objects.get(id=sku_id)
            skus.append({
                "id": sku_model.id,
                "name": sku_model.name,
                "default_image_url": sku_model.default_image.url,
                "price": sku_model.price
            })
        # 6. Respond
        return http.JsonResponse({"code": RETCODE.OK, "errmsg": "ok", "skus": skus})
class ForgetPassword(View):
    """Forgot password"""
    def get(self, request):
        return render(request, 'find_password.html')
class GetMobileAndCode(View):
    """Reset password: verify the account and the image captcha"""
    def get(self, request, username):
        image_code = request.GET.get('text')
        uuid = request.GET.get('image_code_id')
        if all([username, image_code, uuid]) is False:
            return http.HttpResponseForbidden("Missing required parameters")
        user = get_user_by_account(username)
        if user is None:
            return http.HttpResponseForbidden('The username does not exist')
        else:
            mobile = user.mobile
        # Create a redis connection
        redis_conn = get_redis_connection('verify_code')
        # Fetch the image captcha from redis
        redis_image_code = redis_conn.get("img_%s" % uuid)
        redis_conn.delete("img_%s" % uuid)
        if redis_image_code is None or image_code.lower() != redis_image_code.decode().lower():
            return http.JsonResponse({'code': 'ok', 'errmsg': "Incorrect captcha"}, status=400)
        # Sign the data
        data = {'mobile': mobile}
        serializer = Serializer(settings.SECRET_KEY, 60)
        access_token = serializer.dumps(data).decode()
        return http.JsonResponse({"mobile": mobile, "access_token": access_token})
class Send_sms_code(View):
    """Send the SMS verification code"""
    def get(self, request):
        access_token = request.GET.get('access_token')
        if access_token is None:
            return http.HttpResponseForbidden('access_token is empty')
        serializer = Serializer(settings.SECRET_KEY, 60)
        try:
            data = serializer.loads(access_token)
            mobile = data.get('mobile')
        except BadData:
            return http.HttpResponseForbidden('Invalid data')
        redis_conn = get_redis_connection('verify_code')
        send_flag = redis_conn.get('send_flag_%s' % mobile)
        if send_flag:  # check whether a send flag already exists
            return http.JsonResponse({'errmsg': 'SMS requests are too frequent'})
        sms_code = "%06d" % randint(0, 999999)
        print(sms_code)
        # 5. Store the SMS code in redis so it can be verified later
        redis_conn.setex('sms_%s' % mobile, 300, sms_code)
        # 5.1 Store a flag in redis marking that an SMS has already been sent to this number
        redis_conn.setex('send_flag_%s' % mobile, 60, 1)
        send_sms_code.delay(mobile, sms_code)
        return http.JsonResponse({'message': 'ok'})
class Check_sms_code(View):
    """Verify the mobile number"""
    def get(self, request, username):
        sms_code = request.GET.get('sms_code')
        if all([username, sms_code]) is False:
            return http.HttpResponseForbidden("Missing required parameters")
        # Custom authentication backend
        username = get_user_by_account(username)
        if username is None:
            return http.HttpResponseForbidden('The username does not exist')
        else:
            user_id = username.id
            mobile = username.mobile
        # Connect to redis
        redis_conn = get_redis_connection('verify_code')
        redis_conn_code = redis_conn.get('sms_%s' % mobile)
        if redis_conn_code is None:
            return http.HttpResponseForbidden('The SMS code has expired')
        if sms_code != redis_conn_code.decode():
            return http.HttpResponseForbidden('Incorrect SMS code')
        data = {'mobile': mobile}
        serializer = Serializer(settings.SECRET_KEY, 300)
        access_token = serializer.dumps(data).decode()
        return http.JsonResponse({
            'user_id': user_id,
            'access_token': access_token
        })
class Check_pwd(View):
    """Check the password --- reset the password"""
    def post(self, request, user_id):
        json_dict = json.loads(request.body.decode())
        password = json_dict.get('password')
        password2 = json_dict.get('password2')
        access_token = json_dict.get('access_token')
        if all([password, password2, access_token]) is False:
            return http.HttpResponseForbidden("Missing required parameters")
        if password != password2:
            return http.HttpResponseForbidden('The two passwords do not match')
        if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
            return http.HttpResponseForbidden('The password must be 8 to 20 characters long')
        user = User.objects.get(id=user_id)
        mobile = user.mobile
        serializer = Serializer(settings.SECRET_KEY, 300)
        data = serializer.loads(access_token)
        mobile_data = data.get('mobile')
        if mobile != mobile_data:
            return http.HttpResponseForbidden('Invalid data')
        user.set_password(password)
        user.save()
        return http.JsonResponse({'code': 'Password changed successfully'})
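# --- Editor's note: illustrative sketch, not part of the original module. ---
# The password-reset views above pass the verified mobile number from step to
# step as a short-lived signed token. Assuming `Serializer`, `settings` and
# `BadData` are the objects already imported at the top of this module, the
# round trip they rely on looks like this:
def _access_token_roundtrip_example(mobile):
    serializer = Serializer(settings.SECRET_KEY, 300)
    access_token = serializer.dumps({'mobile': mobile}).decode()
    # loads() raises BadData if the token was tampered with or has expired
    return serializer.loads(access_token).get('mobile')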
| 31.261104
| 102
| 0.594438
|
a0cabbfc2b540132bc0cd93574c265c8d05b151a
| 621
|
py
|
Python
|
mixins.py
|
bathelomeo/mixins
|
d676972fd3bde3f683963dcf6dcadebff6158b24
|
[
"MIT"
] | null | null | null |
mixins.py
|
bathelomeo/mixins
|
d676972fd3bde3f683963dcf6dcadebff6158b24
|
[
"MIT"
] | null | null | null |
mixins.py
|
bathelomeo/mixins
|
d676972fd3bde3f683963dcf6dcadebff6158b24
|
[
"MIT"
] | null | null | null |
class Loggable:
def __init__(self):
self.title = ''
def log(self):
print('Log message from ' + self.title)
class Connection:
def __init__(self):
self.server = ''
def connect(self):
print('Connecting to data base on ' + self.server)
class Sqldatabase(Connection, Loggable):
    def __init__(self):
        self.title = 'Sql Connection Demo'
        self.server = 'Some_Server'
def framework(item):
    if isinstance(item, Connection):
        item.connect()
    if isinstance(item, Loggable):
        item.log()
sql_connection = Sqldatabase()
framework(sql_connection)
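# Editor's note: illustrative addition, not part of the original snippet.
# framework() dispatches on isinstance checks, so a class that mixes in only
# Loggable still gets its log() call while the Connection branch is skipped.
class ReportJob(Loggable):
    def __init__(self):
        self.title = 'Nightly Report'
framework(ReportJob())   # prints: Log message from Nightly Report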
| 24.84
| 58
| 0.63124
|
4c6c0e4a1e379db8d1361f032e5f9e0b851589c1
| 2,582
|
py
|
Python
|
aws/dynamoDB/p02-graph/tw_upload.py
|
JoseIbanez/testing
|
4d6ff310cd63a8b2f8e1abcfbea0f17b23220021
|
[
"MIT"
] | 1
|
2016-09-15T03:58:30.000Z
|
2016-09-15T03:58:30.000Z
|
aws/dynamoDB/p02-graph/tw_upload.py
|
JoseIbanez/testing
|
4d6ff310cd63a8b2f8e1abcfbea0f17b23220021
|
[
"MIT"
] | 1
|
2020-09-13T08:44:50.000Z
|
2020-09-13T08:44:50.000Z
|
aws/dynamoDB/p02-graph/tw_upload.py
|
JoseIbanez/testing
|
4d6ff310cd63a8b2f8e1abcfbea0f17b23220021
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Upload a photo to Twitter
-----------------------------------------------------------------
Uploads an image to Twitter together with a short status text and prints the
HTTP status code of the response to the console.
IMPORTANT: Before using this script, you must provide your Twitter application's
OAuth credentials in a credentials file (default: ~/.secrets/twitter). Log into
http://dev.twitter.com to create your application.
Examples:
::
    python tw_upload.py -file photo.jpg
    python tw_upload.py -file photo.jpg -text "My picture" -oauth ~/.secrets/twitter
Documentation for all Twitter endpoints is located at:
https://dev.twitter.com/docs/api/1.1
"""
__author__ = "ibanez.j@gmail.com"
__date__ = "July 4, 2017"
__license__ = "MIT"
from TwitterAPI import TwitterAPI, __version__
#from TwitterAPI import TwitterAPI
from TwitterAPI.TwitterOAuth import TwitterOAuth
import argparse
#import codecs
#import json
#import sys
from os.path import expanduser
if __name__ == '__main__':
print('TwitterAPI %s by geduldig' % __version__)
parser = argparse.ArgumentParser(
description='Request any Twitter Streaming or REST API endpoint')
parser.add_argument(
'-oauth',
metavar='FILENAME',
type=str,
help='file containing OAuth credentials',
default=expanduser("~/.secrets/twitter"))
parser.add_argument(
'-file',
metavar='FILENAME',
type=str,
help='image to upload',
required=True)
parser.add_argument(
'-text',
metavar='NAME_VALUE',
type=str,
help='text to send',
default='My picture')
args = parser.parse_args()
try:
oauth = TwitterOAuth.read_file(args.oauth)
file = open(args.file, 'rb')
data = file.read()
file.close()
api = TwitterAPI(oauth.consumer_key,
oauth.consumer_secret,
oauth.access_token_key,
oauth.access_token_secret)
r = api.request('statuses/update_with_media', {'status':args.text}, {'media[]':data})
print(r.status_code)
except KeyboardInterrupt:
print('Terminated by user')
except Exception as e:
print('STOPPED: %s' % e)
| 25.82
| 93
| 0.644849
|
4d344ebe19e0f85f683f7c0a0477880da1958d41
| 4,283
|
py
|
Python
|
src/scripts/training/lgb_K5_nonull_mean_sum_max_mean_imp_no_scaler_params_K.py
|
arnabbiswas1/k_tab_sept_roc_auc_binary_classification_KFold
|
7a5a91e52d460fd25133b76d5241462a4aedc474
|
[
"Apache-2.0"
] | null | null | null |
src/scripts/training/lgb_K5_nonull_mean_sum_max_mean_imp_no_scaler_params_K.py
|
arnabbiswas1/k_tab_sept_roc_auc_binary_classification_KFold
|
7a5a91e52d460fd25133b76d5241462a4aedc474
|
[
"Apache-2.0"
] | null | null | null |
src/scripts/training/lgb_K5_nonull_mean_sum_max_mean_imp_no_scaler_params_K.py
|
arnabbiswas1/k_tab_sept_roc_auc_binary_classification_KFold
|
7a5a91e52d460fd25133b76d5241462a4aedc474
|
[
"Apache-2.0"
] | null | null | null |
"""
LGB,K5,non-null,mean,sum,max,mean impute,no-scaler
params : https://www.kaggle.com/arnabbiswas1/tps-sep-2021-single-lgbm
"""
import os
from timeit import default_timer as timer
from datetime import datetime
import pandas as pd
from sklearn.model_selection import KFold
import src.common as common
import src.config.constants as constants
import src.munging as process_data
import src.modeling as model
common.set_timezone()
start = timer()
# Create RUN_ID
RUN_ID = datetime.now().strftime("%m%d_%H%M")
MODEL_NAME = os.path.basename(__file__).split(".")[0]
SEED = 2021
EXP_DETAILS = "LGB,K5,non-null,mean,sum,max,mean impute,no-scaler"
TARGET = "claim"
N_SPLIT = 5
MODEL_TYPE = "lgb"
OBJECTIVE = "binary"
BOOSTING_TYPE = "gbdt"
METRIC = "auc"
VERBOSE = 100
N_THREADS = 8
NUM_LEAVES = 6
MAX_DEPTH = 2
N_ESTIMATORS = 20000
LEARNING_RATE = 5e-3
EARLY_STOPPING_ROUNDS = 200
lgb_params = {
"objective": OBJECTIVE,
"boosting_type": BOOSTING_TYPE,
"learning_rate": LEARNING_RATE,
# "num_leaves": NUM_LEAVES,
"n_jobs": N_THREADS,
"seed": SEED,
# "max_depth": MAX_DEPTH,
"metric": METRIC,
"verbose": -1,
'subsample': 0.6,
'subsample_freq': 1,
'colsample_bytree': 0.4,
'reg_alpha': 10.0,
'reg_lambda': 1e-1,
'min_child_weight': 256,
'min_child_samples': 20,
}
LOGGER_NAME = "sub_1"
logger = common.get_logger(LOGGER_NAME, MODEL_NAME, RUN_ID, constants.LOG_DIR)
common.set_seed(SEED)
logger.info(f"Running for Model Number [{MODEL_NAME}] & [{RUN_ID}]")
common.update_tracking(RUN_ID, "model_number", MODEL_NAME, drop_incomplete_rows=True)
common.update_tracking(RUN_ID, "model_type", MODEL_TYPE)
common.update_tracking(RUN_ID, "metric", "roc_auc")
common.update_tracking(RUN_ID, "n_estimators", N_ESTIMATORS)
common.update_tracking(RUN_ID, "learning_rate", LEARNING_RATE)
common.update_tracking(RUN_ID, "num_leaves", NUM_LEAVES)
common.update_tracking(RUN_ID, "early_stopping_rounds", EARLY_STOPPING_ROUNDS)
train_df, test_df, sample_submission_df = process_data.read_processed_data(
logger, constants.PROCESSED_DATA_DIR, train=True, test=True, sample_submission=True,
)
combined_df = pd.concat([train_df.drop(TARGET, axis=1), test_df])
features_df = pd.read_parquet(
f"{constants.FEATURES_DATA_DIR}/features_row_wise_stat.parquet"
)
features_to_use = ["no_null", "mean", "sum", "max"]
features_df = features_df[features_to_use]
combined_df = pd.concat([combined_df, features_df], axis=1)
predictors = list(combined_df.columns)
combined_df[predictors] = combined_df[predictors].fillna(combined_df[predictors].mean())
target = train_df[TARGET]
train_df = combined_df.loc[train_df.index]
train_df[TARGET] = target
test_df = combined_df.loc[test_df.index]
train_X = train_df.drop([TARGET], axis=1)
train_Y = train_df[TARGET]
test_X = test_df
logger.info(
f"Shape of train_X : {train_X.shape}, test_X: {test_X.shape}, train_Y: {train_Y.shape}"
)
predictors = list(train_X.columns)
logger.info(f"List of predictors {predictors}")
kfold = KFold(n_splits=N_SPLIT, random_state=SEED, shuffle=True)
common.update_tracking(RUN_ID, "no_of_features", len(predictors), is_integer=True)
common.update_tracking(RUN_ID, "cv_method", "KFold")
common.update_tracking(RUN_ID, "n_fold", N_SPLIT)
results_dict = model.lgb_train_validate_on_cv(
logger,
run_id=RUN_ID,
train_X=train_X,
train_Y=train_Y,
test_X=test_X,
metric="roc_auc",
kf=kfold,
features=predictors,
params=lgb_params,
n_estimators=N_ESTIMATORS,
early_stopping_rounds=EARLY_STOPPING_ROUNDS,
cat_features="auto",
verbose_eval=100,
retrain=False,
)
common.update_tracking(RUN_ID, "lb_score", 0, is_integer=True)
train_index = train_df.index
common.save_artifacts(
logger,
target=TARGET,
is_plot_fi=True,
result_dict=results_dict,
submission_df=sample_submission_df,
train_index=train_index,
model_number=MODEL_NAME,
run_id=RUN_ID,
sub_dir=constants.SUBMISSION_DIR,
oof_dir=constants.OOF_DIR,
fi_dir=constants.FI_DIR,
fi_fig_dir=constants.FI_FIG_DIR,
)
end = timer()
common.update_tracking(RUN_ID, "training_time", end - start, is_integer=True)
common.update_tracking(RUN_ID, "comments", EXP_DETAILS)
logger.info("Execution Complete")
| 26.602484
| 91
| 0.749008
|
76e5d22a0614fb6c762f78fb93a2ba36348a56d2
| 7,967
|
py
|
Python
|
kinow_client/models/director_2.py
|
kinow-io/kinow-python-sdk
|
4c1699a3c78048b84287bd049a669651a5b4e2d5
|
[
"Apache-2.0"
] | 1
|
2019-06-26T14:24:54.000Z
|
2019-06-26T14:24:54.000Z
|
kinow_client/models/director_2.py
|
kinow-io/kinow-python-sdk
|
4c1699a3c78048b84287bd049a669651a5b4e2d5
|
[
"Apache-2.0"
] | null | null | null |
kinow_client/models/director_2.py
|
kinow-io/kinow-python-sdk
|
4c1699a3c78048b84287bd049a669651a5b4e2d5
|
[
"Apache-2.0"
] | 1
|
2018-02-01T10:08:40.000Z
|
2018-02-01T10:08:40.000Z
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.58
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Director2(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None, description_short=None, description=None, meta_title=None, meta_description=None, meta_keywords=None, link_rewrite=None, active=None):
"""
Director2 - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str',
'description_short': 'list[I18nField]',
'description': 'list[I18nField]',
'meta_title': 'list[I18nField]',
'meta_description': 'list[I18nField]',
'meta_keywords': 'list[I18nField]',
'link_rewrite': 'list[I18nField]',
'active': 'bool'
}
self.attribute_map = {
'name': 'name',
'description_short': 'description_short',
'description': 'description',
'meta_title': 'meta_title',
'meta_description': 'meta_description',
'meta_keywords': 'meta_keywords',
'link_rewrite': 'link_rewrite',
'active': 'active'
}
self._name = name
self._description_short = description_short
self._description = description
self._meta_title = meta_title
self._meta_description = meta_description
self._meta_keywords = meta_keywords
self._link_rewrite = link_rewrite
self._active = active
@property
def name(self):
"""
Gets the name of this Director2.
Director's name
:return: The name of this Director2.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Director2.
Director's name
:param name: The name of this Director2.
:type: str
"""
self._name = name
@property
def description_short(self):
"""
Gets the description_short of this Director2.
Director's short description
:return: The description_short of this Director2.
:rtype: list[I18nField]
"""
return self._description_short
@description_short.setter
def description_short(self, description_short):
"""
Sets the description_short of this Director2.
Director's short description
:param description_short: The description_short of this Director2.
:type: list[I18nField]
"""
self._description_short = description_short
@property
def description(self):
"""
Gets the description of this Director2.
Director's description
:return: The description of this Director2.
:rtype: list[I18nField]
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this Director2.
Director's description
:param description: The description of this Director2.
:type: list[I18nField]
"""
self._description = description
@property
def meta_title(self):
"""
Gets the meta_title of this Director2.
Title page for search engines
:return: The meta_title of this Director2.
:rtype: list[I18nField]
"""
return self._meta_title
@meta_title.setter
def meta_title(self, meta_title):
"""
Sets the meta_title of this Director2.
Title page for search engines
:param meta_title: The meta_title of this Director2.
:type: list[I18nField]
"""
self._meta_title = meta_title
@property
def meta_description(self):
"""
Gets the meta_description of this Director2.
Short description for search engines
:return: The meta_description of this Director2.
:rtype: list[I18nField]
"""
return self._meta_description
@meta_description.setter
def meta_description(self, meta_description):
"""
Sets the meta_description of this Director2.
Short description for search engines
:param meta_description: The meta_description of this Director2.
:type: list[I18nField]
"""
self._meta_description = meta_description
@property
def meta_keywords(self):
"""
Gets the meta_keywords of this Director2.
List of keywords for search engines
:return: The meta_keywords of this Director2.
:rtype: list[I18nField]
"""
return self._meta_keywords
@meta_keywords.setter
def meta_keywords(self, meta_keywords):
"""
Sets the meta_keywords of this Director2.
List of keywords for search engines
:param meta_keywords: The meta_keywords of this Director2.
:type: list[I18nField]
"""
self._meta_keywords = meta_keywords
@property
def link_rewrite(self):
"""
Gets the link_rewrite of this Director2.
Friendly URL used to reach real page
:return: The link_rewrite of this Director2.
:rtype: list[I18nField]
"""
return self._link_rewrite
@link_rewrite.setter
def link_rewrite(self, link_rewrite):
"""
Sets the link_rewrite of this Director2.
Friendly URL used to reach real page
:param link_rewrite: The link_rewrite of this Director2.
:type: list[I18nField]
"""
self._link_rewrite = link_rewrite
@property
def active(self):
"""
Gets the active of this Director2.
Director's status
:return: The active of this Director2.
:rtype: bool
"""
return self._active
@active.setter
def active(self, active):
"""
Sets the active of this Director2.
Director's status
:param active: The active of this Director2.
:type: bool
"""
self._active = active
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
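# Editor's note: illustrative usage sketch, not part of the generated file.
# The generated model is plain attribute storage plus dict/str helpers:
def _director2_example():
    director = Director2(name="Jane Doe", active=True)
    return director.to_dict()   # e.g. {'name': 'Jane Doe', ..., 'active': True}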
| 26.824916
| 168
| 0.578637
|
85309d76c3b96ca7268bfde3915187a6d3f67267
| 5,544
|
py
|
Python
|
test/functional/rpc_generateblock.py
|
trungnq1510/bitcoin
|
4702a08358f9996f2a40cb5cba03c9177d4d991f
|
[
"MIT"
] | null | null | null |
test/functional/rpc_generateblock.py
|
trungnq1510/bitcoin
|
4702a08358f9996f2a40cb5cba03c9177d4d991f
|
[
"MIT"
] | null | null | null |
test/functional/rpc_generateblock.py
|
trungnq1510/bitcoin
|
4702a08358f9996f2a40cb5cba03c9177d4d991f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitnamicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''Test generateblock rpc.
'''
from test_framework.test_framework import BitnamicoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class GenerateBlockTest(BitnamicoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0]
self.log.info('Generate an empty block to address')
address = node.getnewaddress()
hash = node.generateblock(output=address, transactions=[])['hash']
block = node.getblock(blockhash=hash, verbose=2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['addresses'][0], address)
self.log.info('Generate an empty block to a descriptor')
hash = node.generateblock('addr(' + address + ')', [])['hash']
block = node.getblock(blockhash=hash, verbosity=2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['addresses'][0], address)
self.log.info('Generate an empty block to a combo descriptor with compressed pubkey')
combo_key = '0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798'
combo_address = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7kygt080'
hash = node.generateblock('combo(' + combo_key + ')', [])['hash']
block = node.getblock(hash, 2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['addresses'][0], combo_address)
self.log.info('Generate an empty block to a combo descriptor with uncompressed pubkey')
combo_key = '0408ef68c46d20596cc3f6ddf7c8794f71913add807f1dc55949fa805d764d191c0b7ce6894c126fce0babc6663042f3dde9b0cf76467ea315514e5a6731149c67'
combo_address = 'mkc9STceoCcjoXEXe6cm66iJbmjM6zR9B2'
hash = node.generateblock('combo(' + combo_key + ')', [])['hash']
block = node.getblock(hash, 2)
assert_equal(len(block['tx']), 1)
assert_equal(block['tx'][0]['vout'][0]['scriptPubKey']['addresses'][0], combo_address)
# Generate 110 blocks to spend
node.generatetoaddress(110, address)
# Generate some extra mempool transactions to verify they don't get mined
for _ in range(10):
node.sendtoaddress(address, 0.001)
self.log.info('Generate block with txid')
txid = node.sendtoaddress(address, 1)
hash = node.generateblock(address, [txid])['hash']
block = node.getblock(hash, 1)
assert_equal(len(block['tx']), 2)
assert_equal(block['tx'][1], txid)
self.log.info('Generate block with raw tx')
utxos = node.listunspent(addresses=[address])
raw = node.createrawtransaction([{'txid':utxos[0]['txid'], 'vout':utxos[0]['vout']}],[{address:1}])
signed_raw = node.signrawtransactionwithwallet(raw)['hex']
hash = node.generateblock(address, [signed_raw])['hash']
block = node.getblock(hash, 1)
assert_equal(len(block['tx']), 2)
txid = block['tx'][1]
assert_equal(node.gettransaction(txid)['hex'], signed_raw)
self.log.info('Fail to generate block with out of order txs')
raw1 = node.createrawtransaction([{'txid':txid, 'vout':0}],[{address:0.9999}])
signed_raw1 = node.signrawtransactionwithwallet(raw1)['hex']
txid1 = node.sendrawtransaction(signed_raw1)
raw2 = node.createrawtransaction([{'txid':txid1, 'vout':0}],[{address:0.999}])
signed_raw2 = node.signrawtransactionwithwallet(raw2)['hex']
assert_raises_rpc_error(-25, 'TestBlockValidity failed: bad-txns-inputs-missingorspent', node.generateblock, address, [signed_raw2, txid1])
self.log.info('Fail to generate block with txid not in mempool')
missing_txid = '0000000000000000000000000000000000000000000000000000000000000000'
assert_raises_rpc_error(-5, 'Transaction ' + missing_txid + ' not in mempool.', node.generateblock, address, [missing_txid])
self.log.info('Fail to generate block with invalid raw tx')
invalid_raw_tx = '0000'
assert_raises_rpc_error(-22, 'Transaction decode failed for ' + invalid_raw_tx, node.generateblock, address, [invalid_raw_tx])
self.log.info('Fail to generate block with invalid address/descriptor')
assert_raises_rpc_error(-5, 'Invalid address or descriptor', node.generateblock, '1234', [])
self.log.info('Fail to generate block with a ranged descriptor')
ranged_descriptor = 'pkh(tpubD6NzVbkrYhZ4XgiXtGrdW5XDAPFCL9h7we1vwNCpn8tGbBcgfVYjXyhWo4E1xkh56hjod1RhGjxbaTLV3X4FyWuejifB9jusQ46QzG87VKp/0/*)'
assert_raises_rpc_error(-8, 'Ranged descriptor not accepted. Maybe pass through deriveaddresses first?', node.generateblock, ranged_descriptor, [])
self.log.info('Fail to generate block with a descriptor missing a private key')
child_descriptor = 'pkh(tpubD6NzVbkrYhZ4XgiXtGrdW5XDAPFCL9h7we1vwNCpn8tGbBcgfVYjXyhWo4E1xkh56hjod1RhGjxbaTLV3X4FyWuejifB9jusQ46QzG87VKp/0\'/0)'
assert_raises_rpc_error(-5, 'Cannot derive script without private keys', node.generateblock, child_descriptor, [])
if __name__ == '__main__':
GenerateBlockTest().main()
| 51.813084
| 155
| 0.694805
|
f6c883e451c88a1e1d7de646f56c752da4e7ad25
| 2,144
|
py
|
Python
|
lms/models/event_models.py
|
yankai14/event-management-telegram-bot-backend
|
c0b4b2294ab7d06100b221d9b41a8f52d500075d
|
[
"MIT"
] | null | null | null |
lms/models/event_models.py
|
yankai14/event-management-telegram-bot-backend
|
c0b4b2294ab7d06100b221d9b41a8f52d500075d
|
[
"MIT"
] | 6
|
2021-06-28T07:23:15.000Z
|
2021-07-22T12:59:33.000Z
|
lms/models/event_models.py
|
yankai14/event-management-telegram-bot-backend
|
c0b4b2294ab7d06100b221d9b41a8f52d500075d
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.postgres.fields import ArrayField
class Event(models.Model):
eventCode = models.CharField(max_length=100, blank=False, null=True, unique=True)
name = models.CharField(max_length=100, blank=False, null=True)
description = models.TextField(default="")
def __str__(self):
return self.eventCode
class EventInstance(models.Model):
''' An EventInstance is a many to one relation with Event.
Eg A module has many tutorial groups. '''
eventInstanceCode = models.CharField(max_length=100, blank=False, null=True, unique=True)
startDate = models.DateTimeField(blank=False, null=True)
endDate = models.DateTimeField(blank=False, null=True)
location = models.CharField(max_length=250, blank=False, null=True)
dates = ArrayField(
models.DateTimeField(blank=False, null=True),
null=True
)
fee = models.DecimalField(max_digits=10, decimal_places=2, default=0)
isCompleted = models.BooleanField(default=False)
event = models.ForeignKey('Event', null=True, on_delete=models.CASCADE)
vacancy = models.IntegerField(blank=False, null=True)
vacancyLeft = models.IntegerField(blank=False, null=True)
isOpenForSignUps = models.BooleanField(default=True)
def __str__(self):
return self.eventInstanceCode
class EventInstanceFolder(models.Model):
folderId = models.CharField(max_length=250, blank=False, null=False, unique=True)
folderName = models.CharField(max_length=200, blank=False, null=True, unique=True)
eventInstance = models.OneToOneField('EventInstance', null=True, on_delete=models.CASCADE)
def __str__(self):
return self.folderId
class EventInstanceFolderPermissions(models.Model):
permissionId = models.CharField(max_length=250, blank=False, null=False, unique=True)
user = models.OneToOneField('User', null=True, on_delete=models.CASCADE)
folder = models.ManyToManyField('EventInstanceFolder')
folderRole = models.CharField(max_length=50, blank=False, null=False, default="reader")
def __str__(self):
return self.permissionId
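# Editor's note: illustrative sketch, not part of the original models module.
# As the EventInstance docstring notes, many instances hang off one Event; a
# typical creation (with placeholder values) looks like:
def _create_example_instance(event: Event) -> EventInstance:
    return EventInstance.objects.create(
        eventInstanceCode="EV001-G1",
        location="Online",
        fee=0,
        vacancy=30,
        vacancyLeft=30,
        event=event,
    )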
| 38.285714
| 94
| 0.736007
|
59cfadb44650872154785eec14fbda5405526d1c
| 8,051
|
py
|
Python
|
doc/src/MCsummary/src/qdotBroyden.py
|
GabrielSCabrera/ComputationalPhysics2
|
a840b97b651085090f99bf6a11abab57100c2e85
|
[
"CC0-1.0"
] | 87
|
2015-01-21T08:29:56.000Z
|
2022-03-28T07:11:53.000Z
|
doc/src/MCsummary/src/qdotBroyden.py
|
GabrielSCabrera/ComputationalPhysics2
|
a840b97b651085090f99bf6a11abab57100c2e85
|
[
"CC0-1.0"
] | 3
|
2020-01-18T10:43:38.000Z
|
2020-02-08T13:15:42.000Z
|
doc/src/MCsummary/src/qdotBroyden.py
|
GabrielSCabrera/ComputationalPhysics2
|
a840b97b651085090f99bf6a11abab57100c2e85
|
[
"CC0-1.0"
] | 54
|
2015-02-09T10:02:00.000Z
|
2022-03-07T10:44:14.000Z
|
# 2-electron VMC code for 2dim quantum dot with importance sampling
# Using gaussian rng for new positions and Metropolis- Hastings
# Added energy minimization
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from scipy.optimize import minimize
import sys
import os
# Where to save data files
PROJECT_ROOT_DIR = "Results"
DATA_ID = "Results/EnergyMin"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
outfile = open(data_path("Energies.dat"),'w')
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r,alpha,beta):
r1 = r[0,0]**2 + r[0,1]**2
r2 = r[1,0]**2 + r[1,1]**2
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = r12/(1+beta*r12)
return exp(-0.5*alpha*(r1+r2)+deno)
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r,alpha,beta):
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
return 0.5*(1-alpha*alpha)*(r1 + r2) +2.0*alpha + 1.0/r12+deno2*(alpha*r12-deno2+2*beta*deno-1.0/r12)
# Derivate of wave function ansatz as function of variational parameters
def DerivativeWFansatz(r,alpha,beta):
WfDer = np.zeros((2), np.double)
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
WfDer[0] = -0.5*(r1+r2)
WfDer[1] = -r12*r12*deno2
return WfDer
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
def QuantumForce(r,alpha,beta):
qforce = np.zeros((NumberParticles,Dimension), np.double)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
qforce[0,:] = -2*r[0,:]*alpha*(r[0,:]-r[1,:])*deno*deno/r12
qforce[1,:] = -2*r[1,:]*alpha*(r[1,:]-r[0,:])*deno*deno/r12
return qforce
# Computing the derivative of the energy and the energy
def EnergyDerivative(x0):
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
energy = 0.0
DeltaE = 0.0
alpha = x0[0]
beta = x0[1]
EnergyDer = 0.0
DeltaPsi = 0.0
DerivativePsiE = 0.0
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,alpha,beta)
QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,alpha,beta)
QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
(D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
PositionNew[i,j]+PositionOld[i,j])
GreensFunction = exp(GreensFunction)
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha,beta)
DerPsi = DerivativeWFansatz(PositionOld,alpha,beta)
DeltaPsi += DerPsi
energy += DeltaE
DerivativePsiE += DerPsi*DeltaE
# We calculate mean values
energy /= NumberMCcycles
DerivativePsiE /= NumberMCcycles
DeltaPsi /= NumberMCcycles
EnergyDer = 2*(DerivativePsiE-DeltaPsi*energy)
return EnergyDer
# Computing the expectation value of the local energy
def Energy(x0):
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
energy = 0.0
DeltaE = 0.0
alpha = x0[0]
beta = x0[1]
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,alpha,beta)
QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,alpha,beta)
QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
(D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
PositionNew[i,j]+PositionOld[i,j])
GreensFunction = exp(GreensFunction)
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha,beta)
energy += DeltaE
if Printout:
outfile.write('%f\n' %(energy/(MCcycle+1.0)))
# We calculate mean values
energy /= NumberMCcycles
return energy
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
# seed for rng generator
seed()
# Monte Carlo cycles for parameter optimization
Printout = False
NumberMCcycles= 10000
# guess for variational parameters
x0 = np.array([0.9,0.2])
# Using the Broyden-Fletcher-Goldfarb-Shanno (BFGS) method to find optimal parameters
res = minimize(Energy, x0, method='BFGS', jac=EnergyDerivative, options={'gtol': 1e-4,'disp': True})
x0 = res.x
# Compute the energy again with the optimal parameters and increased number of Monte Cycles
NumberMCcycles= 2**19
Printout = True
FinalEnergy = Energy(x0)
EResult = np.array([FinalEnergy,FinalEnergy])
outfile.close()
#nice printout with Pandas
import pandas as pd
from pandas import DataFrame
data ={'Optimal Parameters':x0, 'Final Energy':EResult}
frame = pd.DataFrame(data)
print(frame)
| 35.46696
| 105
| 0.635573
|
eafe9e815a235969468f3cc59527190de78b77ea
| 3,569
|
py
|
Python
|
stellar_sdk/xdr/transaction_signature_payload_tagged_transaction.py
|
JakeUrban/py-stellar-base
|
819097df05c9af448f806657a3ff1b0d890f0ac4
|
[
"Apache-2.0"
] | null | null | null |
stellar_sdk/xdr/transaction_signature_payload_tagged_transaction.py
|
JakeUrban/py-stellar-base
|
819097df05c9af448f806657a3ff1b0d890f0ac4
|
[
"Apache-2.0"
] | null | null | null |
stellar_sdk/xdr/transaction_signature_payload_tagged_transaction.py
|
JakeUrban/py-stellar-base
|
819097df05c9af448f806657a3ff1b0d890f0ac4
|
[
"Apache-2.0"
] | null | null | null |
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from .envelope_type import EnvelopeType
from .fee_bump_transaction import FeeBumpTransaction
from .transaction import Transaction
from ..exceptions import ValueError
__all__ = ["TransactionSignaturePayloadTaggedTransaction"]
class TransactionSignaturePayloadTaggedTransaction:
"""
XDR Source Code
----------------------------------------------------------------
union switch (EnvelopeType type)
{
// Backwards Compatibility: Use ENVELOPE_TYPE_TX to sign ENVELOPE_TYPE_TX_V0
case ENVELOPE_TYPE_TX:
Transaction tx;
case ENVELOPE_TYPE_TX_FEE_BUMP:
FeeBumpTransaction feeBump;
}
----------------------------------------------------------------
"""
def __init__(
self,
type: EnvelopeType,
tx: Transaction = None,
fee_bump: FeeBumpTransaction = None,
) -> None:
self.type = type
self.tx = tx
self.fee_bump = fee_bump
def pack(self, packer: Packer) -> None:
self.type.pack(packer)
if self.type == EnvelopeType.ENVELOPE_TYPE_TX:
if self.tx is None:
raise ValueError("tx should not be None.")
self.tx.pack(packer)
return
if self.type == EnvelopeType.ENVELOPE_TYPE_TX_FEE_BUMP:
if self.fee_bump is None:
raise ValueError("fee_bump should not be None.")
self.fee_bump.pack(packer)
return
raise ValueError("Invalid type.")
@classmethod
def unpack(
cls, unpacker: Unpacker
) -> "TransactionSignaturePayloadTaggedTransaction":
type = EnvelopeType.unpack(unpacker)
if type == EnvelopeType.ENVELOPE_TYPE_TX:
tx = Transaction.unpack(unpacker)
if tx is None:
raise ValueError("tx should not be None.")
return cls(type, tx=tx)
if type == EnvelopeType.ENVELOPE_TYPE_TX_FEE_BUMP:
fee_bump = FeeBumpTransaction.unpack(unpacker)
if fee_bump is None:
raise ValueError("fee_bump should not be None.")
return cls(type, fee_bump=fee_bump)
raise ValueError("Invalid type.")
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(
cls, xdr: bytes
) -> "TransactionSignaturePayloadTaggedTransaction":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "TransactionSignaturePayloadTaggedTransaction":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.type == other.type
and self.tx == other.tx
and self.fee_bump == other.fee_bump
)
def __str__(self):
out = []
out.append(f"type={self.type}")
out.append(f"tx={self.tx}") if self.tx is not None else None
out.append(f"fee_bump={self.fee_bump}") if self.fee_bump is not None else None
return f"<TransactionSignaturePayloadTaggedTransaction {[', '.join(out)]}>"
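# Editor's note: illustrative sketch, not part of the generated file. The
# helpers above give a base64 XDR round trip; building a full Transaction is
# out of scope here, so `tx` stands for an already-constructed object.
def _tagged_transaction_roundtrip(tx: Transaction) -> "TransactionSignaturePayloadTaggedTransaction":
    tagged = TransactionSignaturePayloadTaggedTransaction(
        type=EnvelopeType.ENVELOPE_TYPE_TX, tx=tx
    )
    xdr = tagged.to_xdr()  # base64-encoded XDR string
    return TransactionSignaturePayloadTaggedTransaction.from_xdr(xdr)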
| 33.669811
| 86
| 0.607453
|
ee43f5a68307556d0a4535981880a0a3c4a7e166
| 419
|
py
|
Python
|
django_rest_framework/django_rest_framework/wsgi.py
|
KushalVenkatesh/drf-secure-apis-demo
|
ef24928f9ad58e205827ce93ab3be3d1e80af715
|
[
"Apache-2.0"
] | 1
|
2021-08-31T06:09:00.000Z
|
2021-08-31T06:09:00.000Z
|
django_rest_framework/django_rest_framework/wsgi.py
|
KushalVenkatesh/drf-secure-apis-demo
|
ef24928f9ad58e205827ce93ab3be3d1e80af715
|
[
"Apache-2.0"
] | null | null | null |
django_rest_framework/django_rest_framework/wsgi.py
|
KushalVenkatesh/drf-secure-apis-demo
|
ef24928f9ad58e205827ce93ab3be3d1e80af715
|
[
"Apache-2.0"
] | 1
|
2021-08-31T06:08:59.000Z
|
2021-08-31T06:08:59.000Z
|
"""
WSGI config for django_rest_framework project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_rest_framework.settings')
application = get_wsgi_application()
| 24.647059
| 81
| 0.799523
|
7fbc0969249ea3ac7097800abc81beb1e3ebf5da
| 820
|
py
|
Python
|
tickit/devices/eiger/stream/stream_status.py
|
dls-controls/tickit
|
00bb013e69674bcfe4926f365ecb3c65c080abe8
|
[
"Apache-2.0"
] | 4
|
2021-09-16T13:35:33.000Z
|
2022-02-01T23:35:53.000Z
|
tickit/devices/eiger/stream/stream_status.py
|
dls-controls/tickit
|
00bb013e69674bcfe4926f365ecb3c65c080abe8
|
[
"Apache-2.0"
] | 46
|
2021-09-16T13:44:58.000Z
|
2022-02-02T13:42:56.000Z
|
tickit/devices/eiger/stream/stream_status.py
|
dls-controls/tickit
|
00bb013e69674bcfe4926f365ecb3c65c080abe8
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field, fields
from typing import Any, List
from tickit.devices.eiger.eiger_schema import AccessMode, ro_int, ro_str
@dataclass
class StreamStatus:
"""Eiger stream status taken from the API spec."""
state: str = field(default="ready", metadata=ro_str())
error: List[str] = field(
default_factory=lambda: [],
metadata=dict(
value=[], value_type=AccessMode.LIST_STR, access_mode=AccessMode.READ_ONLY
),
)
dropped: int = field(default=0, metadata=ro_int())
def __getitem__(self, key: str) -> Any: # noqa: D105
f = {}
for field_ in fields(self):
f[field_.name] = {
"value": vars(self)[field_.name],
"metadata": field_.metadata,
}
return f[key]
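# Editor's note: illustrative sketch, not part of the original module.
# __getitem__ above exposes each field as a {"value": ..., "metadata": ...}
# mapping keyed by the dataclass field name:
def _stream_status_example() -> str:
    status = StreamStatus()
    return status["state"]["value"]   # "ready" by default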
| 29.285714
| 86
| 0.609756
|
be94f993d1a0f68dcdbc47ecfce56f3376ffe6c9
| 2,189
|
py
|
Python
|
tasks/__init__.py
|
ruivieira/python-experiments
|
c25cc7644d1437898ae5b0fa8073c5ae1442c3ba
|
[
"Apache-2.0"
] | null | null | null |
tasks/__init__.py
|
ruivieira/python-experiments
|
c25cc7644d1437898ae5b0fa8073c5ae1442c3ba
|
[
"Apache-2.0"
] | null | null | null |
tasks/__init__.py
|
ruivieira/python-experiments
|
c25cc7644d1437898ae5b0fa8073c5ae1442c3ba
|
[
"Apache-2.0"
] | null | null | null |
# type: ignore
"""Task management"""
# INFO: Task management
# pylint: disable=R0903,C0103
from dataclasses import dataclass
from enum import Enum
from typing import List
from todoist import TodoistAPI
import common.fs as fs
class TaskStatus(Enum):
"""Task status enum"""
TODO = "TODO"
DONE = "DONE"
LATER = "LATER"
@dataclass
class Task:
"""Task data class"""
status: TaskStatus
content: str
class Todoist:
"""Todoist access manager"""
def __init__(self, token: str):
self.token = token
self.api = TodoistAPI(self.token)
self.api.sync()
def get_all_active_tasks(self) -> List[Task]:
"""Get all active tasks"""
tasks: List[Task] = []
for item in self.api.state["items"]:
status = TaskStatus.TODO if item["checked"] == 0 else TaskStatus.DONE
task = Task(status=status, content=item["content"])
tasks.append(task)
return tasks
def _parse_LogSeq_task(line: str) -> Task:
"""Parse a string into a LogSeq (orgmode) task"""
stripped = line.lstrip()
if stripped.startswith("- TODO"):
task = Task(TaskStatus.TODO, stripped[7:])
elif stripped.startswith("- LATER"):
task = Task(TaskStatus.LATER, stripped[8:])
elif stripped.startswith("- DONE"):
task = Task(TaskStatus.DONE, stripped[7:])
else:
task = Task(TaskStatus.TODO, stripped[2:])
return task
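# Editor's note: illustrative sketch, not part of the original module. The
# parser strips the "- TODO " / "- LATER " / "- DONE " prefixes and treats any
# other bullet as an open TODO:
def _parse_logseq_example() -> Task:
    return _parse_LogSeq_task("  - TODO write the report")
    # -> Task(status=TaskStatus.TODO, content="write the report")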
def _isTask(line: str) -> bool:
"""Check if a LogSeq string is a task"""
stripped = line.lstrip()
return (
stripped.startswith("- TODO")
or stripped.startswith("- LATER")
or stripped.startswith("- DONE")
)
class LogSeqTasks:
"""LogSeq tasks manager"""
def __init__(self, root):
self.root = root
self.tasks: List[Task] = []
files = fs.get_all_files(root, "*.md")
for file in files:
# open the markdown file
with open(file, "r") as md_file:
lines = md_file.read().splitlines()
for line in lines:
if _isTask(line):
self.tasks.append(_parse_LogSeq_task(line))
| 25.16092
| 81
| 0.595249
|
7a4d10d0c271fb998260abd06a935a458709e72a
| 3,293
|
py
|
Python
|
examples/pylab_examples/axes_zoom_effect.py
|
stefanv/matplotlib
|
2c6c3695ef989ec86b7c43740462ef992685022f
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 3
|
2015-11-16T07:22:28.000Z
|
2016-11-11T17:55:14.000Z
|
examples/pylab_examples/axes_zoom_effect.py
|
astraw/matplotlib-ancient
|
7272f102482941ada6e5fd874bdf7f185f420526
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
examples/pylab_examples/axes_zoom_effect.py
|
astraw/matplotlib-ancient
|
7272f102482941ada6e5fd874bdf7f185f420526
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
from matplotlib.transforms import Bbox, TransformedBbox, \
blended_transform_factory
from mpl_toolkits.axes_grid1.inset_locator import BboxPatch, BboxConnector,\
BboxConnectorPatch
def connect_bbox(bbox1, bbox2,
loc1a, loc2a, loc1b, loc2b,
prop_lines, prop_patches=None):
if prop_patches is None:
prop_patches = prop_lines.copy()
prop_patches["alpha"] = prop_patches.get("alpha", 1)*0.2
c1 = BboxConnector(bbox1, bbox2, loc1=loc1a, loc2=loc2a, **prop_lines)
c1.set_clip_on(False)
c2 = BboxConnector(bbox1, bbox2, loc1=loc1b, loc2=loc2b, **prop_lines)
c2.set_clip_on(False)
bbox_patch1 = BboxPatch(bbox1, **prop_patches)
bbox_patch2 = BboxPatch(bbox2, **prop_patches)
p = BboxConnectorPatch(bbox1, bbox2,
#loc1a=3, loc2a=2, loc1b=4, loc2b=1,
loc1a=loc1a, loc2a=loc2a, loc1b=loc1b, loc2b=loc2b,
**prop_patches)
p.set_clip_on(False)
return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect01(ax1, ax2, xmin, xmax, **kwargs):
u"""
    ax1 : the main axes
    ax2 : the zoomed axes
    (xmin, xmax) : the limits of the colored area in both plot axes.
    Connect ax1 & ax2. The x-range (xmin, xmax) in both axes will
    be marked. The keyword parameters will be used to create
    patches.
"""
trans1 = blended_transform_factory(ax1.transData, ax1.transAxes)
trans2 = blended_transform_factory(ax2.transData, ax2.transAxes)
bbox = Bbox.from_extents(xmin, 0, xmax, 1)
mybbox1 = TransformedBbox(bbox, trans1)
mybbox2 = TransformedBbox(bbox, trans2)
prop_patches=kwargs.copy()
prop_patches["ec"]="none"
prop_patches["alpha"]=0.2
c1, c2, bbox_patch1, bbox_patch2, p = \
connect_bbox(mybbox1, mybbox2,
loc1a=3, loc2a=2, loc1b=4, loc2b=1,
prop_lines=kwargs, prop_patches=prop_patches)
ax1.add_patch(bbox_patch1)
ax2.add_patch(bbox_patch2)
ax2.add_patch(c1)
ax2.add_patch(c2)
ax2.add_patch(p)
return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect02(ax1, ax2, **kwargs):
u"""
ax1 : the main axes
    ax2 : the zoomed axes
Similar to zoom_effect01. The xmin & xmax will be taken from the
ax1.viewLim.
"""
tt = ax1.transScale + (ax1.transLimits + ax2.transAxes)
trans = blended_transform_factory(ax2.transData, tt)
mybbox1 = ax1.bbox
mybbox2 = TransformedBbox(ax1.viewLim, trans)
prop_patches=kwargs.copy()
prop_patches["ec"]="none"
prop_patches["alpha"]=0.2
c1, c2, bbox_patch1, bbox_patch2, p = \
connect_bbox(mybbox1, mybbox2,
loc1a=3, loc2a=2, loc1b=4, loc2b=1,
prop_lines=kwargs, prop_patches=prop_patches)
ax1.add_patch(bbox_patch1)
ax2.add_patch(bbox_patch2)
ax2.add_patch(c1)
ax2.add_patch(c2)
ax2.add_patch(p)
return c1, c2, bbox_patch1, bbox_patch2, p
import matplotlib.pyplot as plt
plt.figure(1, figsize=(5,5))
ax1 = plt.subplot(221)
ax2 = plt.subplot(212)
ax1.set_xlim(0, 1)
ax2.set_xlim(0, 5)
zoom_effect01(ax1, ax2, 0.2, 0.8)
ax1 = plt.subplot(222)
ax1.set_xlim(2, 3)
ax2.set_xlim(0, 5)
zoom_effect02(ax1, ax2)
plt.show()
| 27.672269
| 78
| 0.652293
|
55b4b339e9d70e85b4187a60ee978350f2ece6e4
| 2,282
|
py
|
Python
|
test_files/Final/move_type_check.py
|
NeelamMahapatro/Roborex_Chess
|
cc2666c56c09fc21a7c76879e2f19e594ddd474c
|
[
"MIT"
] | 3
|
2019-06-20T04:17:54.000Z
|
2020-07-17T15:34:14.000Z
|
test_files/Final/move_type_check.py
|
NeelamMahapatro/Roborex_Chess
|
cc2666c56c09fc21a7c76879e2f19e594ddd474c
|
[
"MIT"
] | 1
|
2018-03-31T09:48:58.000Z
|
2018-03-31T09:48:58.000Z
|
test_files/Final/move_type_check.py
|
NeelamMahapatro/Roborex_Chess
|
cc2666c56c09fc21a7c76879e2f19e594ddd474c
|
[
"MIT"
] | 6
|
2018-03-30T12:24:25.000Z
|
2020-07-17T15:33:48.000Z
|
#!/usr/bin/env python
################################################################################
##
## MIT License
##
## Copyright (c) 2018 Team Roborex, NIT Rourkela
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
##
################################################################################
################################################################################
##
## AUTHORS: Pabitra Dash
##
################################################################################
import chess
import chess.uci
brd=chess.Board()
def checking(string_):
    len_value = len(string_)
    cool = '0'  # default for unexpected move-string lengths (avoids NameError)
    if len_value == 4:
        cool = checking_(string_)
    if len_value == 5:
        cool = '5'
    return cool
def checking_(string_):
flag=99
string_=chess.Move.from_uci(string_)
if(brd.is_capture(string_)):
flag='1'
if(brd.is_en_passant(string_)):
flag='2'
if(brd.is_castling(string_)):
flag='3'
if(flag==99):
flag='0'
return flag
###################################
i=0
while True:
str_=raw_input("Enter a string: ")
i=i+1
a=checking(str_)
print(a)
brd.push(chess.Move.from_uci(str_))
print(brd)
'''
if(i%2==0):
a=checking(str_)
print(a)
'''
| 30.837838
| 81
| 0.578878
|
3e0c46caf8ea5a3737443e70ff8d615300e67347
| 204
|
py
|
Python
|
python/packages/isce3/core/DateTime.py
|
piyushrpt/isce3
|
1741af321470cb5939693459765d11a19c5c6fc2
|
[
"Apache-2.0"
] | null | null | null |
python/packages/isce3/core/DateTime.py
|
piyushrpt/isce3
|
1741af321470cb5939693459765d11a19c5c6fc2
|
[
"Apache-2.0"
] | null | null | null |
python/packages/isce3/core/DateTime.py
|
piyushrpt/isce3
|
1741af321470cb5939693459765d11a19c5c6fc2
|
[
"Apache-2.0"
] | null | null | null |
#-*- coding: utf-8 -*-
#
# Heresh Fattahi
# Copyright 2019-
#
# The extension
from .. import isceextension
class DateTime(isceextension.pyDateTime):
"""
Wrapper for pyDateTime.
"""
pass
| 13.6
| 41
| 0.642157
|
fa1104381ac698aaf0186a6447bbe10fea4eb475
| 1,064
|
py
|
Python
|
kd_common/kd_common/google/common.py
|
konovalovdmitry/catsnap
|
d5f1d7c37dcee1ad3fee2cdc12a3b44b56f4c63f
|
[
"MIT"
] | null | null | null |
kd_common/kd_common/google/common.py
|
konovalovdmitry/catsnap
|
d5f1d7c37dcee1ad3fee2cdc12a3b44b56f4c63f
|
[
"MIT"
] | null | null | null |
kd_common/kd_common/google/common.py
|
konovalovdmitry/catsnap
|
d5f1d7c37dcee1ad3fee2cdc12a3b44b56f4c63f
|
[
"MIT"
] | 1
|
2021-09-30T08:06:20.000Z
|
2021-09-30T08:06:20.000Z
|
import pickle
import os.path
from typing import Any
from os.path import join
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from kd_common import pathutil
_SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
_FILE_TOKEN = join(pathutil.FOLDER_GOOGLE, "token.pickle")
_FILE_CREDENTIALS = join(pathutil.FOLDER_GOOGLE, "credentials.json")
def get_credentials() -> Any:
creds = None
if os.path.exists(_FILE_TOKEN):
with open(_FILE_TOKEN, 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
_FILE_CREDENTIALS, _SCOPES)
creds = flow.run_console()
# Save the credentials for the next run
with open(_FILE_TOKEN, 'wb') as token:
pickle.dump(creds, token)
return creds
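# Editor's note: illustrative sketch, not part of the original module. A
# typical next step with the credentials above, assuming the Sheets API is the
# target service (it matches the spreadsheets scope requested):
def _sheets_service():
    return build('sheets', 'v4', credentials=get_credentials())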
| 30.4
| 68
| 0.698308
|
06c5606d3f39a9dd8b48f22ce967fe806b42fae9
| 4,018
|
py
|
Python
|
dcitools/devices/doremi/message.py
|
jeajar/python-dcitools
|
1be2e70b6d48bed18465fd8d5ef36ea0e617d5b2
|
[
"MIT"
] | 15
|
2015-02-25T00:51:05.000Z
|
2021-01-14T17:20:56.000Z
|
dcitools/devices/doremi/message.py
|
jeajar/python-dcitools
|
1be2e70b6d48bed18465fd8d5ef36ea0e617d5b2
|
[
"MIT"
] | 5
|
2018-10-05T07:41:00.000Z
|
2019-05-27T22:57:42.000Z
|
dcitools/devices/doremi/message.py
|
jeajar/python-dcitools
|
1be2e70b6d48bed18465fd8d5ef36ea0e617d5b2
|
[
"MIT"
] | 7
|
2018-03-26T13:43:10.000Z
|
2022-03-08T14:43:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
"""
(c) 2014 Ronan Delacroix
Doremi API Requests definition
:author: Ronan Delacroix
"""
import six
import tbx.bytes
class MessageDefinition(object):
"""
Request Definition object.
"""
def __init__(self, name, key, elements=None):
self.name = name
if six.PY3:
self.key = bytes.fromhex(key)
else:
self.key = str(key).decode('hex')
self.elements = elements or [] #List of Element or ResponseElement
@property
def element_names(self):
return [e.name for e in self.elements]
class Element(object):
"""
Message Element Definition
"""
def __init__(self, name, func, **kwargs):
self.name = name
self.func = func
self.kwargs = kwargs
class ResponseElement(Element):
"""
Response Message Element Definition
"""
def __init__(self, name, start, end, func, text_translate=None):
self.name = name
self.func = func
self.start = start
self.end = end
self.text_translate = text_translate
class ResponseBatch(Element):
"""
Response Message Element Definition
"""
def __init__(self, name, start, end, sub_elements=None):
super(ResponseBatch, self).__init__(name=name, func=self.func)
self.start = start
self.end = end
self.sub_elements = sub_elements or []
self.text_translate = None
def func(self, byte_array):
result = []
length = tbx.bytes.bytes_to_int(byte_array[0:4])
item_size = tbx.bytes.bytes_to_int(byte_array[4:8])
for i in range(0, length):
item = {}
chunk = byte_array[8+i*item_size:8+(i+1)*item_size]
for e in self.sub_elements:
sub_chunk = chunk[e.start:e.end]
item[e.name] = e.func(sub_chunk)
if e.text_translate:
item[e.name+'_text'] = e.text_translate.get(item[e.name], 'unknown value')
result.append(item)
return result
class MessageList(object):
"""
Message class.
Allows to index a request/response definition set.
"""
def __init__(self, messages):
self.messages = messages
self.index_by_name = {}
self.index_by_key = {}
for d in self.messages:
self.index_by_key[d.key] = d
self.index_by_name[d.name] = d
def get_by_name(self, name):
return self.index_by_name.get(name, None)
def get_by_key(self, k):
if isinstance(k, str):
if six.PY3:
k = bytes.fromhex(k)
else:
k = k.encode('hex')
return self.index_by_key.get(bytes(k), None)
def get(self, key_or_name):
if isinstance(key_or_name, bytes) or isinstance(key_or_name, bytearray):
return self.get_by_key(key_or_name) or self.get_by_name(key_or_name)
else:
return self.get_by_name(key_or_name) or self.get_by_key(key_or_name)
def list_names(self):
return self.index_by_name.keys()
def list_keys(self):
return self.index_by_key.keys()
def __getattr__(self, name):
if name in self.index_by_name.keys():
return self.get_by_name(name)
        raise AttributeError(name)
class MessageListWrapper(MessageList):
"""
Module Wrapper Class.
Same as parent class but can wrap a module.
See here for wrapping module class : http://stackoverflow.com/questions/2447353/getattr-on-a-module
"""
def __init__(self, wrapped, messages):
self.wrapped = wrapped
super(MessageListWrapper, self).__init__(messages)
def __getattr__(self, name):
"""
Fall back on module to get attributes
"""
        try:
            return super(MessageListWrapper, self).__getattr__(name)
        except AttributeError:
            return getattr(self.wrapped, name)
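# Illustrative sketch, not part of the original module: building a tiny message
# table and looking a definition up by name or via attribute access. The command
# name 'Ping' and key '0102' are invented placeholders, not real Doremi messages.
def _example_message_list():
    ping = MessageDefinition('Ping', '0102')
    messages = MessageList([ping])
    assert messages.get_by_name('Ping') is ping
    assert messages.Ping is ping
    return messages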
| 28.295775
| 103
| 0.607018
|
5c62ad30a0a9e1afc05f8288697746bce12e286d
| 2,019
|
py
|
Python
|
process_geotiff.py
|
SmartDataLab/building_damage_kd
|
45392fff69de818b8d13576de6c413372ca3d0e0
|
[
"MIT"
] | null | null | null |
process_geotiff.py
|
SmartDataLab/building_damage_kd
|
45392fff69de818b8d13576de6c413372ca3d0e0
|
[
"MIT"
] | null | null | null |
process_geotiff.py
|
SmartDataLab/building_damage_kd
|
45392fff69de818b8d13576de6c413372ca3d0e0
|
[
"MIT"
] | 2
|
2021-07-02T08:26:11.000Z
|
2021-07-02T08:27:03.000Z
|
import numpy as np
import gdal
import os
import cv2
import matplotlib.pyplot as plt
import argparse
#%%
def image(path,band):
    # read one band of the raster as a numpy array
    dataset = gdal.Open(path)
    band = dataset.GetRasterBand(band)
    nXSize = dataset.RasterXSize  # number of columns
    nYSize = dataset.RasterYSize  # number of rows
    data = band.ReadAsArray(0,0,nXSize,nYSize).astype(int)
    return data
def writeimage(filename,dst_filename,data):
    # filename supplies the georeferencing info, dst_filename is the output file (ENVI format), data is the array to write
dataset=gdal.Open(filename)
projinfo=dataset.GetProjection()
geotransform = dataset.GetGeoTransform()
format = "ENVI"
driver = gdal.GetDriverByName( format )
dst_ds = driver.Create( dst_filename,dataset.RasterXSize, dataset.RasterYSize,
1, gdal.GDT_Float32 )
dst_ds.SetGeoTransform(geotransform )
dst_ds.SetProjection( projinfo )
dst_ds.GetRasterBand(1).WriteArray( data )
dst_ds = None
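# Illustrative sketch, not part of the original script: read band 1 of a GeoTIFF,
# scale it, and write the result as an ENVI raster that keeps the source
# georeferencing. The file names below are placeholders.
def _example_write_scaled(src='input.tif', dst='scaled_output'):
    data = image(src, 1)
    writeimage(src, dst, data * 2)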
def getListFiles(path):
    # collect all files under a directory (including files in subdirectories)
assert os.path.isdir(path),'%s not exist,'%path
ret=[]
for root,dirs,files in os.walk(path):
for filespath in files:
ret.append(os.path.join(root,filespath))
return ret
def get_figure(file_path,save_path=None):
band_list = [image(file_path,i) for i in range(1,4)]
img = cv2.merge(band_list)
plt.imshow(img)
    plt.xticks([]), plt.yticks([])  # hide the axis ticks
if save_path != None:
plt.savefig(save_path)
plt.show()
#%%
if __name__=='__main__':
    parser = argparse.ArgumentParser('convert a GeoTIFF file to an RGB image')
    parser.add_argument("-geotiff", help="path to the GeoTIFF file", required=True)
    parser.add_argument("-savepath", help="path where the RGB image is saved", required=True)
args = parser.parse_args()
#file_path = '/data1/su/data/geotiffs/tier3/images/woolsey-fire_00000877_pre_disaster.tif'
# %%
# %%
# use like following
# python process_geotiff.py -geotiff /data1/su/data/geotiffs/tier3/images/woolsey-fire_00000877_pre_disaster.tif -savepath test.png
get_figure(args.geotiff,args.savepath)
| 33.098361
| 135
| 0.70629
|
cde75ee6425139ceabd9a6a5de87e846ec184a1a
| 2,182
|
py
|
Python
|
music_blog/models.py
|
sashis/music-blog
|
c0ed13e3df4d25606345c4d8e064833fa86eb0c3
|
[
"MIT"
] | null | null | null |
music_blog/models.py
|
sashis/music-blog
|
c0ed13e3df4d25606345c4d8e064833fa86eb0c3
|
[
"MIT"
] | null | null | null |
music_blog/models.py
|
sashis/music-blog
|
c0ed13e3df4d25606345c4d8e064833fa86eb0c3
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from flask_login import UserMixin
from sqlalchemy import func
from sqlalchemy.ext.hybrid import hybrid_property
from werkzeug.security import generate_password_hash, check_password_hash
from .database import db
class User(db.Model, UserMixin):
__repr_attr__ = 'username'
username = db.Column(db.String(64), unique=True)
email = db.Column(db.String(128), unique=True)
_password = db.Column('password_hash', db.String(128), nullable=False)
posts = db.relationship('Post', backref='author', lazy=True)
@hybrid_property
def password(self):
return self._password
@password.setter
def password(self, plain_passwd):
self._password = generate_password_hash(plain_passwd)
def check_password(self, plain_passwd):
return check_password_hash(self.password, plain_passwd)
@classmethod
def is_exist(cls, username, email):
user = cls.query.filter((cls.username == username) | (cls.email == email))
return user.first() is not None
posts_tags = db.Table(
'posts_tags',
db.Column('post_id', db.Integer, db.ForeignKey('posts.id'), primary_key=True),
db.Column('tag_id', db.Integer, db.ForeignKey('tags.id'), primary_key=True)
)
class Tag(db.Model):
__repr_attr__ = 'name'
name = db.Column(db.String(32), unique=True, nullable=False)
class Post(db.Model):
__repr_attr__ = 'title'
title = db.Column(db.String(80), nullable=False)
img_filename = db.Column(db.String(64), nullable=False)
description = db.Column(db.String(170))
body = db.Column(db.Text, nullable=False)
    created_at = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)  # pass the callable so the timestamp is taken per row, not at import time
view_count = db.Column(db.Integer, nullable=False, default=0)
user_id = db.Column(
db.Integer,
db.ForeignKey('users.id', onupdate='CASCADE', ondelete='SET NULL')
)
tags = db.relationship(Tag, secondary=posts_tags, backref=db.backref('posts', lazy='dynamic'), lazy='dynamic')
@property
def by_same_author(self):
cls = type(self)
return cls.query.with_parent(self.author).filter(cls.id != self.id).order_by(func.random())
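# Illustrative sketch, not part of the original module: the password hybrid
# property hashes on assignment, so only the hash is ever stored. The
# username/email values are placeholders; nothing is committed to the database.
def _example_user():
    user = User(username='demo', email='demo@example.com')
    user.password = 'secret'  # stored as a hash by the property setter
    assert user.check_password('secret')
    return user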
| 32.088235
| 114
| 0.698442
|
dfb6df882195a4196e52852d93b67a3af58b2ac2
| 736
|
py
|
Python
|
tests/views.py
|
wq/django-mustache
|
09d9a723167f5f5ffb63abe687b89324e397f0a2
|
[
"MIT"
] | 19
|
2016-01-14T23:01:50.000Z
|
2020-09-05T07:59:10.000Z
|
tests/views.py
|
sheppard/django-mustache
|
09d9a723167f5f5ffb63abe687b89324e397f0a2
|
[
"MIT"
] | 3
|
2016-11-07T20:54:47.000Z
|
2020-03-18T13:48:27.000Z
|
tests/views.py
|
sheppard/django-mustache
|
09d9a723167f5f5ffb63abe687b89324e397f0a2
|
[
"MIT"
] | 4
|
2016-04-26T23:14:51.000Z
|
2020-06-24T12:18:15.000Z
|
from rest_framework.views import APIView
from rest_framework.response import Response
class View(APIView):
context = {}
@property
def template_name(self):
return "test_%s.html" % type(self).__name__.lower().replace('view', '')
def get(self, request, *args, **kwargs):
return Response(self.context)
class ContextView(View):
context = {
'test': 1234,
'boolean': True,
'none': None,
'false': False,
'nested': {
'value': 5678,
},
'array': [1, 2, 3],
'empty': [],
}
class PartialsView(View):
pass
class MustacheCPView(View):
pass
class DjangoCPView(View):
pass
class AppTemplateView(View):
pass
| 16.727273
| 79
| 0.576087
|
d47eb6ae4ab202335e36b34a13205a376de997dd
| 67,605
|
py
|
Python
|
scipy/stats/mstats_basic.py
|
f0k/scipy
|
3145a226339b14bbc22f2e984848e05def7659c5
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/stats/mstats_basic.py
|
f0k/scipy
|
3145a226339b14bbc22f2e984848e05def7659c5
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/stats/mstats_basic.py
|
f0k/scipy
|
3145a226339b14bbc22f2e984848e05def7659c5
|
[
"BSD-3-Clause"
] | null | null | null |
"""
An extension of scipy.stats.stats to support masked arrays
:author: Pierre GF Gerard-Marchant
:contact: pierregm_at_uga_edu
"""
#TODO : f_value_wilks_lambda looks botched... what are dfnum & dfden for ?
#TODO : ttest_reel looks botched: what are x1,x2,v1,v2 for ?
#TODO : reimplement ksonesamp
from __future__ import division, print_function, absolute_import
__author__ = "Pierre GF Gerard-Marchant"
__docformat__ = "restructuredtext en"
__all__ = ['argstoarray',
'betai',
'chisquare','count_tied_groups',
'describe',
'f_oneway','f_value_wilks_lambda','find_repeats','friedmanchisquare',
'gmean',
'hmean',
'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis',
'ks_twosamp','ks_2samp','kurtosis','kurtosistest',
'linregress',
'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign',
'normaltest',
'obrientransform',
'pearsonr','plotting_positions','pointbiserialr',
'rankdata',
'scoreatpercentile','sem',
'sen_seasonal_slopes','signaltonoise','skew','skewtest','spearmanr',
'theilslopes','threshold','tmax','tmean','tmin','trim','trimboth',
'trimtail','trima','trimr','trimmed_mean','trimmed_std',
'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp',
'ttest_ind','ttest_rel','tvar',
'variation',
'winsorize',
'zmap', 'zscore'
]
import numpy as np
from numpy import ndarray
import numpy.ma as ma
from numpy.ma import MaskedArray, masked, nomask
from scipy.lib.six import iteritems
import itertools
import warnings
#import scipy.stats as stats
from . import stats
import scipy.special as special
import scipy.misc as misc
#import scipy.stats.futil as futil
from . import futil
genmissingvaldoc = """
Notes
-----
Missing values are considered pair-wise: if a value is missing in x,
the corresponding value in y is masked.
"""
#------------------------------------------------------------------------------
def _chk_asarray(a, axis):
if axis is None:
a = ma.ravel(a)
outaxis = 0
else:
a = ma.asanyarray(a)
outaxis = axis
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = ma.ravel(a)
b = ma.ravel(b)
outaxis = 0
else:
a = ma.asanyarray(a)
b = ma.asanyarray(b)
outaxis = axis
return a, b, outaxis
def _chk_size(a,b):
a = ma.asanyarray(a)
b = ma.asanyarray(b)
(na, nb) = (a.size, b.size)
if na != nb:
        raise ValueError("The sizes of the input arrays should match!"\
" (%s <> %s)" % (na,nb))
return (a,b,na)
def argstoarray(*args):
"""Constructs a 2D array from a sequence of sequences. Sequences are filled
with missing values to match the length of the longest sequence.
Returns
-------
output : MaskedArray
a (mxn) masked array, where m is the number of arguments and n the
length of the longest argument.
"""
if len(args) == 1 and not isinstance(args[0], ndarray):
output = ma.asarray(args[0])
if output.ndim != 2:
raise ValueError("The input should be 2D")
else:
n = len(args)
m = max([len(k) for k in args])
output = ma.array(np.empty((n,m), dtype=float), mask=True)
for (k,v) in enumerate(args):
output[k,:len(v)] = v
output[np.logical_not(np.isfinite(output._data))] = masked
return output
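# Illustrative sketch, not part of the original module: argstoarray pads the
# shorter sequences with masked values so ragged groups stack into one
# (m, n) masked array. The sample values are arbitrary.
def _example_argstoarray():
    groups = argstoarray([1., 2., 3.], [4., 5.])
    # groups.shape == (2, 3) and groups[1, 2] is masked
    return groups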
#####--------------------------------------------------------------------------
#---- --- Ranking ---
#####--------------------------------------------------------------------------
def find_repeats(arr):
"""Find repeats in arr and return a tuple (repeats, repeat_count).
Masked values are discarded.
Parameters
----------
arr : sequence
Input array. The array is flattened if it is not 1D.
Returns
-------
repeats : ndarray
Array of repeated values.
counts : ndarray
Array of counts.
"""
marr = ma.compressed(arr)
if not marr.size:
return (np.array(0), np.array(0))
(v1, v2, n) = futil.dfreps(ma.array(ma.compressed(arr), copy=True))
return (v1[:n], v2[:n])
def count_tied_groups(x, use_missing=False):
"""
Counts the number of tied values in x, and returns a dictionary
(nb of ties: nb of groups).
Parameters
----------
x : sequence
Sequence of data on which to counts the ties
use_missing : boolean
Whether to consider missing values as tied.
Examples
--------
>>> z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6]
    >>> count_tied_groups(z)
    {2: 1, 3: 2}
    >>> # The ties were 0 (3x), 2 (3x) and 3 (2x)
    >>> z = ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6])
    >>> count_tied_groups(z)
    {2: 2, 3: 1}
    >>> # The ties were 0 (2x), 2 (3x) and 3 (2x)
    >>> z[[1,-1]] = masked
    >>> count_tied_groups(z, use_missing=True)
    {2: 2, 3: 1}
    >>> # The ties were 2 (3x), 3 (2x) and masked (2x)
"""
nmasked = ma.getmask(x).sum()
# We need the copy as find_repeats will overwrite the initial data
data = ma.compressed(x).copy()
(ties, counts) = find_repeats(data)
nties = {}
if len(ties):
nties = dict(zip(np.unique(counts), itertools.repeat(1)))
nties.update(dict(zip(*find_repeats(counts))))
if nmasked and use_missing:
try:
nties[nmasked] += 1
except KeyError:
nties[nmasked] = 1
return nties
def rankdata(data, axis=None, use_missing=False):
"""Returns the rank (also known as order statistics) of each data point
along the given axis.
If some values are tied, their rank is averaged.
If some values are masked, their rank is set to 0 if use_missing is False,
or set to the average rank of the unmasked values if use_missing is True.
Parameters
----------
data : sequence
Input data. The data is transformed to a masked array
axis : {None,int}, optional
Axis along which to perform the ranking.
If None, the array is first flattened. An exception is raised if
the axis is specified for arrays with a dimension larger than 2
use_missing : {boolean}, optional
Whether the masked values have a rank of 0 (False) or equal to the
average rank of the unmasked values (True).
"""
#
def _rank1d(data, use_missing=False):
n = data.count()
rk = np.empty(data.size, dtype=float)
idx = data.argsort()
rk[idx[:n]] = np.arange(1,n+1)
#
if use_missing:
rk[idx[n:]] = (n+1)/2.
else:
rk[idx[n:]] = 0
#
repeats = find_repeats(data.copy())
for r in repeats[0]:
condition = (data==r).filled(False)
rk[condition] = rk[condition].mean()
return rk
#
data = ma.array(data, copy=False)
if axis is None:
if data.ndim > 1:
return _rank1d(data.ravel(), use_missing).reshape(data.shape)
else:
return _rank1d(data, use_missing)
else:
return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray)
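# Illustrative sketch, not part of the original module: tied values share their
# average rank, and masked entries get rank 0 by default or the average rank
# when use_missing=True. The sample values are arbitrary.
def _example_rankdata():
    x = ma.array([10, 20, 20, 30], mask=[0, 0, 0, 1])
    return rankdata(x), rankdata(x, use_missing=True)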
#####--------------------------------------------------------------------------
#---- --- Central tendency ---
#####--------------------------------------------------------------------------
def gmean(a, axis=0):
a, axis = _chk_asarray(a, axis)
log_a = ma.log(a)
return ma.exp(log_a.mean(axis=axis))
gmean.__doc__ = stats.gmean.__doc__
def hmean(a, axis=0):
a, axis = _chk_asarray(a, axis)
if isinstance(a, MaskedArray):
size = a.count(axis)
else:
size = a.shape[axis]
return size / (1.0/a).sum(axis)
hmean.__doc__ = stats.hmean.__doc__
def mode(a, axis=0):
def _mode1D(a):
(rep,cnt) = find_repeats(a)
if not cnt.ndim:
return (0, 0)
elif cnt.size:
return (rep[cnt.argmax()], cnt.max())
return (a[0], 1)
#
if axis is None:
output = _mode1D(ma.ravel(a))
output = (ma.array(output[0]), ma.array(output[1]))
else:
output = ma.apply_along_axis(_mode1D, axis, a)
newshape = list(a.shape)
newshape[axis] = 1
slices = [slice(None)] * output.ndim
slices[axis] = 0
modes = output[tuple(slices)].reshape(newshape)
slices[axis] = 1
counts = output[tuple(slices)].reshape(newshape)
output = (modes, counts)
return output
mode.__doc__ = stats.mode.__doc__
#####--------------------------------------------------------------------------
#---- --- Probabilities ---
#####--------------------------------------------------------------------------
def betai(a, b, x):
x = np.asanyarray(x)
x = ma.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
betai.__doc__ = stats.betai.__doc__
#####--------------------------------------------------------------------------
#---- --- Correlation ---
#####--------------------------------------------------------------------------
def msign(x):
"""Returns the sign of x, or 0 if x is masked."""
return ma.filled(np.sign(x), 0)
def pearsonr(x,y):
"""Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear
relationship. Positive correlations imply that as x increases, so does
y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : 1D array
y : 1D array the same length as x
Returns
-------
(Pearson's correlation coefficient,
2-tailed p-value)
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.ravel(), y.ravel())
# Get the common mask and the total nb of unmasked elements
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
n -= m.sum()
df = n-2
if df < 0:
return (masked, masked)
#
(mx, my) = (x.mean(), y.mean())
(xm, ym) = (x-mx, y-my)
#
r_num = n*(ma.add.reduce(xm*ym))
r_den = n*ma.sqrt(ma.dot(xm,xm)*ma.dot(ym,ym))
r = (r_num / r_den)
# Presumably, if r > 1, then it is only some small artifact of floating
# point arithmetic.
r = min(r, 1.0)
r = max(r, -1.0)
df = n-2
#
t = ma.sqrt(df/((1.0-r)*(1.0+r))) * r
if t is masked:
prob = 0.
else:
prob = betai(0.5*df,0.5,df/(df+t*t))
return (r,prob)
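# Illustrative sketch, not part of the original module: pairs with a masked
# entry are dropped before the correlation is computed. The data are arbitrary.
def _example_pearsonr():
    x = ma.array([1., 2., 3., 4., 5.], mask=[0, 0, 0, 0, 1])
    y = ma.array([1., 2., 4., 3., -999.], mask=[0, 0, 0, 0, 1])
    return pearsonr(x, y)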
def spearmanr(x, y, use_ties=True):
"""Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
    The Spearman correlation is a nonparametric measure of the monotonic
    relationship between two datasets. Unlike the Pearson correlation, the
    Spearman correlation does not assume that both datasets are normally
    distributed. Like other correlation coefficients, this one varies
    between -1 and +1 with 0 implying no correlation. Correlations of -1 or
    +1 imply an exact monotonic relationship. Positive correlations imply that
as x increases, so does y. Negative correlations imply that as x
increases, y decreases.
Missing values are discarded pair-wise: if a value is missing in x, the
corresponding value in y is masked.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : 1D array
y : 1D array the same length as x
The lengths of both arrays must be > 2.
use_ties : {True, False}, optional
Whether the correction for ties should be computed.
Returns
-------
(Spearman correlation coefficient,
2-tailed p-value)
References
----------
[CRCProbStat2000] section 14.7
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.ravel(), y.ravel())
#
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
n -= m.sum()
if m is not nomask:
x = ma.array(x, mask=m, copy=True)
y = ma.array(y, mask=m, copy=True)
df = n-2
if df < 0:
raise ValueError("The input must have at least 3 entries!")
# Gets the ranks and rank differences
rankx = rankdata(x)
ranky = rankdata(y)
dsq = np.add.reduce((rankx-ranky)**2)
# Tie correction
if use_ties:
xties = count_tied_groups(x)
yties = count_tied_groups(y)
corr_x = np.sum(v*k*(k**2-1) for (k,v) in iteritems(xties))/12.
corr_y = np.sum(v*k*(k**2-1) for (k,v) in iteritems(yties))/12.
else:
corr_x = corr_y = 0
denom = n*(n**2 - 1)/6.
if corr_x != 0 or corr_y != 0:
rho = denom - dsq - corr_x - corr_y
rho /= ma.sqrt((denom-2*corr_x)*(denom-2*corr_y))
else:
rho = 1. - dsq/denom
#
t = ma.sqrt(ma.divide(df,(rho+1.0)*(1.0-rho))) * rho
if t is masked:
prob = 0.
else:
prob = betai(0.5*df,0.5,df/(df+t*t))
return rho, prob
def kendalltau(x, y, use_ties=True, use_missing=False):
"""Computes Kendall's rank correlation tau on two variables *x* and *y*.
Parameters
----------
xdata : sequence
First data list (for example, time).
ydata : sequence
Second data list.
use_ties : {True, False}, optional
Whether ties correction should be performed.
use_missing : {False, True}, optional
Whether missing data should be allocated a rank of 0 (False) or the
average rank (True)
Returns
-------
tau : float
Kendall tau
prob : float
Approximate 2-side p-value.
"""
(x, y, n) = _chk_size(x, y)
(x, y) = (x.flatten(), y.flatten())
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
if m is not nomask:
x = ma.array(x, mask=m, copy=True)
y = ma.array(y, mask=m, copy=True)
n -= m.sum()
#
if n < 2:
return (np.nan, np.nan)
#
rx = ma.masked_equal(rankdata(x, use_missing=use_missing), 0)
ry = ma.masked_equal(rankdata(y, use_missing=use_missing), 0)
idx = rx.argsort()
(rx, ry) = (rx[idx], ry[idx])
C = np.sum([((ry[i+1:]>ry[i]) * (rx[i+1:]>rx[i])).filled(0).sum()
for i in range(len(ry)-1)], dtype=float)
D = np.sum([((ry[i+1:]<ry[i])*(rx[i+1:]>rx[i])).filled(0).sum()
for i in range(len(ry)-1)], dtype=float)
if use_ties:
xties = count_tied_groups(x)
yties = count_tied_groups(y)
corr_x = np.sum([v*k*(k-1) for (k,v) in iteritems(xties)], dtype=float)
corr_y = np.sum([v*k*(k-1) for (k,v) in iteritems(yties)], dtype=float)
denom = ma.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.)
else:
denom = n*(n-1)/2.
tau = (C-D) / denom
#
var_s = n*(n-1)*(2*n+5)
if use_ties:
var_s -= np.sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(xties))
var_s -= np.sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(yties))
v1 = np.sum([v*k*(k-1) for (k, v) in iteritems(xties)], dtype=float) *\
np.sum([v*k*(k-1) for (k, v) in iteritems(yties)], dtype=float)
v1 /= 2.*n*(n-1)
if n > 2:
v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(xties)],
dtype=float) * \
np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(yties)],
dtype=float)
v2 /= 9.*n*(n-1)*(n-2)
else:
v2 = 0
else:
v1 = v2 = 0
var_s /= 18.
var_s += (v1 + v2)
z = (C-D)/np.sqrt(var_s)
prob = special.erfc(abs(z)/np.sqrt(2))
return (tau, prob)
def kendalltau_seasonal(x):
    """Computes a multivariate extension of Kendall's rank correlation tau, designed
for seasonal data.
Parameters
----------
x: 2D array
Array of seasonal data, with seasons in columns.
"""
x = ma.array(x, subok=True, copy=False, ndmin=2)
(n,m) = x.shape
n_p = x.count(0)
#
S_szn = np.sum(msign(x[i:]-x[i]).sum(0) for i in range(n))
S_tot = S_szn.sum()
#
n_tot = x.count()
ties = count_tied_groups(x.compressed())
corr_ties = np.sum(v*k*(k-1) for (k,v) in iteritems(ties))
denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2.
#
R = rankdata(x, axis=0, use_missing=True)
K = ma.empty((m,m), dtype=int)
covmat = ma.empty((m,m), dtype=float)
# cov_jj = ma.empty(m, dtype=float)
denom_szn = ma.empty(m, dtype=float)
for j in range(m):
ties_j = count_tied_groups(x[:,j].compressed())
corr_j = np.sum(v*k*(k-1) for (k,v) in iteritems(ties_j))
cmb = n_p[j]*(n_p[j]-1)
for k in range(j,m,1):
K[j,k] = np.sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum()
for i in range(n))
covmat[j,k] = (K[j,k] +4*(R[:,j]*R[:,k]).sum() - \
n*(n_p[j]+1)*(n_p[k]+1))/3.
K[k,j] = K[j,k]
covmat[k,j] = covmat[j,k]
# cov_jj[j] = (nn_p*(2*n_p[j]+5))
# cov_jj[j] -= np.sum(v*k*(k-1)*(2*k+5) for (k,v) in ties_j.iteritems())
# cov_jj[j] /= 18.
denom_szn[j] = ma.sqrt(cmb*(cmb-corr_j)) / 2.
var_szn = covmat.diagonal()
#
z_szn = msign(S_szn) * (abs(S_szn)-1) / ma.sqrt(var_szn)
z_tot_ind = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(var_szn.sum())
z_tot_dep = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(covmat.sum())
#
prob_szn = special.erfc(abs(z_szn)/np.sqrt(2))
prob_tot_ind = special.erfc(abs(z_tot_ind)/np.sqrt(2))
prob_tot_dep = special.erfc(abs(z_tot_dep)/np.sqrt(2))
#
chi2_tot = (z_szn*z_szn).sum()
chi2_trd = m * z_szn.mean()**2
output = {'seasonal tau': S_szn/denom_szn,
'global tau': S_tot/denom_tot,
'global tau (alt)': S_tot/denom_szn.sum(),
'seasonal p-value': prob_szn,
'global p-value (indep)': prob_tot_ind,
'global p-value (dep)': prob_tot_dep,
'chi2 total': chi2_tot,
'chi2 trend': chi2_trd,
}
return output
def pointbiserialr(x, y):
x = ma.fix_invalid(x, copy=True).astype(bool)
y = ma.fix_invalid(y, copy=True).astype(float)
# Get rid of the missing data ..........
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
if m is not nomask:
unmask = np.logical_not(m)
x = x[unmask]
y = y[unmask]
#
n = len(x)
# phat is the fraction of x values that are True
phat = x.sum() / float(n)
y0 = y[~x] # y-values where x is False
y1 = y[x] # y-values where x is True
y0m = y0.mean()
y1m = y1.mean()
#
rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / y.std()
#
df = n-2
t = rpb*ma.sqrt(df/(1.0-rpb**2))
prob = betai(0.5*df, 0.5, df/(df+t*t))
return rpb, prob
if stats.pointbiserialr.__doc__:
pointbiserialr.__doc__ = stats.pointbiserialr.__doc__ + genmissingvaldoc
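# Illustrative sketch, not part of the original module: x acts as a boolean
# grouping variable and y holds the measurements. The sample values are arbitrary.
def _example_pointbiserialr():
    x = [0, 0, 0, 1, 1, 1]
    y = [1., 2., 3., 6., 7., 8.]
    return pointbiserialr(x, y)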
def linregress(*args):
if len(args) == 1: # more than 1D array?
args = ma.array(args[0], copy=True)
if len(args) == 2:
x = args[0]
y = args[1]
else:
x = args[:,0]
y = args[:,1]
else:
x = ma.array(args[0]).flatten()
y = ma.array(args[1]).flatten()
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
if m is not nomask:
x = ma.array(x,mask=m)
y = ma.array(y,mask=m)
n = len(x)
(xmean, ymean) = (x.mean(), y.mean())
(xm, ym) = (x-xmean, y-ymean)
(Sxx, Syy) = (ma.add.reduce(xm*xm), ma.add.reduce(ym*ym))
Sxy = ma.add.reduce(xm*ym)
r_den = ma.sqrt(Sxx*Syy)
if r_den == 0.0:
r = 0.0
else:
r = Sxy / r_den
if (r > 1.0):
r = 1.0 # from numerical error
#z = 0.5*log((1.0+r+TINY)/(1.0-r+TINY))
df = n-2
t = r * ma.sqrt(df/(1.0-r*r))
prob = betai(0.5*df,0.5,df/(df+t*t))
slope = Sxy / Sxx
intercept = ymean - slope*xmean
sterrest = ma.sqrt(1.-r*r) * y.std()
return slope, intercept, r, prob, sterrest
if stats.linregress.__doc__:
linregress.__doc__ = stats.linregress.__doc__ + genmissingvaldoc
def theilslopes(y, x=None, alpha=0.05):
"""Computes the Theil slope over the dataset (x,y), as the median of all slopes
between paired values.
Parameters
----------
y : sequence
Dependent variable.
x : {None, sequence}, optional
Independent variable. If None, use arange(len(y)) instead.
alpha : float
Confidence degree.
Returns
-------
medslope : float
Theil slope
medintercept : float
Intercept of the Theil line, as median(y)-medslope*median(x)
lo_slope : float
Lower bound of the confidence interval on medslope
up_slope : float
Upper bound of the confidence interval on medslope
"""
y = ma.asarray(y).flatten()
n = len(y)
if x is None:
x = ma.arange(len(y), dtype=float)
else:
x = ma.asarray(x).flatten()
if len(x) != n:
raise ValueError("Incompatible lengths ! (%s<>%s)" % (n,len(x)))
m = ma.mask_or(ma.getmask(x), ma.getmask(y))
y._mask = x._mask = m
ny = y.count()
#
slopes = ma.hstack([(y[i+1:]-y[i])/(x[i+1:]-x[i]) for i in range(n-1)])
slopes.sort()
medslope = ma.median(slopes)
medinter = ma.median(y) - medslope*ma.median(x)
#
if alpha > 0.5:
alpha = 1.-alpha
z = stats.distributions.norm.ppf(alpha/2.)
#
(xties, yties) = (count_tied_groups(x), count_tied_groups(y))
nt = ny*(ny-1)/2.
sigsq = (ny*(ny-1)*(2*ny+5)/18.)
sigsq -= np.sum(v*k*(k-1)*(2*k+5) for (k,v) in iteritems(xties))
sigsq -= np.sum(v*k*(k-1)*(2*k+5) for (k,v) in iteritems(yties))
sigma = np.sqrt(sigsq)
Ru = min(np.round((nt - z*sigma)/2. + 1), len(slopes)-1)
Rl = max(np.round((nt + z*sigma)/2.), 0)
delta = slopes[[Rl,Ru]]
return medslope, medinter, delta[0], delta[1]
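# Illustrative sketch, not part of the original module: the Theil slope of a
# noisy line, returned together with the confidence bounds on the slope. The
# data and the injected outlier are arbitrary.
def _example_theilslopes():
    x = ma.arange(10, dtype=float)
    y = 2. * x + 1.
    y[3] = 50.  # a single outlier has little effect on the median of slopes
    return theilslopes(y, x)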
def sen_seasonal_slopes(x):
x = ma.array(x, subok=True, copy=False, ndmin=2)
(n,_) = x.shape
# Get list of slopes per season
szn_slopes = ma.vstack([(x[i+1:]-x[i])/np.arange(1,n-i)[:,None]
for i in range(n)])
szn_medslopes = ma.median(szn_slopes, axis=0)
medslope = ma.median(szn_slopes, axis=None)
return szn_medslopes, medslope
#####--------------------------------------------------------------------------
#---- --- Inferential statistics ---
#####--------------------------------------------------------------------------
def ttest_onesamp(a, popmean):
a = ma.asarray(a)
x = a.mean(axis=None)
v = a.var(axis=None,ddof=1)
n = a.count(axis=None)
df = n-1
svar = ((n-1)*v) / float(df)
t = (x-popmean)/ma.sqrt(svar*(1.0/n))
prob = betai(0.5*df,0.5,df/(df+t*t))
return t,prob
ttest_onesamp.__doc__ = stats.ttest_1samp.__doc__
ttest_1samp = ttest_onesamp
def ttest_ind(a, b, axis=0):
a, b, axis = _chk2_asarray(a, b, axis)
(x1, x2) = (a.mean(axis), b.mean(axis))
(v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1))
(n1, n2) = (a.count(axis), b.count(axis))
df = n1+n2-2
svar = ((n1-1)*v1+(n2-1)*v2) / float(df)
    # a zero pooled variance makes t masked below; ma.filled then sets it to 1.0
t = (x1-x2)/ma.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!
t = ma.filled(t, 1) # replace NaN t-values with 1.0
probs = betai(0.5*df,0.5,float(df)/(df+t*t)).reshape(t.shape)
return t, probs.squeeze()
ttest_ind.__doc__ = stats.ttest_ind.__doc__
def ttest_rel(a,b,axis=None):
a, b, axis = _chk2_asarray(a, b, axis)
if len(a)!=len(b):
raise ValueError('unequal length arrays')
(x1, x2) = (a.mean(axis), b.mean(axis))
(v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1))
n = a.count(axis)
df = (n-1.0)
d = (a-b).astype('d')
denom = ma.sqrt((n*ma.add.reduce(d*d,axis) - ma.add.reduce(d,axis)**2) /df)
#zerodivproblem = denom == 0
t = ma.add.reduce(d, axis) / denom
t = ma.filled(t, 1)
probs = betai(0.5*df,0.5,df/(df+t*t)).reshape(t.shape).squeeze()
return t, probs
ttest_rel.__doc__ = stats.ttest_rel.__doc__
def chisquare(f_obs, f_exp=None):
f_obs = ma.asarray(f_obs)
if f_exp is None:
f_exp = ma.array([f_obs.mean(axis=0)] * len(f_obs))
f_exp = f_exp.astype(float)
chisq = ma.add.reduce((f_obs-f_exp)**2 / f_exp)
return chisq, stats.chisqprob(chisq, f_obs.count(0)-1)
chisquare.__doc__ = stats.chisquare.__doc__
def mannwhitneyu(x,y, use_continuity=True):
"""Computes the Mann-Whitney on samples x and y.
Missing values in x and/or y are discarded.
Parameters
----------
x : sequence
y : sequence
use_continuity : {True, False}, optional
Whether a continuity correction (1/2.) should be taken into account.
Returns
-------
u : float
        The Mann-Whitney statistic
prob : float
Approximate p-value assuming a normal distribution.
"""
x = ma.asarray(x).compressed().view(ndarray)
y = ma.asarray(y).compressed().view(ndarray)
ranks = rankdata(np.concatenate([x,y]))
(nx, ny) = (len(x), len(y))
nt = nx + ny
U = ranks[:nx].sum() - nx*(nx+1)/2.
U = max(U, nx*ny - U)
u = nx*ny - U
#
mu = (nx*ny)/2.
sigsq = (nt**3 - nt)/12.
ties = count_tied_groups(ranks)
sigsq -= np.sum(v*(k**3-k) for (k,v) in iteritems(ties))/12.
sigsq *= nx*ny/float(nt*(nt-1))
#
if use_continuity:
z = (U - 1/2. - mu) / ma.sqrt(sigsq)
else:
z = (U - mu) / ma.sqrt(sigsq)
prob = special.erfc(abs(z)/np.sqrt(2))
return (u, prob)
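# Illustrative sketch, not part of the original module: comparing two small
# samples; masked values would simply be dropped by the compressed() calls
# above. The sample values are arbitrary.
def _example_mannwhitneyu():
    a = [1.1, 2.3, 2.9, 3.6]
    b = [4.0, 4.2, 5.1, 5.9]
    return mannwhitneyu(a, b)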
def kruskalwallis(*args):
output = argstoarray(*args)
ranks = ma.masked_equal(rankdata(output, use_missing=False), 0)
sumrk = ranks.sum(-1)
ngrp = ranks.count(-1)
ntot = ranks.count()
# ssbg = (sumrk**2/ranks.count(-1)).sum() - ranks.sum()**2/ntotal
# H = ssbg / (ntotal*(ntotal+1)/12.)
H = 12./(ntot*(ntot+1)) * (sumrk**2/ngrp).sum() - 3*(ntot+1)
# Tie correction
ties = count_tied_groups(ranks)
T = 1. - np.sum(v*(k**3-k) for (k,v) in iteritems(ties))/float(ntot**3-ntot)
if T == 0:
raise ValueError('All numbers are identical in kruskal')
H /= T
#
df = len(output) - 1
prob = stats.chisqprob(H,df)
return (H, prob)
kruskal = kruskalwallis
kruskalwallis.__doc__ = stats.kruskal.__doc__
_kolmog2 = special.kolmogorov
def _kolmog1(x,n):
if x <= 0:
return 0
if x >= 1:
return 1
j = np.arange(np.floor(n*(1-x))+1)
return 1 - x * np.sum(np.exp(np.log(misc.comb(n,j))
+ (n-j) * np.log(1-x-j/float(n))
+ (j-1) * np.log(x+j/float(n))))
def ks_twosamp(data1, data2, alternative="two-sided"):
"""Computes the Kolmogorov-Smirnov test on two samples.
Missing values are discarded.
Parameters
----------
data1 : sequence
First data set
data2 : sequence
Second data set
alternative : {'two-sided', 'less', 'greater'}, optional
Indicates the alternative hypothesis.
Returns
-------
d : float
Value of the Kolmogorov Smirnov test
p : float
Corresponding p-value.
"""
(data1, data2) = (ma.asarray(data1), ma.asarray(data2))
(n1, n2) = (data1.count(), data2.count())
n = (n1*n2/float(n1+n2))
mix = ma.concatenate((data1.compressed(), data2.compressed()))
mixsort = mix.argsort(kind='mergesort')
csum = np.where(mixsort<n1, 1./n1, -1./n2).cumsum()
# Check for ties
if len(np.unique(mix)) < (n1+n2):
csum = csum[np.r_[np.diff(mix[mixsort]).nonzero()[0],-1]]
#
alternative = str(alternative).lower()[0]
if alternative == 't':
d = ma.abs(csum).max()
prob = _kolmog2(np.sqrt(n)*d)
elif alternative == 'l':
d = -csum.min()
prob = np.exp(-2*n*d**2)
elif alternative == 'g':
d = csum.max()
prob = np.exp(-2*n*d**2)
else:
raise ValueError("Invalid value for the alternative hypothesis: "\
"should be in 'two-sided', 'less' or 'greater'")
return (d, prob)
ks_2samp = ks_twosamp
def ks_twosamp_old(data1, data2):
""" Computes the Kolmogorov-Smirnov statistic on 2 samples.
Returns
-------
KS D-value, p-value
"""
(data1, data2) = [ma.asarray(d).compressed() for d in (data1,data2)]
return stats.ks_2samp(data1,data2)
#####--------------------------------------------------------------------------
#---- --- Trimming ---
#####--------------------------------------------------------------------------
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""Clip array to a given value.
Similar to numpy.clip(), except that values less than threshmin or
greater than threshmax are replaced by newval, instead of by
threshmin and threshmax respectively.
Parameters
----------
a : ndarray
Input data
threshmin : {None, float}, optional
Lower threshold. If None, set to the minimum value.
threshmax : {None, float}, optional
Upper threshold. If None, set to the maximum value.
newval : {0, float}, optional
Value outside the thresholds.
Returns
-------
a, with values less (greater) than threshmin (threshmax) replaced with newval.
"""
a = ma.array(a, copy=True)
mask = np.zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin).filled(False)
if threshmax is not None:
mask |= (a > threshmax).filled(False)
a[mask] = newval
return a
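# Illustrative sketch, not part of the original module: values outside the
# [2, 8] band are replaced by newval instead of being clipped to the bounds.
def _example_threshold():
    a = np.arange(10)
    return threshold(a, threshmin=2, threshmax=8, newval=-1)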
def trima(a, limits=None, inclusive=(True,True)):
"""Trims an array by masking the data outside some given limits.
Returns a masked version of the input array.
Parameters
----------
a : sequence
Input array.
limits : {None, tuple}, optional
Tuple of (lower limit, upper limit) in absolute values.
Values of the input array lower (greater) than the lower (upper) limit
        will be masked. A limit of None indicates an open interval.
inclusive : {(True,True) tuple}, optional
Tuple of (lower flag, upper flag), indicating whether values exactly
equal to the lower (upper) limit are allowed.
"""
a = ma.asarray(a)
a.unshare_mask()
if limits is None:
return a
(lower_lim, upper_lim) = limits
(lower_in, upper_in) = inclusive
condition = False
if lower_lim is not None:
if lower_in:
condition |= (a < lower_lim)
else:
condition |= (a <= lower_lim)
if upper_lim is not None:
if upper_in:
condition |= (a > upper_lim)
else:
condition |= (a >= upper_lim)
a[condition.filled(True)] = masked
return a
def trimr(a, limits=None, inclusive=(True, True), axis=None):
"""Trims an array by masking some proportion of the data on each end.
Returns a masked version of the input array.
Parameters
----------
a : sequence
Input array.
limits : {None, tuple}, optional
Tuple of the percentages to cut on each side of the array, with respect
to the number of unmasked data, as floats between 0. and 1.
Noting n the number of unmasked data before trimming, the (n*limits[0])th
smallest data and the (n*limits[1])th largest data are masked, and the
total number of unmasked data after trimming is n*(1.-sum(limits))
The value of one limit can be set to None to indicate an open interval.
inclusive : {(True,True) tuple}, optional
Tuple of flags indicating whether the number of data being masked on the
left (right) end should be truncated (True) or rounded (False) to integers.
axis : {None,int}, optional
Axis along which to trim. If None, the whole array is trimmed, but its
shape is maintained.
"""
def _trimr1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
n = a.count()
idx = a.argsort()
if low_limit:
if low_inclusive:
lowidx = int(low_limit*n)
else:
lowidx = np.round(low_limit*n)
a[idx[:lowidx]] = masked
if up_limit is not None:
if up_inclusive:
upidx = n - int(n*up_limit)
else:
upidx = n- np.round(n*up_limit)
a[idx[upidx:]] = masked
return a
#
a = ma.asarray(a)
a.unshare_mask()
if limits is None:
return a
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
#
(loinc, upinc) = inclusive
#
if axis is None:
shp = a.shape
return _trimr1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp)
else:
return ma.apply_along_axis(_trimr1D, axis, a, lolim,uplim,loinc,upinc)
trimdoc = """
Parameters
----------
a : sequence
Input array
limits : {None, tuple}, optional
If relative == False, tuple (lower limit, upper limit) in absolute values.
Values of the input array lower (greater) than the lower (upper) limit are
masked.
If relative == True, tuple (lower percentage, upper percentage) to cut
on each side of the array, with respect to the number of unmasked data.
Noting n the number of unmasked data before trimming, the (n*limits[0])th
smallest data and the (n*limits[1])th largest data are masked, and the
total number of unmasked data after trimming is n*(1.-sum(limits))
In each case, the value of one limit can be set to None to indicate an
open interval.
If limits is None, no trimming is performed
inclusive : {(True, True) tuple}, optional
If relative==False, tuple indicating whether values exactly equal to the
absolute limits are allowed.
If relative==True, tuple indicating whether the number of data being masked
        on each side should be truncated (True) or rounded (False).
relative : {False, True}, optional
Whether to consider the limits as absolute values (False) or proportions
to cut (True).
axis : {None, integer}, optional
Axis along which to trim.
"""
def trim(a, limits=None, inclusive=(True,True), relative=False, axis=None):
"""Trims an array by masking the data outside some given limits.
Returns a masked version of the input array.
%s
Examples
--------
    >>> z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10]
    >>> trim(z,(3,8))
    [--,--, 3, 4, 5, 6, 7, 8,--,--]
    >>> trim(z,(0.1,0.2),relative=True)
    [--, 2, 3, 4, 5, 6, 7, 8,--,--]
"""
if relative:
return trimr(a, limits=limits, inclusive=inclusive, axis=axis)
else:
return trima(a, limits=limits, inclusive=inclusive)
if trim.__doc__:
trim.__doc__ = trim.__doc__ % trimdoc
def trimboth(data, proportiontocut=0.2, inclusive=(True,True), axis=None):
"""Trims the data by masking the int(proportiontocut*n) smallest and
int(proportiontocut*n) largest values of data along the given axis, where n
is the number of unmasked values before trimming.
Parameters
----------
data : ndarray
Data to trim.
proportiontocut : {0.2, float}, optional
Percentage of trimming (as a float between 0 and 1).
If n is the number of unmasked values before trimming, the number of
values after trimming is:
(1-2*proportiontocut)*n.
inclusive : {(True, True) tuple}, optional
Tuple indicating whether the number of data being masked on each side
        should be truncated (True) or rounded (False).
axis : {None, integer}, optional
Axis along which to perform the trimming.
If None, the input array is first flattened.
"""
return trimr(data, limits=(proportiontocut,proportiontocut),
inclusive=inclusive, axis=axis)
#..............................................................................
def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True),
axis=None):
"""Trims the data by masking int(trim*n) values from ONE tail of the
data along the given axis, where n is the number of unmasked values.
Parameters
----------
data : {ndarray}
Data to trim.
proportiontocut : {0.2, float}, optional
Percentage of trimming. If n is the number of unmasked values
before trimming, the number of values after trimming is
(1-proportiontocut)*n.
tail : {'left','right'}, optional
If left (right), the ``proportiontocut`` lowest (greatest) values will
be masked.
inclusive : {(True, True) tuple}, optional
Tuple indicating whether the number of data being masked on each side
        should be truncated (True) or rounded (False).
axis : {None, integer}, optional
Axis along which to perform the trimming.
If None, the input array is first flattened.
"""
tail = str(tail).lower()[0]
if tail == 'l':
limits = (proportiontocut,None)
elif tail == 'r':
limits = (None, proportiontocut)
else:
raise TypeError("The tail argument should be in ('left','right')")
return trimr(data, limits=limits, axis=axis, inclusive=inclusive)
trim1 = trimtail
def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None):
"""Returns the trimmed mean of the data along the given axis.
%s
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
return trimr(a,limits=limits,inclusive=inclusive,axis=axis).mean(axis=axis)
else:
return trima(a,limits=limits,inclusive=inclusive).mean(axis=axis)
def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None, ddof=0):
"""Returns the trimmed variance of the data along the given axis.
%s
ddof : {0,integer}, optional
Means Delta Degrees of Freedom. The denominator used during computations
is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un-
biased estimate of the variance.
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
out = trimr(a,limits=limits, inclusive=inclusive,axis=axis)
else:
out = trima(a,limits=limits,inclusive=inclusive)
return out.var(axis=axis, ddof=ddof)
def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
axis=None, ddof=0):
"""Returns the trimmed standard deviation of the data along the given axis.
%s
ddof : {0,integer}, optional
Means Delta Degrees of Freedom. The denominator used during computations
is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un-
biased estimate of the variance.
""" % trimdoc
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
if relative:
out = trimr(a,limits=limits,inclusive=inclusive,axis=axis)
else:
out = trima(a,limits=limits,inclusive=inclusive)
return out.std(axis=axis,ddof=ddof)
def trimmed_stde(a, limits=(0.1,0.1), inclusive=(1,1), axis=None):
"""Returns the standard error of the trimmed mean of the data along the given
axis.
Parameters
----------
a : sequence
Input array
limits : {(0.1,0.1), tuple of float}, optional
tuple (lower percentage, upper percentage) to cut on each side of the
array, with respect to the number of unmasked data.
Noting n the number of unmasked data before trimming, the (n*limits[0])th
smallest data and the (n*limits[1])th largest data are masked, and the
total number of unmasked data after trimming is n*(1.-sum(limits))
In each case, the value of one limit can be set to None to indicate an
open interval.
If limits is None, no trimming is performed
inclusive : {(True, True) tuple}, optional
Tuple indicating whether the number of data being masked on each side
        should be truncated (True) or rounded (False).
axis : {None, integer}, optional
Axis along which to trim.
"""
#........................
def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
"Returns the standard error of the trimmed mean for a 1D input data."
n = a.count()
idx = a.argsort()
if low_limit:
if low_inclusive:
lowidx = int(low_limit*n)
else:
lowidx = np.round(low_limit*n)
a[idx[:lowidx]] = masked
if up_limit is not None:
if up_inclusive:
upidx = n - int(n*up_limit)
else:
upidx = n- np.round(n*up_limit)
a[idx[upidx:]] = masked
nsize = a.count()
a[idx[:lowidx]] = a[idx[lowidx]]
a[idx[upidx:]] = a[idx[upidx-1]]
winstd = a.std(ddof=1)
return winstd / ((1-low_limit-up_limit)*np.sqrt(len(a)))
#........................
a = ma.array(a, copy=True, subok=True)
a.unshare_mask()
if limits is None:
return a.std(axis=axis,ddof=1)/ma.sqrt(a.count(axis))
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
#
(loinc, upinc) = inclusive
if (axis is None):
shp = a.shape
return _trimmed_stde_1D(a.ravel(),lolim,uplim,loinc,upinc)
else:
if a.ndim > 2:
raise ValueError("Array 'a' must be at most two dimensional, but got a.ndim = %d" % a.ndim)
return ma.apply_along_axis(_trimmed_stde_1D, axis, a,
lolim,uplim,loinc,upinc)
def tmean(a, limits=None, inclusive=(True,True)):
return trima(a, limits=limits, inclusive=inclusive).mean()
tmean.__doc__ = stats.tmean.__doc__
def tvar(a, limits=None, inclusive=(True,True)):
return trima(a, limits=limits, inclusive=inclusive).var()
tvar.__doc__ = stats.tvar.__doc__
def tmin(a, lowerlimit=None, axis=0, inclusive=True):
a, axis = _chk_asarray(a, axis)
am = trima(a, (lowerlimit, None), (inclusive, False))
return ma.minimum.reduce(am, axis)
tmin.__doc__ = stats.tmin.__doc__
def tmax(a, upperlimit, axis=0, inclusive=True):
a, axis = _chk_asarray(a, axis)
am = trima(a, (None, upperlimit), (False, inclusive))
return ma.maximum.reduce(am, axis)
tmax.__doc__ = stats.tmax.__doc__
def tsem(a, limits=None, inclusive=(True,True)):
a = ma.asarray(a).ravel()
if limits is None:
n = float(a.count())
return a.std()/ma.sqrt(n)
am = trima(a.ravel(), limits, inclusive)
sd = np.sqrt(am.var())
return sd / am.count()
tsem.__doc__ = stats.tsem.__doc__
def winsorize(a, limits=None, inclusive=(True,True), inplace=False, axis=None):
"""Returns a Winsorized version of the input array.
The (limits[0])th lowest values are set to the (limits[0])th percentile,
and the (limits[1])th highest values are set to the (limits[1])th
percentile.
Masked values are skipped.
Parameters
----------
a : sequence
Input array.
limits : {None, tuple of float}, optional
Tuple of the percentages to cut on each side of the array, with respect
to the number of unmasked data, as floats between 0. and 1.
Noting n the number of unmasked data before trimming, the (n*limits[0])th
smallest data and the (n*limits[1])th largest data are masked, and the
total number of unmasked data after trimming is n*(1.-sum(limits))
The value of one limit can be set to None to indicate an open interval.
inclusive : {(True, True) tuple}, optional
Tuple indicating whether the number of data being masked on each side
        should be truncated (True) or rounded (False).
inplace : {False, True}, optional
Whether to winsorize in place (True) or to use a copy (False)
axis : {None, int}, optional
Axis along which to trim. If None, the whole array is trimmed, but its
shape is maintained.
"""
def _winsorize1D(a, low_limit, up_limit, low_include, up_include):
n = a.count()
idx = a.argsort()
if low_limit:
if low_include:
lowidx = int(low_limit*n)
else:
lowidx = np.round(low_limit*n)
a[idx[:lowidx]] = a[idx[lowidx]]
if up_limit is not None:
if up_include:
upidx = n - int(n*up_limit)
else:
upidx = n- np.round(n*up_limit)
a[idx[upidx:]] = a[idx[upidx-1]]
return a
    # We are going to modify a, so better make a copy
a = ma.array(a, copy=np.logical_not(inplace))
#
if limits is None:
return a
if (not isinstance(limits,tuple)) and isinstance(limits,float):
limits = (limits, limits)
# Check the limits
(lolim, uplim) = limits
errmsg = "The proportion to cut from the %s should be between 0. and 1."
if lolim is not None:
if lolim > 1. or lolim < 0:
raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
if uplim is not None:
if uplim > 1. or uplim < 0:
raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
#
(loinc, upinc) = inclusive
#
if axis is None:
shp = a.shape
return _winsorize1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp)
else:
return ma.apply_along_axis(_winsorize1D, axis,a,lolim,uplim,loinc,upinc)
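# Illustrative sketch, not part of the original module: a 10% winsorization on
# each side pins the most extreme values to their nearest interior order
# statistics. The sample values are arbitrary.
def _example_winsorize():
    a = ma.array([92., 19., 101., 58., 1053., 91., 26., 78., 10., 13.])
    return winsorize(a, limits=(0.1, 0.1))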
#####--------------------------------------------------------------------------
#---- --- Moments ---
#####--------------------------------------------------------------------------
def moment(a, moment=1, axis=0):
a, axis = _chk_asarray(a, axis)
if moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
mn = ma.expand_dims(a.mean(axis=axis), axis)
s = ma.power((a-mn), moment)
return s.mean(axis=axis)
moment.__doc__ = stats.moment.__doc__
def variation(a, axis=0):
a, axis = _chk_asarray(a, axis)
return a.std(axis)/a.mean(axis)
variation.__doc__ = stats.variation.__doc__
def skew(a, axis=0, bias=True):
a, axis = _chk_asarray(a,axis)
n = a.count(axis)
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
olderr = np.seterr(all='ignore')
try:
vals = ma.where(m2 == 0, 0, m3 / m2**1.5)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = ma.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5
np.place(vals, can_correct, nval)
return vals
skew.__doc__ = stats.skew.__doc__
def kurtosis(a, axis=0, fisher=True, bias=True):
a, axis = _chk_asarray(a, axis)
m2 = moment(a,2,axis)
m4 = moment(a,4,axis)
olderr = np.seterr(all='ignore')
try:
vals = ma.where(m2 == 0, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
n = a.count(axis)
can_correct = (n > 3) & (m2 is not ma.masked and m2 > 0)
if can_correct.any():
n = np.extract(can_correct, n)
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0)
np.place(vals, can_correct, nval+3.0)
if fisher:
return vals - 3
else:
return vals
kurtosis.__doc__ = stats.kurtosis.__doc__
def describe(a, axis=0):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array
axis : int or None
Returns
-------
n : int
        size of the data (discarding missing values)
mm : (int, int)
min, max
arithmetic mean : float
unbiased variance : float
biased skewness : float
biased kurtosis : float
Examples
--------
>>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1])
>>> describe(ma)
(array(3),
(0, 2),
1.0,
1.0,
masked_array(data = 0.0,
mask = False,
fill_value = 1e+20)
,
-1.5)
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis)
mm = (ma.minimum.reduce(a), ma.maximum.reduce(a))
m = a.mean(axis)
v = a.var(axis)
sk = skew(a, axis)
kurt = kurtosis(a, axis)
return n, mm, m, v, sk, kurt
#.............................................................................
def stde_median(data, axis=None):
"""Returns the McKean-Schrader estimate of the standard error of the sample
median along the given axis. masked values are discarded.
Parameters
----------
data : ndarray
Data to trim.
axis : {None,int}, optional
Axis along which to perform the trimming.
If None, the input array is first flattened.
"""
def _stdemed_1D(data):
data = np.sort(data.compressed())
n = len(data)
z = 2.5758293035489004
k = int(np.round((n+1)/2. - z * np.sqrt(n/4.),0))
return ((data[n-k] - data[k-1])/(2.*z))
#
data = ma.array(data, copy=False, subok=True)
if (axis is None):
return _stdemed_1D(data)
else:
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, but got data.ndim = %d" % data.ndim)
return ma.apply_along_axis(_stdemed_1D, axis, data)
#####--------------------------------------------------------------------------
#---- --- Normality Tests ---
#####--------------------------------------------------------------------------
def skewtest(a, axis=0):
"""
Tests whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array_like
The input array.
axis : int or None, optional
The axis along which to perform the skew test. Default is 0.
If `axis` is None, the array is first flattened.
Returns
-------
z-score : float
The computed z-score for this test.
p-value : float
a 2-sided p-value for the hypothesis test
Notes
-----
The sample size must be at least 8.
"""
a, axis = _chk_asarray(a, axis)
if axis is None:
a = a.ravel()
axis = 0
b2 = skew(a,axis)
n = a.count(axis)
if np.min(n) < 8:
warnings.warn(
"skewtest only valid for n>=8 ... continuing anyway, n=%i" %
np.min(n))
y = b2 * ma.sqrt(((n+1)*(n+3)) / (6.0*(n-2)) )
beta2 = ( 3.0*(n*n+27*n-70)*(n+1)*(n+3) ) / ( (n-2.0)*(n+5)*(n+7)*(n+9) )
W2 = -1 + ma.sqrt(2*(beta2-1))
delta = 1/ma.sqrt(0.5*ma.log(W2))
alpha = ma.sqrt(2.0/(W2-1))
y = ma.where(y==0, 1, y)
Z = delta*ma.log(y/alpha + ma.sqrt((y/alpha)**2+1))
return Z, (1.0 - stats.zprob(Z))*2
skewtest.__doc__ = stats.skewtest.__doc__
def kurtosistest(a, axis=0):
"""
Tests whether a dataset has normal kurtosis.
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array_like
Array of the sample data.
axis : int or None, optional
The axis to operate along, or None to work on the whole array.
The default is 0 (the first axis).
Returns
-------
z-score : float
The computed z-score for this test.
p-value : float
The 2-sided p-value for the hypothesis test
Notes
-----
Valid only for n>20. The Z-score is set to 0 for bad entries.
"""
a, axis = _chk_asarray(a, axis)
n = a.count(axis=axis).astype(float)
if np.min(n) < 20:
warnings.warn(
"kurtosistest only valid for n>=20 ... continuing anyway, n=%i" %
np.min(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) /(n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))
x = (b2-E)/ma.sqrt(varb2)
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5))/
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 *(2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2./(9.0*A)
denom = 1 + x*ma.sqrt(2/(A-4.0))
denom[denom < 0] = masked
term2 = ma.power((1-2.0/A)/denom,1/3.0)
Z = ( term1 - term2 ) / np.sqrt(2/(9.0*A))
return Z, (1.0-stats.zprob(Z))*2
kurtosistest.__doc__ = stats.kurtosistest.__doc__
def normaltest(a, axis=0):
a, axis = _chk_asarray(a, axis)
s,_ = skewtest(a,axis)
k,_ = kurtosistest(a,axis)
k2 = s*s + k*k
return k2, stats.chisqprob(k2,2)
normaltest.__doc__ = stats.normaltest.__doc__
# Martinez-Iglewicz test
# K-S test
#####--------------------------------------------------------------------------
#---- --- Percentiles ---
#####--------------------------------------------------------------------------
def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None,
limit=()):
"""
Computes empirical quantiles for a data array.
    Sample quantiles are defined by ``Q(p) = (1-g).x[i] + g.x[i+1]``,
where ``x[j]`` is the j-th order statistic, ``i = (floor(n*p+m))``,
``m=alpha+p*(1-alpha-beta)`` and ``g = n*p + m - i``.
Typical values of (alpha,beta) are:
- (0,1) : *p(k) = k/n* : linear interpolation of cdf (R, type 4)
    - (.5,.5) : *p(k) = (k-1/2.)/n* : piecewise linear
      function (R, type 5)
- (0,0) : *p(k) = k/(n+1)* : (R type 6)
- (1,1) : *p(k) = (k-1)/(n-1)*. In this case, p(k) = mode[F(x[k])].
That's R default (R type 7)
- (1/3,1/3): *p(k) = (k-1/3)/(n+1/3)*. Then p(k) ~ median[F(x[k])].
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x. (R type 8)
- (3/8,3/8): *p(k) = (k-3/8)/(n+1/4)*. Blom.
The resulting quantile estimates are approximately unbiased
if x is normally distributed (R type 9)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
Parameters
----------
a : array_like
Input data, as a sequence or array of dimension at most 2.
prob : array_like, optional
List of quantiles to compute.
    alphap : float, optional
        Plotting positions parameter, default is 0.4.
    betap : float, optional
        Plotting positions parameter, default is 0.4.
axis : int, optional
        Axis along which to compute the quantiles.
If None (default), the input array is first flattened.
limit : tuple
Tuple of (lower, upper) values.
Values of `a` outside this open interval are ignored.
Returns
-------
mquantiles : MaskedArray
An array containing the calculated quantiles.
Examples
--------
>>> from scipy.stats.mstats import mquantiles
>>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.])
>>> mquantiles(a)
array([ 19.2, 40. , 42.8])
Using a 2D array, specifying axis and limit.
>>> data = np.array([[ 6., 7., 1.],
    ...                  [ 47., 15., 2.],
    ...                  [ 49., 36., 3.],
    ...                  [ 15., 39., 4.],
    ...                  [ 42., 40., -999.],
    ...                  [ 41., 41., -999.],
    ...                  [ 7., -999., -999.],
    ...                  [ 39., -999., -999.],
    ...                  [ 43., -999., -999.],
    ...                  [ 40., -999., -999.],
    ...                  [ 36., -999., -999.]])
>>> mquantiles(data, axis=0, limit=(0, 50))
array([[ 19.2 , 14.6 , 1.45],
[ 40. , 37.5 , 2.5 ],
[ 42.8 , 40.05, 3.55]])
>>> data[:, 2] = -999.
>>> mquantiles(data, axis=0, limit=(0, 50))
masked_array(data =
[[19.2 14.6 --]
[40.0 37.5 --]
[42.8 40.05 --]],
mask =
[[False False True]
[False False True]
[False False True]],
fill_value = 1e+20)
"""
def _quantiles1D(data,m,p):
x = np.sort(data.compressed())
n = len(x)
if n == 0:
return ma.array(np.empty(len(p), dtype=float), mask=True)
elif n == 1:
return ma.array(np.resize(x, p.shape), mask=nomask)
aleph = (n*p + m)
k = np.floor(aleph.clip(1, n-1)).astype(int)
gamma = (aleph-k).clip(0,1)
return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()]
# Initialization & checks ---------
data = ma.array(a, copy=False)
if data.ndim > 2:
raise TypeError("Array should be 2D at most !")
#
if limit:
condition = (limit[0]<data) & (data<limit[1])
data[~condition.filled(True)] = masked
#
p = np.array(prob, copy=False, ndmin=1)
m = alphap + p*(1.-alphap-betap)
# Computes quantiles along axis (or globally)
if (axis is None):
return _quantiles1D(data, m, p)
return ma.apply_along_axis(_quantiles1D, axis, data, m, p)
def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4):
"""Calculate the score at the given 'per' percentile of the
sequence a. For example, the score at per=50 is the median.
This function is a shortcut to mquantile
"""
if (per < 0) or (per > 100.):
raise ValueError("The percentile should be between 0. and 100. !"\
" (got %s)" % per)
return mquantiles(data, prob=[per/100.], alphap=alphap, betap=betap,
limit=limit, axis=0).squeeze()
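# --- Illustrative sketch (editor's addition): a hypothetical helper, not part
# of the original module.  scoreatpercentile is a thin wrapper around
# mquantiles, so per=50 reproduces the middle value of the mquantiles example.
def _scoreatpercentile_demo():
    """Return the 50th percentile of the mquantiles docstring sample (40.0)."""
    a = [6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.]
    return scoreatpercentile(a, 50)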
def plotting_positions(data, alpha=0.4, beta=0.4):
"""
Returns plotting positions (or empirical percentile points) for the data.
Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where:
- i is the rank order statistics
- n is the number of unmasked values along the given axis
- alpha and beta are two parameters.
Typical values for alpha and beta are:
- (0,1) : ``p(k) = k/n``, linear interpolation of cdf (R, type 4)
- (.5,.5) : ``p(k) = (k-1/2.)/n``, piecewise linear function
(R, type 5)
- (0,0) : ``p(k) = k/(n+1)``, Weibull (R type 6)
- (1,1) : ``p(k) = (k-1)/(n-1)``, in this case,
``p(k) = mode[F(x[k])]``. That's R default (R type 7)
- (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then
``p(k) ~ median[F(x[k])]``.
The resulting quantile estimates are approximately median-unbiased
regardless of the distribution of x. (R type 8)
- (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom.
The resulting quantile estimates are approximately unbiased
if x is normally distributed (R type 9)
- (.4,.4) : approximately quantile unbiased (Cunnane)
- (.35,.35): APL, used with PWM
- (.3175, .3175): used in scipy.stats.probplot
Parameters
----------
data : array_like
Input data, as a sequence or array of dimension at most 2.
alpha : float, optional
Plotting positions parameter. Default is 0.4.
beta : float, optional
Plotting positions parameter. Default is 0.4.
Returns
-------
positions : MaskedArray
The calculated plotting positions.
"""
data = ma.array(data, copy=False).reshape(1,-1)
n = data.count()
plpos = np.empty(data.size, dtype=float)
plpos[n:] = 0
plpos[data.argsort()[:n]] = (np.arange(1, n+1) - alpha) / \
(n + 1.0 - alpha - beta)
return ma.array(plpos, mask=data._mask)
meppf = plotting_positions
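# --- Illustrative sketch (editor's addition): a hypothetical helper, not part
# of the original module.  Shows the (i - alpha)/(n + 1 - alpha - beta) formula
# on four unmasked points with the default alpha = beta = 0.4, i.e. positions
# (k - 0.4)/4.2 for k = 1..4, roughly [0.143, 0.381, 0.619, 0.857].
def _plotting_positions_demo():
    return plotting_positions([1.0, 2.0, 3.0, 4.0])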
#####--------------------------------------------------------------------------
#---- --- Variability ---
#####--------------------------------------------------------------------------
def obrientransform(*args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. Each
array in *args is one level of a factor. If an F_oneway() run on the
    transformed data is found significant, the variances are unequal. From
Maxwell and Delaney, p.112.
Returns: transformed data for use in an ANOVA
"""
data = argstoarray(*args).T
v = data.var(axis=0,ddof=1)
m = data.mean(0)
n = data.count(0).astype(float)
# result = ((N-1.5)*N*(a-m)**2 - 0.5*v*(n-1))/((n-1)*(n-2))
data -= m
data **= 2
data *= (n-1.5)*n
data -= 0.5*v*(n-1)
data /= (n-1.)*(n-2.)
if not ma.allclose(v,data.mean(0)):
raise ValueError("Lack of convergence in obrientransform.")
return data
def signaltonoise(data, axis=0):
"""Calculates the signal-to-noise ratio, as the ratio of the mean over
standard deviation along the given axis.
Parameters
----------
data : sequence
Input data
axis : {0, int}, optional
Axis along which to compute. If None, the computation is performed
on a flat version of the array.
"""
data = ma.array(data, copy=False)
m = data.mean(axis)
sd = data.std(axis, ddof=0)
return m/sd
def sem(a, axis=0):
a, axis = _chk_asarray(a, axis)
n = a.count(axis=axis)
s = a.std(axis=axis,ddof=0) / ma.sqrt(n-1)
return s
sem.__doc__ = stats.sem.__doc__
zmap = stats.zmap
zscore = stats.zscore
#####--------------------------------------------------------------------------
#---- --- ANOVA ---
#####--------------------------------------------------------------------------
def f_oneway(*args):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: f_oneway (*args) where *args is 2 or more arrays, one per
treatment group
Returns: f-value, probability
"""
# Construct a single array of arguments: each row is a group
data = argstoarray(*args)
ngroups = len(data)
ntot = data.count()
sstot = (data**2).sum() - (data.sum())**2/float(ntot)
ssbg = (data.count(-1) * (data.mean(-1)-data.mean())**2).sum()
sswg = sstot-ssbg
dfbg = ngroups-1
dfwg = ntot - ngroups
msb = ssbg/float(dfbg)
msw = sswg/float(dfwg)
f = msb/msw
prob = stats.fprob(dfbg,dfwg,f)
return f, prob
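# --- Illustrative sketch (editor's addition): a hypothetical helper, not part
# of the original module.  Each positional argument to f_oneway is one
# treatment group; the values below are arbitrary, and unequal group lengths
# are handled through masking by argstoarray.
def _f_oneway_demo():
    g1 = [6.0, 8.0, 4.0, 5.0, 3.0, 4.0]
    g2 = [8.0, 12.0, 9.0, 11.0, 6.0, 8.0]
    g3 = [13.0, 9.0, 11.0, 8.0, 7.0, 12.0]
    return f_oneway(g1, g2, g3)  # (F statistic, p-value)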
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivarite data, per
Maxwell & Delaney p.657.
"""
ER = ma.array(ER, copy=False, ndmin=2)
EF = ma.array(EF, copy=False, ndmin=2)
if ma.getmask(ER).any() or ma.getmask(EF).any():
raise NotImplementedError("Not implemented when the inputs "\
"have missing data")
lmbda = np.linalg.det(EF) / np.linalg.det(ER)
q = ma.sqrt( ((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 -5) )
q = ma.filled(q, 1)
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
def friedmanchisquare(*args):
"""Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA.
This function calculates the Friedman Chi-square test for repeated measures
and returns the result, along with the associated probability value.
Each input is considered a given group. Ideally, the number of treatments
among each group should be equal. If this is not the case, only the first
n treatments are taken into account, where n is the number of treatments
of the smallest group.
If a group has some missing values, the corresponding treatments are masked
in the other groups.
The test statistic is corrected for ties.
Masked values in one group are propagated to the other groups.
Returns: chi-square statistic, associated p-value
"""
data = argstoarray(*args).astype(float)
k = len(data)
if k < 3:
raise ValueError("Less than 3 groups (%i): " % k +\
"the Friedman test is NOT appropriate.")
ranked = ma.masked_values(rankdata(data, axis=0), 0)
if ranked._mask is not nomask:
ranked = ma.mask_cols(ranked)
ranked = ranked.compressed().reshape(k,-1).view(ndarray)
else:
ranked = ranked._data
(k,n) = ranked.shape
# Ties correction
repeats = np.array([find_repeats(_) for _ in ranked.T], dtype=object)
ties = repeats[repeats.nonzero()].reshape(-1,2)[:,-1].astype(int)
tie_correction = 1 - (ties**3-ties).sum()/float(n*(k**3-k))
#
ssbg = np.sum((ranked.sum(-1) - n*(k+1)/2.)**2)
chisq = ssbg * 12./(n*k*(k+1)) * 1./tie_correction
return chisq, stats.chisqprob(chisq,k-1)
#-############################################################################-#
| 33.684604 | 112 | 0.561453 |
49a5e3e2d7a0a96cd0f7e83f6b9f527d074d4280 | 2,832 | py | Python | starlette/middleware/base.py | synodriver/starlette | 9ef1b91c9c043197da6c3f38aa153fd874b95527 | ["BSD-3-Clause"] | null | null | null | starlette/middleware/base.py | synodriver/starlette | 9ef1b91c9c043197da6c3f38aa153fd874b95527 | ["BSD-3-Clause"] | 17 | 2022-01-01T21:22:33.000Z | 2022-03-07T10:33:37.000Z | starlette/middleware/base.py | sthagen/encode-starlette | ba31df75daac7cc3b5fcfea89939372768a166cb | ["BSD-3-Clause"] | null | null | null |
import typing
import anyio
from starlette.requests import Request
from starlette.responses import Response, StreamingResponse
from starlette.types import ASGIApp, Receive, Scope, Send
RequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]]
DispatchFunction = typing.Callable[
[Request, RequestResponseEndpoint], typing.Awaitable[Response]
]
class BaseHTTPMiddleware:
def __init__(
self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None
) -> None:
self.app = app
self.dispatch_func = self.dispatch if dispatch is None else dispatch
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if scope["type"] != "http":
await self.app(scope, receive, send)
return
async def call_next(request: Request) -> Response:
app_exc: typing.Optional[Exception] = None
send_stream, recv_stream = anyio.create_memory_object_stream()
async def coro() -> None:
nonlocal app_exc
async with send_stream:
try:
await self.app(scope, request.receive, send_stream.send)
except Exception as exc:
app_exc = exc
task_group.start_soon(coro)
try:
message = await recv_stream.receive()
except anyio.EndOfStream:
if app_exc is not None:
raise app_exc
raise RuntimeError("No response returned.")
assert message["type"] == "http.response.start"
async def body_stream() -> typing.AsyncGenerator[bytes, None]:
async with recv_stream:
async for message in recv_stream:
assert message["type"] == "http.response.body"
body = message.get("body", b"")
if body:
yield body
if not message.get("more_body", False):
break
if app_exc is not None:
raise app_exc
response = StreamingResponse(
status_code=message["status"], content=body_stream()
)
response.raw_headers = message["headers"]
return response
async with anyio.create_task_group() as task_group:
request = Request(scope, receive=receive)
response = await self.dispatch_func(request, call_next)
await response(scope, receive, send)
task_group.cancel_scope.cancel()
async def dispatch(
self, request: Request, call_next: RequestResponseEndpoint
) -> Response:
raise NotImplementedError() # pragma: no cover
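# --- Illustrative usage sketch (editor's addition, not part of Starlette) ---
# A minimal subclass overriding ``dispatch``; the class name and header value
# below are hypothetical.
class _CustomHeaderMiddleware(BaseHTTPMiddleware):
    async def dispatch(
        self, request: Request, call_next: RequestResponseEndpoint
    ) -> Response:
        # Run the downstream application, then annotate its response.
        response = await call_next(request)
        response.headers["X-Process-Note"] = "handled-by-middleware"
        return response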
| 35.4 | 81 | 0.578743 |
6b790cc5011039ba1747da8d5a6837915483b716 | 13,551 | py | Python | fairseq/optim/fp16_optimizer.py | Yuran-Zhao/bert-nmt | e5095860e59b305bdcf1264ad117e67d3d97e705 | ["BSD-3-Clause"] | null | null | null | fairseq/optim/fp16_optimizer.py | Yuran-Zhao/bert-nmt | e5095860e59b305bdcf1264ad117e67d3d97e705 | ["BSD-3-Clause"] | null | null | null | fairseq/optim/fp16_optimizer.py | Yuran-Zhao/bert-nmt | e5095860e59b305bdcf1264ad117e67d3d97e705 | ["BSD-3-Clause"] | null | null | null |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from fairseq import optim, utils
class DynamicLossScaler(object):
def __init__(
self,
init_scale=2.**15,
scale_factor=2.,
scale_window=2000,
tolerance=0.05,
threshold=None,
):
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
self.tolerance = tolerance
self.threshold = threshold
self._iter = 0
self._last_overflow_iter = -1
self._last_rescale_iter = -1
self._overflows_since_rescale = 0
def update_scale(self, overflow):
iter_since_rescale = self._iter - self._last_rescale_iter
if overflow:
self._last_overflow_iter = self._iter
self._overflows_since_rescale += 1
pct_overflow = self._overflows_since_rescale / float(
iter_since_rescale)
if pct_overflow >= self.tolerance:
self._decrease_loss_scale()
self._last_rescale_iter = self._iter
self._overflows_since_rescale = 0
elif (self._iter - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._iter
self._iter += 1
def _decrease_loss_scale(self):
self.loss_scale /= self.scale_factor
if self.threshold is not None:
self.loss_scale = max(self.loss_scale, self.threshold)
@staticmethod
def has_overflow(grad_norm):
# detect inf and nan
if grad_norm == float('inf') or grad_norm != grad_norm:
return True
return False
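# --- Illustrative sketch (editor's addition, not part of fairseq) ---
# A hypothetical helper showing how the scaler reacts over a few steps: an
# overflowing gradient norm halves the scale, and `scale_window` consecutive
# clean steps double it again.
def _dynamic_loss_scaler_demo():
    scaler = DynamicLossScaler(init_scale=2.**15, scale_window=3)
    history = []
    for grad_norm in [1.0, float('inf'), 1.0, 1.0, 1.0]:
        scaler.update_scale(DynamicLossScaler.has_overflow(grad_norm))
        history.append(scaler.loss_scale)
    return history  # [32768.0, 16384.0, 16384.0, 16384.0, 32768.0]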
class FP16Optimizer(optim.FairseqOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
"""
def __init__(self, args, params, fp32_optimizer, fp32_params):
super().__init__(args, params)
self.fp32_optimizer = fp32_optimizer
self.fp32_params = fp32_params
if getattr(args, 'fp16_scale_window', None) is None:
if len(args.update_freq) > 1:
raise ValueError(
'--fp16-scale-window must be given explicitly when using a '
'custom --update-freq schedule')
scale_window = 2**14 / args.distributed_world_size / args.update_freq[
0]
else:
scale_window = args.fp16_scale_window
self.scaler = DynamicLossScaler(
init_scale=args.fp16_init_scale,
scale_window=scale_window,
tolerance=args.fp16_scale_tolerance,
threshold=args.threshold_loss_scale,
)
@classmethod
def build_optimizer(cls, args, params):
"""
Args:
args (argparse.Namespace): fairseq args
params (iterable): iterable of parameters to optimize
"""
# create FP32 copy of parameters and grads
total_param_size = sum(p.data.numel() for p in params)
fp32_params = params[0].new(0).float().new(total_param_size)
offset = 0
for p in params:
numel = p.data.numel()
fp32_params[offset:offset + numel].copy_(p.data.view(-1))
offset += numel
fp32_params = torch.nn.Parameter(fp32_params)
fp32_params.grad = fp32_params.data.new(total_param_size)
fp32_optimizer = optim.build_optimizer(args, [fp32_params])
return cls(args, params, fp32_optimizer, fp32_params)
@property
def optimizer(self):
return self.fp32_optimizer.optimizer
@property
def optimizer_config(self):
return self.fp32_optimizer.optimizer_config
def get_lr(self):
return self.fp32_optimizer.get_lr()
def set_lr(self, lr):
self.fp32_optimizer.set_lr(lr)
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.fp32_optimizer.state_dict()
state_dict['loss_scale'] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if 'loss_scale' in state_dict:
self.scaler.loss_scale = state_dict['loss_scale']
self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
loss = loss * self.scaler.loss_scale
loss.backward()
self._needs_sync = True
def _sync_fp16_grads_to_fp32(self, multiply_grads=1.):
if self._needs_sync:
# copy FP16 grads to FP32
offset = 0
for p in self.params:
if not p.requires_grad:
continue
grad_data = p.grad.data if p.grad is not None else p.data.new_zeros(
p.data.shape)
numel = grad_data.numel()
self.fp32_params.grad.data[offset:offset + numel].copy_(
grad_data.view(-1))
offset += numel
# correct for dynamic loss scaler
self.fp32_params.grad.data.mul_(multiply_grads /
self.scaler.loss_scale)
self._needs_sync = False
def multiply_grads(self, c):
"""Multiplies grads by a constant ``c``."""
if self._needs_sync:
self._sync_fp16_grads_to_fp32(c)
else:
self.fp32_params.grad.data.mul_(c)
def clip_grad_norm(self, max_norm):
"""Clips gradient norm and updates dynamic loss scaler."""
self._sync_fp16_grads_to_fp32()
grad_norm = utils.clip_grad_norm_(self.fp32_params.grad.data, max_norm)
# detect overflow and adjust loss scale
overflow = DynamicLossScaler.has_overflow(grad_norm)
self.scaler.update_scale(overflow)
if overflow:
if self.scaler.loss_scale <= self.args.min_loss_scale:
# Use FloatingPointError as an uncommon error that parent
# functions can safely catch to stop training.
raise FloatingPointError((
'Minimum loss scale reached ({}). Your loss is probably exploding. '
'Try lowering the learning rate, using gradient clipping or '
'increasing the batch size.').format(
self.args.min_loss_scale))
raise OverflowError('setting loss scale to: ' +
str(self.scaler.loss_scale))
return grad_norm
def step(self, closure=None):
"""Performs a single optimization step."""
self._sync_fp16_grads_to_fp32()
self.fp32_optimizer.step(closure)
# copy FP32 params back into FP16 model
offset = 0
for p in self.params:
if not p.requires_grad:
continue
numel = p.data.numel()
p.data.copy_(self.fp32_params.data[offset:offset + numel].view_as(
p.data))
offset += numel
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.params:
p.grad = None
self._needs_sync = False
class MemoryEfficientFP16Optimizer(optim.FairseqOptimizer):
"""
Wrap an *optimizer* to support FP16 (mixed precision) training.
Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not
maintain an FP32 copy of the model. We instead expect the optimizer to
convert the gradients to FP32 internally and sync the results back to the
FP16 model params. This significantly reduces memory usage but slightly
increases the time spent in the optimizer.
Since this wrapper depends on specific functionality in the wrapped
optimizer (i.e., on-the-fly conversion of grads to FP32), only certain
optimizers can be wrapped. This is determined by the
*supports_memory_efficient_fp16* property.
"""
def __init__(self, args, params, optimizer):
if not optimizer.supports_memory_efficient_fp16:
raise ValueError('Unsupported optimizer: {}'.format(
optimizer.__class__.__name__))
super().__init__(args, params)
self.wrapped_optimizer = optimizer
if getattr(args, 'fp16_scale_window', None) is None:
if len(args.update_freq) > 1:
raise ValueError(
'--fp16-scale-window must be given explicitly when using a '
'custom --update-freq schedule')
scale_window = 2**14 / args.distributed_world_size / args.update_freq[
0]
else:
scale_window = args.fp16_scale_window
self.scaler = DynamicLossScaler(
init_scale=args.fp16_init_scale,
scale_window=scale_window,
tolerance=args.fp16_scale_tolerance,
threshold=args.threshold_loss_scale,
)
@classmethod
def build_optimizer(cls, args, params):
"""
Args:
args (argparse.Namespace): fairseq args
params (iterable): iterable of parameters to optimize
"""
fp16_optimizer = optim.build_optimizer(args, params)
return cls(args, params, fp16_optimizer)
@property
def optimizer(self):
return self.wrapped_optimizer.optimizer
@property
def optimizer_config(self):
return self.wrapped_optimizer.optimizer_config
def get_lr(self):
return self.wrapped_optimizer.get_lr()
def set_lr(self, lr):
self.wrapped_optimizer.set_lr(lr)
def state_dict(self):
"""Return the optimizer's state dict."""
state_dict = self.wrapped_optimizer.state_dict()
state_dict['loss_scale'] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
if 'loss_scale' in state_dict:
self.scaler.loss_scale = state_dict['loss_scale']
self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
"""Computes the sum of gradients of the given tensor w.r.t. graph leaves.
Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
function additionally dynamically scales the loss to avoid gradient
underflow.
"""
loss = loss * self.scaler.loss_scale
loss.backward()
self._grads_are_scaled = True
def _unscale_grads(self, multiply_grads=1.):
if self._grads_are_scaled:
self._grads_are_scaled = False
# correct for dynamic loss scaler
self.wrapped_optimizer.multiply_grads(multiply_grads /
self.scaler.loss_scale)
else:
assert multiply_grads == 1.
def multiply_grads(self, c):
"""Multiplies grads by a constant *c*."""
if self._grads_are_scaled:
self._unscale_grads(c)
else:
self.wrapped_optimizer.multiply_grads(c)
def clip_grad_norm(self, max_norm):
"""Clips gradient norm and updates dynamic loss scaler."""
self._unscale_grads()
grad_norm = self.wrapped_optimizer.clip_grad_norm(max_norm)
# detect overflow and adjust loss scale
overflow = DynamicLossScaler.has_overflow(grad_norm)
self.scaler.update_scale(overflow)
if overflow:
if self.scaler.loss_scale <= self.args.min_loss_scale:
# Use FloatingPointError as an uncommon error that parent
# functions can safely catch to stop training.
raise FloatingPointError((
'Minimum loss scale reached ({}). Your loss is probably exploding. '
'Try lowering the learning rate, using gradient clipping or '
'increasing the batch size.').format(
self.args.min_loss_scale))
raise OverflowError('setting loss scale to: ' +
str(self.scaler.loss_scale))
return grad_norm
def step(self, closure=None):
"""Performs a single optimization step."""
self._unscale_grads()
self.wrapped_optimizer.step(closure)
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
self.wrapped_optimizer.zero_grad()
self._grads_are_scaled = False
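# --- Illustrative sketch (editor's addition, not the fairseq API) ---
# The core loss-scaling pattern both wrappers implement, written against plain
# torch: scale the loss before backward so FP16 gradients do not underflow,
# then divide the gradients by the same factor before the parameter update.
def _loss_scaling_pattern_demo(loss_scale=128.0):
    w = torch.nn.Parameter(torch.full((4,), 0.5, dtype=torch.float16))
    loss = (w.float() ** 2).sum()         # compute the loss in FP32
    (loss * loss_scale).backward()        # scaled backward pass -> FP16 grads
    w.grad.data.mul_(1.0 / loss_scale)    # unscale before the optimizer step
    return w.grad                         # [1.0, 1.0, 1.0, 1.0] in FP16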
| 37.433702 | 88 | 0.625268 |
8364b8cd9cf9b59b7a88c9ba3187704d94f0460f | 5,931 | py | Python | simple_sqlite/test/sqliteTest.py | Svinokur/simple_sqlite | 190d6dca5853c637208a7cd9dfe14e5bd0e2a4f1 | ["MIT"] | 2 | 2021-04-27T15:04:07.000Z | 2021-10-19T07:59:22.000Z | simple_sqlite/test/sqliteTest.py | Svinokur/simple_sqlite | 190d6dca5853c637208a7cd9dfe14e5bd0e2a4f1 | ["MIT"] | null | null | null | simple_sqlite/test/sqliteTest.py | Svinokur/simple_sqlite | 190d6dca5853c637208a7cd9dfe14e5bd0e2a4f1 | ["MIT"] | null | null | null |
#Standart library imports
import unittest
import sys
import os.path
import time
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
#Local imports
from _setting import setting
from simple_sqlite import SimpleSqlite
class testSqlite(unittest.TestCase):
"""Class for unit-testing Sqllite class
Attributes:
sqllite (dict[str]) : Initialize class of Sqllite
startTime (float) : Time of starting unit-tests
"""
@classmethod
def setUpClass(cls):
cls.setting = setting
dbFile : str = setting['Sqllite']['TestDatabase']
cls.sqllite = SimpleSqlite(dbfile = dbFile)
@classmethod
def tearDownClass(cls):
del cls.sqllite
def setUp(self):
self.start_time : float = time.time()
#@unittest.skip('Temporary not needed')
def tearDown(self):
end_time = time.time() - self.start_time
print("%.3f" % end_time)
#@unittest.skip('Temporary not needed')
def test01_create_database(self):
self.sqllite.create_database()
#@unittest.skip('Temporary not needed')
def test02_open_database(self):
self.sqllite.connect_to_db()
#@unittest.skip('Temporary not needed')
def test03_create_table_test(self):
self.sqllite.connect_to_db()
tablename = self.setting["Sqllite"]["TableTest"]
strSql = self.setting["Sqllite"]["SqlTableTest"].format(tablename)
tableName, namesFields, typeFields = self.sqllite.create_table(tablename=tablename, strSql=strSql)
self.assertNotEqual(tableName, '')
self.assertNotEqual(namesFields, '')
self.assertNotEqual(typeFields, '')
self.assertEqual(tableName, self.setting["Sqllite"]["TableTest"])
self.assertEqual(len(namesFields), 3)
self.assertEqual(namesFields[0], "lesson_number")
self.assertEqual(namesFields[1], "lesson_name")
self.assertEqual(namesFields[2], "lesson_info")
self.assertEqual(len(typeFields), 3)
self.assertEqual(typeFields[0], "text")
self.assertEqual(typeFields[1], "text")
self.assertEqual(typeFields[2], "text")
#@unittest.skip('Temporary not needed')
def test04_insert_table_test(self):
self.sqllite.connect_to_db()
tablename = self.setting["Sqllite"]["TableTest"]
data = [{ 'lesson_number' : '1',
'lesson_name' : 'English Lesson',
'lesson_info' : 'There will be no lesson',
}]
self.sqllite.insert_table(tablename, data)
#@unittest.skip('Temporary not needed')
def test05_select_table_test(self):
self.sqllite.connect_to_db()
tablename = self.setting["Sqllite"]["TableTest"]
datas = self.sqllite.select_table(tablename)
self.assertGreater(len(datas), 0, len(datas))
for data in datas:
self.assertEqual(data[0], '1')
self.assertEqual(data[1], 'English Lesson')
self.assertEqual(data[2], 'There will be no lesson')
#@unittest.skip('Temporary not needed')
def test06_check_sql_script_table_test(self):
self.sqllite.connect_to_db()
sql_script = self.setting["Sqllite"]["SqlScriptSelectFirstLesson"]
rows = self.sqllite.select_sql_script(sql_script=sql_script)
self.assertGreater(len(rows), 0, len(rows))
for data in rows:
self.assertEqual(data[0], '1')
self.assertEqual(data[1], 'English Lesson')
self.assertEqual(data[2], 'There will be no lesson')
#@unittest.skip('Temporary not needed')
def test07_check_sql_script_execution_table_test(self):
self.sqllite.connect_to_db()
sql_script = self.setting["Sqllite"]["SqlScriptChangeLessonName"]
self.sqllite.execute_sql_script(sql_script=sql_script)
tablename = self.setting["Sqllite"]["TableTest"]
datas = self.sqllite.select_table(tablename)
self.assertGreater(len(datas), 0, len(datas))
for data in datas:
self.assertEqual(data[0], '1')
self.assertEqual(data[1], 'Spanish lesson')
self.assertEqual(data[2], 'There will be no lesson')
#@unittest.skip('Temporary not needed')
def test08_create_view_test(self):
self.sqllite.connect_to_db()
viewname = self.setting["Sqllite"]["ViewTest"]
str_sql = self.setting["Sqllite"]["SqlViewTest"].format(viewname)
tableName, namesFields, typeFields = self.sqllite.create_view(viewname, str_sql)
self.assertEqual(tableName, viewname)
self.assertEqual(len(namesFields), 3)
self.assertEqual(namesFields[0], "lesson_number")
self.assertEqual(namesFields[1], "lesson_name")
self.assertEqual(namesFields[2], "lesson_info")
self.assertEqual(len(typeFields), 3)
self.assertEqual(typeFields[0], "text")
self.assertEqual(typeFields[1], "text")
self.assertEqual(typeFields[2], "text")
#@unittest.skip
def test10_open_view_test(self):
self.sqllite.connect_to_db()
viewname = self.setting["Sqllite"]["ViewTest"]
rows = self.sqllite.open_view(viewname=viewname)
for data in rows:
self.assertEqual(data[0], '1')
self.assertEqual(data[1], 'Spanish lesson')
self.assertEqual(data[2], 'There will be no lesson')
#@unittest.skip('Temporary not needed')
def test11_replace_database(self):
full_name_database_src = self.setting["Sqllite"]["TestDatabase"]
full_name_database_dst = self.setting["Sqllite"]["TestDatabaseUpd"]
self.sqllite.replace_database(full_name_database_src=full_name_database_src, full_name_database_dst=full_name_database_dst)
if __name__ == '__main__':
unittest.main(verbosity=2, failfast=True, exit=False)
| 35.728916 | 131 | 0.653515 |
4fa533e6d750a1f23d6401d475fa90599de01b29 | 2,359 | py | Python | tests/__init__.py | JustinHedge/hydra | 108af1aec8ef86d752f46ec4c5542ee64fd80412 | ["MIT"] | 1 | 2020-09-25T07:12:14.000Z | 2020-09-25T07:12:14.000Z | tests/__init__.py | JustinHedge/hydra | 108af1aec8ef86d752f46ec4c5542ee64fd80412 | ["MIT"] | 6 | 2021-06-28T20:39:10.000Z | 2022-02-27T10:27:28.000Z | tests/__init__.py | JustinHedge/hydra | 108af1aec8ef86d752f46ec4c5542ee64fd80412 | ["MIT"] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from dataclasses import dataclass
from typing import Any, List, Tuple
from omegaconf import MISSING
from hydra.types import TargetConf
def verify_hydra_pytest_plugin_not_installed() -> None:
try:
# This check can be removed in 1.1
import hydra_pytest_plugin # type: ignore
except ImportError:
hydra_pytest_plugin = None
if hydra_pytest_plugin is not None:
raise ValueError("hydra-pytest-plugin is no longer required, please uninstall")
verify_hydra_pytest_plugin_not_installed()
def module_function(x: int) -> int:
return x
@dataclass
class AClass:
a: Any
b: Any
c: Any
d: Any = "default_value"
@staticmethod
def static_method(z: int) -> int:
return z
@dataclass
class UntypedPassthroughConf:
_target_: str = "tests.UntypedPassthroughClass"
a: Any = MISSING
@dataclass
class UntypedPassthroughClass:
a: Any
# Type not legal in a config
class IllegalType:
def __eq__(self, other: Any) -> Any:
return isinstance(other, IllegalType)
@dataclass
class AnotherClass:
x: int
class ASubclass(AnotherClass):
@classmethod
def class_method(cls, y: int) -> Any:
return cls(y + 1)
@staticmethod
def static_method(z: int) -> int:
return z
class Parameters:
def __init__(self, params: List[float]):
self.params = params
@dataclass
class Adam:
params: Parameters
lr: float = 0.001
betas: Tuple[float, ...] = (0.9, 0.999)
eps: float = 1e-08
weight_decay: int = 0
amsgrad: bool = False
@dataclass
class NestingClass:
a: ASubclass = ASubclass(10)
nesting = NestingClass()
class ClassWithMissingModule:
def __init__(self) -> None:
import some_missing_module # type: ignore # noqa: F401
self.x = 1
@dataclass
class AdamConf(TargetConf):
_target_: str = "tests.Adam"
lr: float = 0.001
betas: Tuple[float, ...] = (0.9, 0.999)
eps: float = 1e-08
weight_decay: int = 0
amsgrad: bool = False
@dataclass
class BadAdamConf(TargetConf):
# Missing str annotation
_target_ = "tests.Adam"
@dataclass
class User:
name: str = MISSING
age: int = MISSING
@dataclass
class UserGroup:
name: str = MISSING
users: List[User] = MISSING
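# --- Illustrative sketch (editor's addition, not part of the test suite) ---
# The ``_target_`` configs above are normally consumed through Hydra's
# instantiate utility; the exact call below is an assumption about that API
# and is shown for orientation only.
# >>> from hydra.utils import instantiate
# >>> adam = instantiate(AdamConf(lr=0.01), params=Parameters([0.1, 0.2]))
# >>> isinstance(adam, Adam)
# True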
| 18.286822 | 87 | 0.667656 |
5dd107f67fbceeefbbdaef86a75a7fa69b6c5a57 | 818 | py | Python | spinoffs/oryx/oryx/util/__init__.py | brianwa84/probability | 6f8e78d859ac41170be5147c8c7bde54cc5aa83e | ["Apache-2.0"] | 3,670 | 2018-02-14T03:29:40.000Z | 2022-03-30T01:19:52.000Z | spinoffs/oryx/oryx/util/__init__.py | brianwa84/probability | 6f8e78d859ac41170be5147c8c7bde54cc5aa83e | ["Apache-2.0"] | 1,395 | 2018-02-24T02:28:49.000Z | 2022-03-31T16:12:06.000Z | spinoffs/oryx/oryx/util/__init__.py | brianwa84/probability | 6f8e78d859ac41170be5147c8c7bde54cc5aa83e | ["Apache-2.0"] | 1,135 | 2018-02-14T01:51:10.000Z | 2022-03-28T02:24:11.000Z |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Lint as: python3
"""Contains Oryx utility functions."""
from oryx.util.summary import get_summaries
from oryx.util.summary import summary
| 43.052632 | 78 | 0.698044 |
24dfe42281ee7b25af0e63d3684c4901f0371181 | 878 | py | Python | Task2C.py | cued-ia-computing/flood-jw2190-lk476 | e58ac7a99e9c98fec610c568c4e36969f1fc3a39 | ["MIT"] | null | null | null | Task2C.py | cued-ia-computing/flood-jw2190-lk476 | e58ac7a99e9c98fec610c568c4e36969f1fc3a39 | ["MIT"] | null | null | null | Task2C.py | cued-ia-computing/flood-jw2190-lk476 | e58ac7a99e9c98fec610c568c4e36969f1fc3a39 | ["MIT"] | null | null | null |
from floodsystem.flood import stations_highest_rel_level
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.station import consistent_typical_range_stations, remove_latest_level_nonetype
def run():
"""Requirements for Task2C"""
#build list of stations
stations = build_station_list()
#update water levels
update_water_levels(stations)
#clean station data
stations = consistent_typical_range_stations(stations)
stations = remove_latest_level_nonetype(stations)
#get the 10 stations with the highest relative water level
highest_rel_stations = stations_highest_rel_level(stations, N=10)
for station in highest_rel_stations:
print(f'{station.name} {station.relative_water_level()}')
if __name__ == "__main__":
print("*** Task 2C: CUED Part 1A Flood Warning System ***")
run()
| 38.173913 | 95 | 0.769932 |