Dataset schema (string/list entries show min to max length; nullable columns are marked):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
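The rows below follow this schema. As a quick way to work with such a dump, here is a minimal sketch using pandas; the file name `sample.jsonl` and the JSON Lines format are assumptions for illustration, not part of the dataset description above.

```python
import pandas as pd

# Hypothetical export of the rows shown below; adjust the path/format to the real dump.
df = pd.read_json("sample.jsonl", lines=True)

# Example query over the documented schema: Python rows with some documentation,
# ordered by star count (null max_stars_count sorts last by default).
docs = df[(df["lang"] == "Python") & (df["score_documentation"] > 0.1)]
top = docs.sort_values("max_stars_count", ascending=False)
print(top[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```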
hexsha: 18ad36444d5128007b08506ac3f31875adc10b4d | size: 127 | ext: py | lang: Python
repo (stars/issues/forks): zeroam/TIL | path: books/SystemProgramming/ch4_advanced/echo_command.py | head_hexsha: 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null
content:
from subprocess import Popen, PIPE
cmd = "echo hello world"
p = Popen(cmd, shell=True, stdout=PIPE)
ret, err = p.communicate()
avg_line_length: 25.4 | max_line_length: 39 | alphanum_fraction: 0.724409 | classes (count/score): 0 / 0 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 18 / 0.141732
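The echo_command.py row above pipes stdout but stops at `communicate()`. A small follow-on sketch (an illustration of intended use, not part of the original file) that decodes and prints the captured output:

```python
from subprocess import PIPE, Popen

p = Popen("echo hello world", shell=True, stdout=PIPE)
out, err = p.communicate()       # out is bytes; err is None because stderr was not piped
print(out.decode().strip())      # hello world
print(p.returncode)              # 0 on success
```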
hexsha: 18ad5089ae1f33994da7db7c1701301bde09c817 | size: 2,238 | ext: py | lang: Python
repo (stars/issues/forks): Flyzoor/NiaPy | path: NiaPy/algorithms/basic/bbfwa.py | head_hexsha: fec1faee0f215cc3a6c2c967ec77dcbe2cbffa42 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null
content:
# encoding=utf8
# pylint: disable=mixed-indentation, trailing-whitespace, multiple-statements, attribute-defined-outside-init, logging-not-lazy
import logging
from numpy import apply_along_axis, argmin
from NiaPy.algorithms.algorithm import Algorithm
logging.basicConfig()
logger = logging.getLogger('NiaPy.algorithms.basic')
logger.setLevel('INFO')
__all__ = ['BareBonesFireworksAlgorithm']
class BareBonesFireworksAlgorithm(Algorithm):
r"""Implementation of bare bone fireworks algorithm.
**Algorithm:** Bare Bones Fireworks Algorithm
**Date:** 2018
**Authors:** Klemen Berkovič
**License:** MIT
**Reference URL:**
https://www.sciencedirect.com/science/article/pii/S1568494617306609
**Reference paper:**
Junzhi Li, Ying Tan, The bare bones fireworks algorithm: A minimalist global optimizer, Applied Soft Computing, Volume 62, 2018, Pages 454-462, ISSN 1568-4946, https://doi.org/10.1016/j.asoc.2017.10.046.
"""
def __init__(self, **kwargs):
r"""Initialize Bare Bones Fireworks algorithm class.
**See**:
Algorithm.__init__(self, **kwargs)
"""
super(BareBonesFireworksAlgorithm, self).__init__(name='BareBonesFireworksAlgorithm', sName='BBFA', **kwargs)
def setParameters(self, **kwargs):
r"""Set the algorithm parameters/arguments.
**See**:
BareBonesFireworksAlgorithm.__setparams(self, n=10, c_a=1.5, c_r=0.5, **ukwargs)
"""
self.__setParams(**kwargs)
def __setParams(self, n=10, C_a=1.5, C_r=0.5, **ukwargs):
r"""Set the arguments of an algorithm.
**Arguments**:
n {integer} -- number of sparks $\in [1, \infty)$
C_a {real} -- amplification coefficient $\in [1, \infty)$
C_r {real} -- reduction coefficient $\in (0, 1)$
"""
self.n, self.C_a, self.C_r = n, C_a, C_r
if ukwargs: logger.info('Unused arguments: %s' % (ukwargs))
def runTask(self, task):
x, A = self.rand.uniform(task.Lower, task.Upper, task.D), task.bRange
x_fit = task.eval(x)
while not task.stopCond():
S = self.rand.uniform(x - A, x + A, [self.n, task.D])
S_fit = apply_along_axis(task.eval, 1, S)
iS = argmin(S_fit)
if S_fit[iS] < x_fit: x, x_fit, A = S[iS], S_fit[iS], self.C_a * A
else: A = self.C_r * A
return x, x_fit
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
avg_line_length: 32.434783 | max_line_length: 204 | alphanum_fraction: 0.707328 | classes (count/score): 1,786 / 0.797678 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 1,280 / 0.571684
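The bbfwa.py docstring above describes the bare bones fireworks rule: sample n sparks uniformly in [x - A, x + A], move to the best spark and amplify A by C_a if it improves, otherwise shrink A by C_r. A framework-free sketch of that loop in plain NumPy (illustrative only; this is not the NiaPy `Algorithm`/`Task` API):

```python
import numpy as np

def bare_bones_fireworks(f, lower, upper, n=10, c_a=1.5, c_r=0.5, iters=1000, rng=None):
    """Minimize f over the box [lower, upper]; a minimal sketch of the BBFWA update."""
    rng = rng or np.random.default_rng()
    x = rng.uniform(lower, upper)            # initial firework
    x_fit, amp = f(x), upper - lower         # amplitude starts at the box range
    for _ in range(iters):
        sparks = rng.uniform(x - amp, x + amp, size=(n, x.size))
        fits = np.apply_along_axis(f, 1, sparks)
        best = int(np.argmin(fits))
        if fits[best] < x_fit:
            x, x_fit, amp = sparks[best], fits[best], c_a * amp   # improvement: amplify
        else:
            amp = c_r * amp                                       # no improvement: reduce
    return x, x_fit

# Example: minimize the 5-dimensional sphere function.
best_x, best_fit = bare_bones_fireworks(lambda v: float(np.sum(v * v)),
                                        lower=np.full(5, -10.0), upper=np.full(5, 10.0))
print(best_fit)
```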
hexsha: 18ae5fde1fdfdd5b09f5207f83e23ef0e8f54a07 | size: 854 | ext: py | lang: Python
repo (stars/issues/forks): Vibaswan/ixnetwork_restpy | path: ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/ripng_template.py | head_hexsha: 239fedc7050890746cbabd71ea1e91c68d9e5cad | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null
content:
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class RIPng(Base):
__slots__ = ()
_SDM_NAME = 'ripng'
_SDM_ATT_MAP = {
'RIPng Header': 'ripng.header.ripngHeader',
'Route Table entries': 'ripng.header.routeTableEntries',
}
def __init__(self, parent):
super(RIPng, self).__init__(parent)
@property
def RIPng_Header(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RIPng Header']))
@property
def Route_Table_entries(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Route Table entries']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
avg_line_length: 30.5 | max_line_length: 94 | alphanum_fraction: 0.701405 | classes (count/score): 771 / 0.90281 | generators: 0 / 0 | decorators: 382 / 0.447307 | async_functions: 0 / 0 | documentation: 135 / 0.15808
hexsha: 18af0a7d2a7ce2d43b7672a9c24d93c96068fd61 | size: 1,083 | ext: py | lang: Python
repo (stars/issues/forks): kylecarter/ict4510-advwebdvlp | path: backend/feedback/migrations/0001_initial.py | head_hexsha: 0360b2353535611a6b3dd79cefe2d5780d027511 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null
content:
# Generated by Django 2.1.3 on 2018-11-18 02:34
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Conversation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
('contact', models.CharField(help_text='Name of the contact', max_length=255, verbose_name='Full Name')),
('email', models.EmailField(help_text='Contact email.', max_length=255, verbose_name='Email')),
('message', models.TextField(help_text='Message provided by the contact.', verbose_name='Message')),
('resolution', models.TextField(blank=True, help_text='Resolution if any for the conversation.', null=True, verbose_name='Resolution')),
],
),
]
avg_line_length: 40.111111 | max_line_length: 152 | alphanum_fraction: 0.626962 | classes (count/score): 990 / 0.914127 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 286 / 0.264081
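The 0001_initial.py row above is an auto-generated Django migration. For context, a sketch of the `models.py` definition such a migration would typically be generated from (a reconstruction from the migration's field list, not code taken from the kylecarter/ict4510-advwebdvlp repository):

```python
from django.db import models

class Conversation(models.Model):
    # The migration's 'id' AutoField is added implicitly by Django.
    created_date = models.DateTimeField(auto_now_add=True)
    modified_date = models.DateTimeField(auto_now=True)
    contact = models.CharField('Full Name', max_length=255, help_text='Name of the contact')
    email = models.EmailField('Email', max_length=255, help_text='Contact email.')
    message = models.TextField('Message', help_text='Message provided by the contact.')
    resolution = models.TextField('Resolution', blank=True, null=True,
                                  help_text='Resolution if any for the conversation.')
```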
hexsha: 18b132a361a1a147d36815958a1a5e8956b159fc | size: 6,050 | ext: py | lang: Python
repo (stars/issues/forks): MartinHarding/ktaned | path: ktaned/bomb.py | head_hexsha: b38fb91b4e2d370d20310e472863766007d4adb3 | licenses: ["MIT"]
max_stars_count: 1 (2017-12-02T21:21:37.000Z to 2017-12-02T21:21:37.000Z) | max_issues_count: 22 (2017-12-02T05:15:32.000Z to 2018-07-24T02:04:56.000Z) | max_forks_count: 2 (2017-12-01T23:49:17.000Z to 2017-12-27T17:05:03.000Z)
content:
import random
class Bomb(object):
"""Represents the Bomb context that modules should compute against"""
def __init__(self):
super(Bomb, self).__init__()
self.valid_battery_types = ['AA', 'D']
self.valid_ports = ['DVI-D', 'Parallel', 'PS/2',
'RJ-45', 'Serial', 'Stereo RCA']
self.valid_indicator_labels = ['SND', 'CLR', 'CAR',
'IND', 'FRQ', 'SIG',
'NSA', 'MSA', 'TRN',
'BOB', 'FRK']
self.reset() # Sets up defaults for bomb
def add_battery_pack(self, battery_type, quantity):
"""Add battery pack to bomb (required for certain modules)
Args:
battery_type (string): type of battery in the pack
            quantity (int): number of batteries in the pack
"""
if battery_type not in self.valid_battery_types:
raise Exception('Battery type ({}) must be one of {}'
.format(battery_type, self.valid_battery_types))
if quantity < 1:
raise Exception('Battery packs must have at least one battery')
self.battery_packs.append({'type': battery_type, 'quantity': quantity})
def set_battery_packs(self, battery_packs):
"""Set battery packs on the bomb (replaces existing battery packs)
Args:
battery_packs (list): list of dicts representing battery packs
"""
self.battery_packs = []
for battery_pack in battery_packs:
self.add_battery_pack(battery_pack['type'],
battery_pack['quantity'])
self.batteries = self.get_battery_count()
def get_battery_count(self):
"""Set battery packs on the bomb (replaces existing battery packs)
Returns:
battery_count (int): sum total of batteries accross all types
"""
return sum([d['quantity'] for d in self.battery_packs])
def add_port(self, port):
"""Add port to bomb (required for certain modules)
Args:
port (string): name of port
"""
if port not in self.valid_ports:
raise Exception('Port ({}) must be one of {}'
.format(port, self.valid_ports))
self.ports.append(port)
def set_ports(self, ports):
"""Set ports on the bomb (replaces existing ports)
Args:
ports (list): list of ports
"""
self.ports = []
for port in ports:
self.add_port(port)
def add_indicator(self, label, lit):
"""Add indicator to bomb (required for certain modules)
Args:
label (string): label for the indicator
lit (boolean): whether the indicator is lit (True) or not (False)
"""
if label not in self.valid_indicator_labels:
raise ValueError('Indicator "label" property must be one of {}'
.format(self.valid_indicator_labels))
if lit not in [True, False]:
raise ValueError('Indicator "lit" property must be boolean')
self.indicators.append({'label': label, 'lit': lit})
def set_indicators(self, indicators):
"""Set indicators on the bomb (replaces existing indicators)
Args:
indicators (list): list of dicts representing indicators
"""
self.indicators = []
for indicator in indicators:
self.add_indicator(indicator['label'], indicator['lit'])
def get_indicator_labels(self, lit=None):
"""Retrieve the label strings of the indicators on the bomb
Args:
indicators (list): list of indicator labels
lit (mixed): optional bool that filters by lit or unlit indicators
Returns:
list: a list of strings representing indicator labels
"""
indicator_labels = []
for indicator in self.indicators:
if lit is None or indicator['lit'] is lit:
indicator_labels.append(indicator['label'])
return indicator_labels
def check_serial_for_vowel(self):
"""Check whether the serial set contains a vowel
Returns:
bool: True if contains a vowel
"""
if not hasattr(self, 'serial') or self.serial is None:
raise Exception('Must set serial before checking for vowel')
if set(self.serial) & set('aeiou'):
return True
else:
return False
def check_serial_ends_odd(self):
"""Check whether the serial ends in an odd or even number
Returns:
bool: True if ends in odd
"""
if not hasattr(self, 'serial') or self.serial is None:
raise Exception('Must set serial before checking ends in odd')
try:
last_character_as_int = int(self.serial[-1])
except Exception as e:
return False
return bool(last_character_as_int % 2)
def add_strikes(self, strikes=1):
"""Add one or more strikes (mistake) to the bomb context
Args:
strikes (int): number of strikes to add (defaults to 1)
"""
self.strikes += strikes
if self.strikes > 2:
self.explode()
def set_strikes(self, strikes):
"""Add one or more strikes (mistake) to the bomb context
Args:
strikes (int): what number to set the strikes at
"""
self.strikes = strikes
if self.strikes > 2:
self.explode()
def reset(self):
"""Reset bomb properties to their default values (called in __init__,
but may be useful for starting over"""
self.ports = []
self.indicators = []
self.battery_packs = []
self.strikes = 0
self.serial = None
def explode(self):
"""Raise an error if the bomb explodes."""
raise Exception('Kaboom! You have exploded.')
avg_line_length: 31.842105 | max_line_length: 79 | alphanum_fraction: 0.571074 | classes (count/score): 6,033 / 0.99719 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 2,773 / 0.458347
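A short usage sketch for the `Bomb` class above, built only from the methods and valid values it defines; the import path is an assumption based on the row's `ktaned/bomb.py` path.

```python
from ktaned.bomb import Bomb   # import path assumed from the repo layout

bomb = Bomb()
bomb.set_battery_packs([{'type': 'AA', 'quantity': 2}, {'type': 'D', 'quantity': 1}])
bomb.set_ports(['Parallel', 'RJ-45'])
bomb.set_indicators([{'label': 'FRK', 'lit': True}])
bomb.serial = 'AB3XQ7'

print(bomb.get_battery_count())             # 3
print(bomb.check_serial_ends_odd())         # True, serial ends in 7
print(bomb.get_indicator_labels(lit=True))  # ['FRK']

try:
    bomb.add_strikes(3)                     # more than 2 strikes calls explode()
except Exception as boom:
    print(boom)                             # Kaboom! You have exploded.
```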
hexsha: 18b146154d393893b10c35ac0c235675a70fdc26 | size: 1,377 | ext: py | lang: Python
repo (stars/issues/forks): danicon/MD3-Curso_Python | path: Aula19/ex09.py | head_hexsha: 3d419d440d3b28adb5c019268f4b217e7d0ce45a | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null
content:
jogador = dict()
partidas = list()
jogador['nome'] = str(input('Nome do jogador: '))
tot = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
for c in range(0, tot):
partidas.append(int(input(f' Quantos gols na partida {c}? ')))
jogador['gols'] = partidas[:]
jogador['total'] = sum(partidas)
print(30*'-=')
print(jogador)
print(30*'-=')
for k, v in jogador.items():
print(f'O campo {k} tem o valor {v}')
print(30*'-=')
print(f'O jogador {jogador["nome"]} jogou {len(jogador["gols"])} partidas.')
for i, v in enumerate(jogador["gols"]):
print(f' => Na partida {i}, fez {v} gols.')
print(f'Foi um total de {jogador["total"]} gols.')
# Ou
# jogador = dict()
# partidas = list()
# p = tot = 0
# jogador['nome'] = str(input('Nome do Jogador: '))
# quant = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
# while p < quant:
# jogos = int(input(f' Quantos gols na partida {p}? '))
# partidas.append(jogos)
# tot += jogos
# p += 1
# jogador['gols'] = partidas
# jogador['total'] = tot
# print(30*'-=')
# print(jogador)
# print(30*'-=')
# for k, v in jogador.items():
# print(f'O campo {k} tem o valor {v}')
# print(30*'-=')
# print(f'O jogador {jogador["nome"]} jogou {quant} partidas.')
# for c, g in enumerate(partidas):
# print(f' => Na partida {c}, fez {g} gols.')
# print(f'Foi um total de {jogador["total"]} gols.')
avg_line_length: 31.295455 | max_line_length: 76 | alphanum_fraction: 0.600581 | classes (count/score): 0 / 0 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 1,014 / 0.736383
hexsha: 18b187b96d4e16d8219c2f6163b45c5b1b15ce59 | size: 2,832 | ext: py | lang: Python
repo (stars/issues/forks): gmfang/hummingbot | path: hummingbot/core/data_type/kline_stream_tracker.py | head_hexsha: fbdf516903c3b98c8447e4dc1bdceee6607b20ab | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null
content:
#!/usr/bin/env python
import asyncio
from abc import abstractmethod, ABC
from enum import Enum
import logging
from typing import (
Optional,
List,
Deque
)
from hummingbot.logger import HummingbotLogger
from hummingbot.core.data_type.kline_stream_tracker_data_source import \
KlineStreamTrackerDataSource
from hummingbot.core.data_type.kline import Kline
import numpy as np
import talib
from collections import deque
class KlineStreamTrackerDataSourceType(Enum):
# LOCAL_CLUSTER = 1 deprecated
REMOTE_API = 2
EXCHANGE_API = 3
class KlineStreamTracker(ABC):
_ust_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._ust_logger is None:
cls._ust_logger = logging.getLogger(__name__)
return cls._ust_logger
def __init__(self):
self._kline_stream: asyncio.Queue = asyncio.Queue()
self._ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
self._klines: Deque[Kline] = deque([], maxlen=200)
self._ema_short = float("Nan")
self._ema_long = float("Nan")
self._macd_histograms: List[float] = []
@property
@abstractmethod
def data_source(self) -> KlineStreamTrackerDataSource:
raise NotImplementedError
@property
def last_recv_time(self) -> float:
return self.data_source.last_recv_time
@abstractmethod
async def start(self):
raise NotImplementedError
@property
def kline_stream(self) -> asyncio.Queue:
return self._kline_stream
@property
def ema_short(self) -> float:
return self._ema_short
@property
def ema_long(self) -> float:
return self._ema_long
@property
def macd_histograms(self) -> List[float]:
return self._macd_histograms
@property
def klines(self) -> List[Kline]:
return self._klines
def add_kline(self, kline: Kline):
self._klines.append(kline)
def calc_tech_indicators(self):
array = [float(kline.close_price) for kline in self._klines]
# self.logger().info(f"HAHA array is {array}")
np_closes = np.array(array)
ema_short = talib.EMA(np_closes, timeperiod=7)
ema_long = talib.EMA(np_closes, timeperiod=20)
macd = talib.MACD(np_closes, fastperiod=7, slowperiod=20,
signalperiod=9)
self._ema_short = ema_short[-1]
self._ema_long = ema_long[-1]
# MACD output 3 lists. We only need last list(histogram). We only
# copy the last 10 histograms.
self._macd_histograms = macd[-1][-10:]
self.logger().info(
f"(Classic) EMA_7 is {self._ema_short}, EMA_20 is {self._ema_long}, MACD(7, 20, 9) Histogram is {macd[-1][-1]} Histogram list is {self._macd_histograms}")
avg_line_length: 28.897959 | max_line_length: 166 | alphanum_fraction: 0.67161 | classes (count/score): 2,393 / 0.844986 | generators: 0 / 0 | decorators: 871 / 0.307556 | async_functions: 56 / 0.019774 | documentation: 355 / 0.125353
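`calc_tech_indicators()` above relies on `talib.MACD` returning three arrays and keeps only the histogram (`macd[-1]`). A standalone sketch on synthetic prices (not hummingbot code) that makes the triple explicit:

```python
import numpy as np
import talib

closes = 100 + np.random.default_rng(0).normal(0, 1, 120).cumsum()   # synthetic close prices
macd_line, signal_line, histogram = talib.MACD(closes, fastperiod=7, slowperiod=20, signalperiod=9)
ema_short = talib.EMA(closes, timeperiod=7)

# The tracker keeps ema_short[-1], ema_long[-1] and the last 10 histogram values.
print(ema_short[-1], histogram[-10:])
```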
hexsha: 18b20197ca16f4d94391b3685611593c8849a3d6 | size: 23,599 | ext: py | lang: Python
repo (stars/issues/forks): xthecoolboy/MizaBOT | path: cogs/management.py | head_hexsha: fb8a449bde29fdf1d32b5a597e48e6b3463dd867 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null
content:
import discord
from discord.ext import commands
import asyncio
from datetime import datetime, timedelta
import psutil
# Bot related commands
class Management(commands.Cog):
"""Bot related commands. Might require some mod powers in your server"""
def __init__(self, bot):
self.bot = bot
self.color = 0xf49242
def isAuthorized(): # for decorators
async def predicate(ctx):
return ctx.bot.isAuthorized(ctx)
return commands.check(predicate)
def isMod(): # for decorators
async def predicate(ctx):
return ctx.bot.isMod(ctx)
return commands.check(predicate)
def isAuthorizedSpecial(): # for decorators
async def predicate(ctx):
return (ctx.bot.isDebugServer(ctx) or (ctx.bot.isYouServer(ctx) and ctx.bot.isMod(ctx)))
return commands.check(predicate)
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isMod()
@commands.cooldown(1, 3, commands.BucketType.guild)
async def setPrefix(self, ctx, prefix_string : str):
"""Set the prefix used on your server (Mod Only)"""
if len(prefix_string) == 0: return
id = str(ctx.guild.id)
if prefix_string == '$':
if id in self.bot.prefixes:
self.bot.prefixes.pop(id)
self.bot.savePending = True
else:
self.bot.prefixes[id] = prefix_string
self.bot.savePending = True
await ctx.send(embed=self.bot.buildEmbed(title=ctx.guild.name, description="Server Prefix changed to `{}`".format(prefix_string), color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['bug', 'report', 'bug_report'])
@commands.cooldown(1, 10, commands.BucketType.guild)
async def bugReport(self, ctx, *, terms : str):
"""Send a bug report (or your love confessions) to the author"""
if len(terms) == 0:
return
await self.bot.send('debug', embed=self.bot.buildEmbed(title="Bug Report", description=terms, footer="{} ▫️ User ID: {}".format(ctx.author.name, ctx.author.id), thumbnail=ctx.author.avatar_url, color=self.color))
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isAuthorized()
async def joined(self, ctx, member : discord.Member):
"""Says when a member joined."""
await ctx.send(embed=self.bot.buildEmbed(title=ctx.guild.name, description="Joined at {0.joined_at}".format(member), thumbnail=member.avatar_url, color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['source'])
@commands.cooldown(1, 20, commands.BucketType.guild)
async def github(self, ctx):
"""Post the bot.py file running right now"""
await ctx.send(embed=self.bot.buildEmbed(title=self.bot.description.splitlines()[0], description="Code source at https://github.com/MizaGBF/MizaBOT", thumbnail=ctx.guild.me.avatar_url, color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isMod()
async def delST(self, ctx):
"""Delete the ST setting of this server (Mod Only)"""
id = str(ctx.guild.id)
if id in self.bot.st:
self.bot.st.pop(id)
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
else:
await ctx.send(embed=self.bot.buildEmbed(title=ctx.guild.name, description="No ST set on this server\nI can't delete.", thumbnail=ctx.guild.icon_url, color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isMod()
async def setST(self, ctx, st1 : int, st2 : int):
"""Set the two ST of this server (Mod Only)"""
if st1 < 0 or st1 >= 24 or st2 < 0 or st2 >= 24:
await ctx.send(embed=self.bot.buildEmbed(title="Error", description="Values must be between 0 and 23 included", color=self.color))
return
self.bot.st[str(ctx.message.author.guild.id)] = [st1, st2]
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['banspark'])
@isMod()
async def banRoll(self, ctx, member: discord.Member):
"""Ban an user from the roll ranking (Mod Only)
To avoid retards with fake numbers
The ban is across all servers"""
id = str(member.id)
if id not in self.bot.spark[1]:
self.bot.spark[1].append(id)
self.bot.savePending = True
await ctx.send(embed=self.bot.buildEmbed(title="{} ▫️ {}".format(member.display_name, id), description="Banned from all roll rankings by {}".format(ctx.author.display_name), thumbnail=member.avatar_url, color=self.color, footer=ctx.guild.name))
await self.bot.send('debug', embed=self.bot.buildEmbed(title="{} ▫️ {}".format(member.display_name, id), description="Banned from all roll rankings by {}".format(ctx.author.display_name), thumbnail=member.avatar_url, color=self.color, footer=ctx.guild.name))
else:
await ctx.send(embed=self.bot.buildEmbed(title=member.display_name, description="Already banned", thumbnail=member.avatar_url, color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isAuthorizedSpecial()
async def setGW(self, ctx, id : int, element : str, day : int, month : int, year : int):
"""Set the GW date ((You) Mod only)"""
try:
# stop the task
self.bot.cancelTask('check_buff')
self.bot.gw['state'] = False
self.bot.gw['id'] = id
self.bot.gw['ranking'] = ""
self.bot.gw['element'] = element.lower()
# build the calendar
self.bot.gw['dates'] = {}
self.bot.gw['dates']["Preliminaries"] = datetime.utcnow().replace(year=year, month=month, day=day, hour=19, minute=0, second=0, microsecond=0)
self.bot.gw['dates']["Interlude"] = self.bot.gw['dates']["Preliminaries"] + timedelta(days=1, seconds=43200) # +36h
self.bot.gw['dates']["Day 1"] = self.bot.gw['dates']["Interlude"] + timedelta(days=1) # +24h
self.bot.gw['dates']["Day 2"] = self.bot.gw['dates']["Day 1"] + timedelta(days=1) # +24h
self.bot.gw['dates']["Day 3"] = self.bot.gw['dates']["Day 2"] + timedelta(days=1) # +24h
self.bot.gw['dates']["Day 4"] = self.bot.gw['dates']["Day 3"] + timedelta(days=1) # +24h
self.bot.gw['dates']["Day 5"] = self.bot.gw['dates']["Day 4"] + timedelta(days=1) # +24h
self.bot.gw['dates']["End"] = self.bot.gw['dates']["Day 5"] + timedelta(seconds=61200) # +17h
# build the buff list for (you)
self.bot.gw['buffs'] = []
# Prelims all
self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(seconds=7200-300), True, True, True, True]) # warning, double
self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(seconds=7200), True, True, False, True])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(seconds=43200-300), True, False, True, False]) # warning
self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(seconds=43200), True, False, False, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(seconds=43200+3600-300), False, True, True, False]) # warning
self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(seconds=43200+3600), False, True, False, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(days=1, seconds=10800-300), True, True, True, False]) # warning
self.bot.gw['buffs'].append([self.bot.gw['dates']["Preliminaries"]+timedelta(days=1, seconds=10800), True, True, False, False])
# Interlude
self.bot.gw['buffs'].append([self.bot.gw['dates']["Interlude"]-timedelta(seconds=300), True, False, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Interlude"], True, False, False, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Interlude"]+timedelta(seconds=3600-300), False, True, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Interlude"]+timedelta(seconds=3600), False, True, False, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Interlude"]+timedelta(seconds=54000-300), True, True, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Interlude"]+timedelta(seconds=54000), True, True, False, False])
# Day 1
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 1"]-timedelta(seconds=300), True, False, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 1"], True, False, False, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 1"]+timedelta(seconds=3600-300), False, True, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 1"]+timedelta(seconds=3600), False, True, False, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 1"]+timedelta(seconds=54000-300), True, True, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 1"]+timedelta(seconds=54000), True, True, False, False])
# Day 2
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 2"]-timedelta(seconds=300), True, False, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 2"], True, False, False, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 2"]+timedelta(seconds=3600-300), False, True, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 2"]+timedelta(seconds=3600), False, True, False, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 2"]+timedelta(seconds=54000-300), True, True, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 2"]+timedelta(seconds=54000), True, True, False, False])
# Day 3
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 3"]-timedelta(seconds=300), True, False, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 3"], True, False, False, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 3"]+timedelta(seconds=3600-300), False, True, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 3"]+timedelta(seconds=3600), False, True, False, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 3"]+timedelta(seconds=54000-300), True, True, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 3"]+timedelta(seconds=54000), True, True, False, False])
# Day 4
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 4"]-timedelta(seconds=300), True, False, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 4"], True, False, False, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 4"]+timedelta(seconds=3600-300), False, True, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 4"]+timedelta(seconds=3600), False, True, False, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 4"]+timedelta(seconds=54000-300), True, True, True, False])
self.bot.gw['buffs'].append([self.bot.gw['dates']["Day 4"]+timedelta(seconds=54000), True, True, False, False])
# set the gw state to true
self.bot.gw['state'] = True
self.bot.savePending = True
self.bot.runTask('check_buff', self.bot.get_cog('GuildWar').checkGWBuff)
await ctx.send(embed=self.bot.buildEmbed(title="{} Guild War Mode".format(self.bot.getEmote('gw')), description="Set to : **{:%m/%d %H:%M}**".format(self.bot.gw['dates']["Preliminaries"]), color=self.color))
except Exception as e:
self.bot.cancelTask('check_buff')
self.bot.gw['dates'] = {}
self.bot.gw['buffs'] = []
self.bot.gw['state'] = False
self.bot.savePending = True
await ctx.send(embed=self.bot.buildEmbed(title="Error", description="An unexpected error occured", footer=str(e), color=self.color))
await self.bot.sendError('setgw', str(e))
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isAuthorizedSpecial()
async def disableGW(self, ctx):
"""Disable the GW mode ((You) Mod only)
It doesn't delete the GW settings"""
self.bot.cancelTask('check_buff')
self.bot.gw['state'] = False
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isAuthorizedSpecial()
async def enableGW(self, ctx):
"""Enable the GW mode ((You) Mod only)"""
if self.bot.gw['state'] == True:
await ctx.send(embed=self.bot.buildEmbed(title="{} Guild War Mode".format(self.bot.getEmote('gw')), description="Already enabled", color=self.color))
elif len(self.bot.gw['dates']) == 8:
self.bot.gw['state'] = True
self.bot.runTask('check_buff', self.bot.get_cog('GuildWar').checkGWBuff)
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
else:
await ctx.send(embed=self.bot.buildEmbed(title="Error", description="No Guild War available in my memory", color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['skipGW'])
@isAuthorizedSpecial()
async def skipGWBuff(self, ctx):
"""The bot will skip the next GW buff call ((You) Mod only)"""
if not self.bot.gw['skip']:
self.bot.gw['skip'] = True
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
else:
await ctx.send(embed=self.bot.buildEmbed(title="Error", description="I'm already skipping the next set of buffs", color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isAuthorizedSpecial()
async def cancelSkipGWBuff(self, ctx):
"""Cancel the GW buff call skipping ((You) Mod only)"""
if self.bot.gw['skip']:
self.bot.gw['skip'] = False
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
else:
await ctx.send(embed=self.bot.buildEmbed(title="Error", description="No buff skip is currently set", color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isMod()
async def toggleFullBot(self, ctx):
"""Allow or not this channel to use all commands (Mod only)
It disables game/obnoxious commands outside of the whitelisted channels"""
gid = str(ctx.guild.id)
cid = ctx.channel.id
if gid not in self.bot.permitted:
self.bot.permitted[gid] = []
for i in range(0, len(self.bot.permitted[gid])):
if self.bot.permitted[gid][i] == cid:
self.bot.permitted[gid].pop(i)
self.bot.savePending = True
try:
await self.bot.callCommand(ctx, 'seeBotPermission', 'Management')
except Exception as e:
pass
await ctx.message.add_reaction('➖')
return
self.bot.permitted[gid].append(cid)
self.bot.savePending = True
await ctx.message.add_reaction('➕')
try:
await self.bot.callCommand(ctx, 'seeBotPermission', 'Management')
except Exception as e:
pass
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isMod()
async def allowBotEverywhere(self, ctx):
"""Allow full bot access in every channel (Mod only)"""
gid = str(ctx.guild.id)
if gid in self.bot.permitted:
self.bot.permitted.pop(gid)
self.bot.savePending = True
await ctx.send(embed=self.bot.buildEmbed(title="Commands are now sauthorized everywhere", thumbnail=ctx.guild.icon_url, footer=ctx.guild.name + " ▫️ " + str(ctx.guild.id), color=self.color))
else:
await ctx.send(embed=self.bot.buildEmbed(title="Commands are already sauthorized everywhere", thumbnail=ctx.guild.icon_url, footer=ctx.guild.name + " ▫️ " + str(ctx.guild.id), color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isMod()
async def seeBotPermission(self, ctx):
"""See all channels permitted to use all commands (Mod only)"""
gid = str(ctx.guild.id)
if gid in self.bot.permitted:
msg = ""
for c in ctx.guild.channels:
if c.id in self.bot.permitted[gid]:
try:
msg += c.name + "\n"
except:
pass
await ctx.send(embed=self.bot.buildEmbed(title="Channels permitted to use all commands", description=msg, thumbnail=ctx.guild.icon_url, footer=ctx.guild.name + " ▫️ " + str(ctx.guild.id), color=self.color))
else:
await ctx.send(embed=self.bot.buildEmbed(title="Commands are sauthorized everywhere", thumbnail=ctx.guild.icon_url, footer=ctx.guild.name + " ▫️ " + str(ctx.guild.id), color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isMod()
async def toggleBroadcast(self, ctx):
"""Allow or not this channel to use all commands (Mod only)
It disables game/obnoxious commands outside of the whitelisted channels"""
gid = str(ctx.guild.id)
cid = ctx.channel.id
if gid not in self.bot.news:
self.bot.news[gid] = []
for i in range(0, len(self.bot.news[gid])):
if self.bot.news[gid][i] == cid:
self.bot.news[gid].pop(i)
self.bot.savePending = True
try:
await self.bot.callCommand(ctx, 'seeBroadcast', 'Management')
except Exception as e:
pass
await ctx.message.add_reaction('➖')
return
self.bot.news[gid].append(cid)
self.bot.savePending = True
await ctx.message.add_reaction('➕')
try:
await self.bot.callCommand(ctx, 'seeBroadcast', 'Management')
except Exception as e:
pass
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isMod()
async def seeBroadcast(self, ctx):
"""See all channels news to use all commands (Mod only)"""
gid = str(ctx.guild.id)
if gid in self.bot.news:
msg = ""
for c in ctx.guild.channels:
if c.id in self.bot.news[gid]:
try:
msg += c.name + "\n"
except:
pass
await ctx.send(embed=self.bot.buildEmbed(title="Channels receiving broadcasts", description=msg, thumbnail=ctx.guild.icon_url, footer=ctx.guild.name + " ▫️ " + str(ctx.guild.id), color=self.color))
else:
await ctx.send(embed=self.bot.buildEmbed(title="No channels set to receive broadcasts", thumbnail=ctx.guild.icon_url, footer=ctx.guild.name + " ▫️ " + str(ctx.guild.id), color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['mizabot'])
@commands.cooldown(1, 10, commands.BucketType.guild)
async def status(self, ctx):
"""Post the bot status"""
await ctx.send(embed=self.bot.buildEmbed(title="{} ▫️ v{}".format(ctx.guild.me.display_name, self.bot.botversion), description="**Uptime**▫️{}\n**CPU**▫️{}%\n**Memory**▫️{}MB\n**Save Pending**▫️{}\n**Errors since boot**▫️{}\n**Tasks Count**▫️{}\n**Servers Count**▫️{}\n**Pending Servers**▫️{}\n**Cogs Loaded**▫️{}/{}\n**Twitter**▫️{}".format(self.bot.uptime(), self.bot.process.cpu_percent(), self.bot.process.memory_full_info().uss >> 20, self.bot.savePending, self.bot.errn, len(asyncio.all_tasks()), len(self.bot.guilds), len(self.bot.newserver['pending']), len(self.bot.cogs), self.bot.cogn, (self.bot.twitter_api is not None)), thumbnail=ctx.guild.me.avatar_url, color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True)
@commands.cooldown(1, 10, commands.BucketType.guild)
async def changelog(self, ctx):
"""Post the bot changelog"""
msg = ""
for c in self.bot.botchangelog:
msg += "▫️ {}\n".format(c)
if msg != "":
await ctx.send(embed=self.bot.buildEmbed(title="{} ▫️ v{}".format(ctx.guild.me.display_name, self.bot.botversion), description="**Changelog**\n" + msg, thumbnail=ctx.guild.me.avatar_url, color=self.color))
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isMod()
async def asar(self, ctx, *, role_name : str = ""):
"""Add a role to the list of self-assignable roles (Mod Only)"""
if role_name == "":
await ctx.message.add_reaction('❎') # negative check mark
return
role = None
for r in ctx.guild.roles:
if role_name.lower() == r.name.lower():
role = r
break
if role is None:
await ctx.message.add_reaction('❎') # negative check mark
return
id = str(ctx.guild.id)
if id not in self.bot.assignablerole:
self.bot.assignablerole[id] = {}
if role.name.lower() in self.bot.assignablerole[id]:
await ctx.message.add_reaction('❎') # negative check mark
return
self.bot.assignablerole[id][role.name.lower()] = role.id
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
@commands.command(no_pm=True, cooldown_after_parsing=True)
@isMod()
async def rsar(self, ctx, *, role_name : str = ""):
"""Remove a role from the list of self-assignable roles (Mod Only)"""
if role_name == "":
await ctx.message.add_reaction('❎') # negative check mark
return
role = None
for r in ctx.guild.roles:
if role_name.lower() == r.name.lower():
role = r
break
if role is None:
await ctx.message.add_reaction('❎') # negative check mark
return
id = str(ctx.guild.id)
if id not in self.bot.assignablerole:
self.bot.assignablerole[id] = {}
if role.name.lower() not in self.bot.assignablerole[id]:
await ctx.message.add_reaction('❎') # negative check mark
return
self.bot.assignablerole[id].pop(role.name.lower())
self.bot.savePending = True
await ctx.message.add_reaction('✅') # white check mark
avg_line_length: 59.593434 | max_line_length: 695 | alphanum_fraction: 0.604221 | classes (count/score): 23,575 / 0.993594 | generators: 0 / 0 | decorators: 22,665 / 0.955241 | async_functions: 20,855 / 0.878956 | documentation: 4,933 / 0.207907
hexsha: 18b252f0addcf4c4512b055a5ed661c24cb4f654 | size: 3,658 | ext: py | lang: Python
repo (stars/issues/forks): Wheatwizard/Lost | path: interpreter.py | head_hexsha: 59281e2e8ab6f0fd35b8496b5f04b2a4a8d7b350 | licenses: ["MIT"]
max_stars_count: 13 (2017-08-10T21:54:12.000Z to 2021-12-08T12:50:31.000Z) | max_issues_count: null | max_forks_count: null
content:
from Stack import Stack
from random import randint
class Interpreter(object):
def __init__(self,source,input,startx=None,starty=None,dir=None):
source = source.strip().split("\n")
dim = max(map(len,source)+[len(source)])
self.source = [list(x.ljust(dim,"."))for x in source]
self.dim = (len(self.source),len(self.source[0]))
if dir == None:
self.direction = [[1,0],[0,1],[-1,0],[0,-1]][randint(0,3)]
else:
self.direction = dir
if (startx,starty) == (None,None):
self.location = [randint(0,self.dim[0]-1),randint(0,self.dim[1]-1)]
else:
self.location = [startx,starty]
self.memory = Stack(input)
self.scope = Stack()
self.read = False
self.safety = False
def wrapAround(self):
self.location[0] %= self.dim[0]
self.location[1] %= self.dim[1]
def move(self):
self.location = [
self.location[0]+self.direction[0],
self.location[1]+self.direction[1]
]
#Important bit
if self.location[0] < 0:
self.wrapAround()
if self.location[1] < 0:
self.wrapAround()
if self.location[0] >= self.dim[0]:
self.wrapAround()
if self.location[1] >= self.dim[1]:
self.wrapAround()
def character(self):
return self.source[self.location[0]][self.location[1]]
def action(self):
if self.read:
if self.character() == '"':
self.read = False
else:
self.memory.append(ord(self.character()))
elif self.character() == "/":
self.direction = map(lambda x:-x,self.direction[::-1])
elif self.character() == "\\":
self.direction = self.direction[::-1]
elif self.character() == "|":
self.direction[1] *= -1
elif self.character() == ">":
self.direction = [0,1]
elif self.character() == "<":
self.direction = [0,-1]
elif self.character() == "v":
self.direction = [1,0]
elif self.character() == "^":
self.direction = [-1,0]
elif self.character() == "%":
self.safety = True
elif self.character() == "#":
self.safety = False
elif self.character() == "@":
if self.safety:
self.direction = [0,0]
elif self.character() == "[":
if self.direction[1] == 1:
self.direction[1] = -1
if self.direction[1]:
self.source[self.location[0]][self.location[1]] = "]"
elif self.character() == "]":
if self.direction[1] == -1:
self.direction[1] = 1
if self.direction[1]:
self.source[self.location[0]][self.location[1]] = "["
elif self.character() in "0123456879":
self.memory.append(int(self.character()))
elif self.character() == "+":
self.memory.append(self.memory.pop()+self.memory.pop())
elif self.character() == "*":
self.memory.append(self.memory.pop()*self.memory.pop())
elif self.character() == "-":
self.memory.append(-self.memory.pop())
elif self.character() == ":":
self.memory.append(self.memory[-1])
elif self.character() == "$":
a,b=self.memory.pop(),self.memory.pop()
self.memory.append(a)
self.memory.append(b)
elif self.character() == "!":
self.move()
elif self.character() == "?":
if self.memory.pop():
self.move()
elif self.character() == "(":
self.scope.append(self.memory.pop())
elif self.character() == ")":
self.memory.append(self.scope.pop())
elif self.character() == '"':
self.read = True
def output(self,screen,a,b):
try:
import curses
curselib = curses
except ImportError:
import unicurses
curselib = unicurses
for x in range(self.dim[0]):
for y in range(self.dim[1]):
try:
if [x,y] == self.location:
if curselib.has_colors():
screen.addstr(a+x,b+y*2,"X",curselib.color_pair(1))
else:
screen.addstr(a+x,b+y*2,"X")
else:
screen.addstr(a+x,b+y*2,self.source[x][y])
except:pass
avg_line_length: 29.983607 | max_line_length: 70 | alphanum_fraction: 0.617824 | classes (count/score): 3,605 / 0.985511 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 115 / 0.031438
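interpreter.py above targets Python 2: `max(map(len, source) + [len(source)])` and `map(lambda x: -x, ...)` both rely on `map()` returning a list. A small sketch of the Python 3 equivalents (an adaptation for illustration, not a change that exists in the Wheatwizard/Lost repository):

```python
source = ["v<", ">^>", "@"]
direction = [0, 1]

# Python 2: max(map(len, source) + [len(source)]) concatenates two lists.
dim = max([len(line) for line in source] + [len(source)])    # -> 3

# Python 2: map(lambda x: -x, direction[::-1]) yields a list usable as a direction vector.
direction = [-x for x in direction[::-1]]                    # [0, 1] -> [-1, 0]
print(dim, direction)
```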
hexsha: 18b25e53c1ed1abb7bdec386aaba62360b44deb4 | size: 1,826 | ext: py | lang: Python
path: masterStock.py | head_hexsha: 27106fd581b71df1729f94a79f5a6a10b41ece00 | licenses: ["MIT"]
max_stars repo: Coway/premeStock | count: 69 (2017-03-09T00:24:09.000Z to 2021-11-15T05:52:09.000Z)
max_issues repo: Coway/premeStock | count: 12 (2017-03-11T04:31:29.000Z to 2018-06-21T03:54:28.000Z)
max_forks repo: supthunder/premeStock | count: 19 (2017-03-05T22:16:37.000Z to 2020-06-23T22:41:33.000Z)
content:
import requests
from bs4 import BeautifulSoup
import json
def loadMasterStock():
url = "http://www.supremenewyork.com/mobile_stock.json"
user = {"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 10_2_1 like Mac OS X) AppleWebKit/602.4.6 (KHTML, like Gecko) Version/10.0 Mobile/14D27 Safari/602.1"}
# user = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36"}
r = requests.get(url, headers=user)
masterStock = json.loads(r.text)
with open("masterstock.json", 'w') as outfile:
json.dump(masterStock, outfile, indent=4, sort_keys=True)
print("Saved to masterstock.json")
itemInfo = ""
while(True):
try:
item = input("Enter item name to get id or cntrl-c to quit: ")
except:
print("Exiting...")
if itemInfo != "":
itemInfo = itemInfo[:-1]
print("\n"+itemInfo)
with open("filteredStock.txt",'w') as outfile:
outfile.write(itemInfo)
exit()
if item == "new":
print("Getting all new items...")
for itemCount in range(len(masterStock['products_and_categories']["new"])):
itemInfo += '"'+str(masterStock['products_and_categories']["new"][itemCount]['id'])+'":"'
itemInfo += str(masterStock['products_and_categories']["new"][itemCount]['name'])+'",'
else:
for itemCount in range(len(masterStock['products_and_categories']["new"])):
if item.lower() in str(masterStock['products_and_categories']["new"][itemCount]['name']).lower():
itemInfo += '"'+str(masterStock['products_and_categories']["new"][itemCount]['id'])+'":"'
print("Added "+str(masterStock['products_and_categories']["new"][itemCount]['name']))
itemInfo += str(masterStock['products_and_categories']["new"][itemCount]['name'])+'",'
# print(itemInfo)
if __name__ == '__main__':
loadMasterStock()
avg_line_length: 41.5 | max_line_length: 161 | alphanum_fraction: 0.680723 | classes (count/score): 0 / 0 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 845 / 0.46276
hexsha: 18b566b173e3af542df61de7dc132ac1fb281305 | size: 231 | ext: py | lang: Python
repo (stars/issues/forks): hiroshitoda/WebDriverBenchmark.py | path: tests/WebkitGtkDriverBenchmarkTest.py | head_hexsha: 74b643b9f299436ef6fb50741a60f04c0c69cf8c | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null
content:
import unittest
from selenium import webdriver
from tests import Base
class WebKitGTKDriverBenchmarkTest(Base.Base):
def getDriver(self):
return webdriver.WebKitGTK()
if __name__ == "__main__":
unittest.main()
avg_line_length: 16.5 | max_line_length: 46 | alphanum_fraction: 0.74026 | classes (count/score): 109 / 0.471861 | generators: 0 / 0 | decorators: 0 / 0 | async_functions: 0 / 0 | documentation: 10 / 0.04329
hexsha: 18b58622c0bb04c070be5b53bb5876f7354aa18d | size: 18,442 | ext: py | lang: Python
repo (stars/issues/forks): maheriya/tennisLabels | path: utils/create_cropped_motion_dataset.py | head_hexsha: d363addcd043dba731aebf1f4a5abb86ef434ac5 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | star/issue/fork event datetimes: null
content:
#!/usr/bin/env python
#
# Given a VOC dataset of TENNIS videos dumped at 1920x1080 resolution, this script creates a
# scaled and cropped dataset. Even though the cropped zone size is static (1280x720/640x360)
# crop scale), the zones themselves are dynamically selected based on the objects locations
# (by reading the annotations).
# The zone size 1280x720 is selected for multiple reasons: [Other size is 640x360]
# a. This size (2/3 of full scale) gives grid boxes of 1/3rd the full scale. This grid size
# is the minimum overlap between the diagonal zones. Horizontal and vertically aligned
# zones have the overlap that is double the height or width of this grid size. The
# minimum grid size is large enough to include a trail of tennis ball across three frames
# even at fast speeds. This allows us to fully utilize motion information during training.
# b. When images are cropped at 1280x720, and then finally scaled by 1/2, we get 640x360
# as the final image size. This works perfectly with either 533x300 or 300x300 of final
# training size while still allowing for random crop for training time data augmentation.
#
# Alternative to 1280x720 cropping is direct cropping at 640x360. Of course, this imposes
# stricter tracking requirement at inference time.
#
# Since we want this to work well for motion dataset for at least three frames of motion, the
# algorithm reads three frames at a time to decide how to crop the images. The three frames of
# motion also adds inherent hysteresis to the zone selection, making it stable.
#
# The algorithm is as follows:
# 1. Read three sequential frames -- current, prev1, prev2
# 2. Read annotations. Use 'ball' and 'racket' objects annotations for zones selection.
# 3. Create a union of bboxes for each object across three frames. Let's call this uboxes.
# 4. Select zones to crop: The zone selection is based on how centered a ubox is inside a zone.
# Since zones have significant overlap with each other, multiple zones may contain an
# object. We compute the distance of each ubox center from the center of the zone.
# For each object, the zone where this distance is the smallest is selected.
# 5. Crop out the selected zone/s to create output image/s.
#
# Note that here the emphasis is NOT to center the objects within the cropped output. If we did
# that, the network will incorrectly learn to expect the objects at the center of the image.
# Since we can't provide the network with such images at the inference time, this type of
# training will be useless.
# Instead, we use fixed, four zone locations within the image, and select the zones purely on
# the basis of how *close* an object is to a zone center. This method guarantees to create
# output images where the objects will be found in various locations within the image which
# adds a good amount of regularization to the training and avoid overfitting.
#
# For the real-time inference, the application must make an initial guess about which region
# to crop for the input to the network, and may require multiple tries in the beginning.
# However, once the ball is detected, one can implement rudimentary tracking for the next
# crop. Since ball detection (and not the racket detection) is the most important part of
# detection, decision making is trivial.
#
# Just to be clear, it is not necessary to use the same zones during inference; any region
# within the image will be fine as long as it contains the ball. When the ball nears the
# player, the racket will automatically get into the view. Note that at the time of training,
# we utilize all available samples of racket images, not just the images where both ball and
# racket are visible at the same time.
from __future__ import print_function
import os
import sys
import cv2 as cv
from lxml import etree
from glob import glob
import re
import argparse
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
import tennis_common as tc
## INPUT IMAGE DIMENSIONS (scaled to these dimensions if required)
WIDTH = 1920
HEIGHT = 1080
## MOTION DB setting: '3FRAMES' or 'FRAMESDIFF'
MOTION_TYPE = 'FRAMESDIFF'
## Change this to view images
SHOW_IMAGES = False
## Verbosity
DEBUG = 0
tc.DEBUG = DEBUG
def show_imgs(cvimg, cvimg_n, oimgs=[]):
global SHOW_IMAGES
cv.imshow("Original image", cvimg)
cv.imshow("Motion image", cvimg_n)
s = ["Out 1", "Out 2"]
for i in range(len(oimgs)):
cv.imshow(s[i], oimgs[i])
key = cv.waitKey(2) & 255
if key == 27:
cv.destroyAllWindows()
sys.exit(0)
elif key == ord('g'): ## Go for it; don't show images after this
cv.destroyAllWindows()
SHOW_IMAGES = False
def drawZone(img, zones, zid, cropsize):
if (cropsize[1] == 720):
## This is a fixed -- hardcoded -- grid of 4 equal sized zones:
# Zones: top-left, top-right, bottom-left, bottom-right
h = img.shape[0]
w = img.shape[1]
gy = [0, int(h/3.), int(h*2.0/3.0), h]
gx = [0, int(w/3.), int(w*2.0/3.0), w]
if zid == 0:
img = cv.rectangle(img, (gx[0], gy[0]), (gx[2], gy[2]-2), (255, 196, 128), 2) ## T-L
elif zid == 1:
img = cv.rectangle(img, (gx[1]+2, gy[0]), (gx[3], gy[2]), (128, 255, 128), 2) ## T-R
elif zid == 2:
img = cv.rectangle(img, (gx[0], gy[1]), (gx[2]+2, gy[3]), (255, 128, 0), 2) ## B-L
elif zid == 3:
img = cv.rectangle(img, (gx[1], gy[1]+2), (gx[3], gy[3]), (196, 0, 255), 2) ## B-R
else:
print("Zone {} is not supported".format(zid))
else:
colors = [(255, 196, 128), (128, 255, 128), (255, 128, 0), (196, 0, 255), (206, 206, 128), (128, 206, 255)]
gy0,gx0,gy2,gx2 = [int(b) for b in zones.getBBox(zid)]
img = cv.rectangle(img, (gx0, gy0), (gx2, gy2-2), colors[zid%6], 1)
return img
def parseArgs():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"invoc", type=str, #default="/IMAGESETS/TENNIS/VOCdevkit",
help="The input VOC root directory."
)
parser.add_argument(
"outvoc", type=str, #default="/IMAGESETS/TENNIS/VOCdevkitCropped",
help="Output VOC root directory."
)
parser.add_argument(
"--height", type=int, default=720, required=False,
help="Output image height. Not used right now."
)
args = parser.parse_args()
return args
##-#####################################################################################
args = parseArgs()
## Main variables
IN_VOCDIR = os.path.abspath(args.invoc)
IN_IMGDIR = os.path.join(IN_VOCDIR, "{}", "JPEGImages") # Template
IN_ANNDIR = os.path.join(IN_VOCDIR, "{}", "Annotations") # Template
OUT_VOCDIR = os.path.abspath(args.outvoc)
OUT_IMGDIR = os.path.join(OUT_VOCDIR, "{}", "JPEGImages") # Template
OUT_ANNDIR = os.path.join(OUT_VOCDIR, "{}", "Annotations")# Template
cropsize = (int(args.height*16./9.), args.height)
if args.height != 720 and args.height != 360:
print("Crop height of {} is not supported (use 720 or 360).".format(args.height))
sys.exit(1)
## Find base datasets containing annotations
output = tc.runSystemCmd(r"find {}/ -mindepth 3 -name '*.xml' | sed -e 's#/Annotations/.*.xml##g' | sort | uniq".format(IN_VOCDIR))
vocbases = [os.path.basename(d) for d in output]
#print(vocbases)
print("There are {} datasets to process".format(len(vocbases)))
cnt = 0
dbcnt = 0
for vocbase in vocbases:
dbcnt += 1
print("\n{}/{}. VOC Base: {}".format(dbcnt, len(vocbases), vocbase))
print("-------------------------------------------------")
i_imgdir = IN_IMGDIR.format(vocbase)
i_anndir = IN_ANNDIR.format(vocbase)
if not os.path.isdir(i_imgdir):
print("Input image dir {} is not accessible".format(i_imgdir))
if not os.path.isdir(i_anndir):
print("Input annotations dir {} is not accessible".format(i_anndir))
o_imgdir = OUT_IMGDIR.format(vocbase)
o_anndir = OUT_ANNDIR.format(vocbase)
for idir in [o_imgdir, o_anndir]:
if not os.path.isdir(idir):
os.makedirs(idir)
else:
print("Dir {} already exists".format(idir))
## Create image list to process
imgs = glob("{}/*.jpg".format(i_imgdir))
imgs = [os.path.basename(i) for i in imgs]
imgs.sort() # Sort images to pick frames in order. It is assumed the images are named likewise
(fprefix, ntemplate) = tc.getNumberingScheme(imgs[0])
if cropsize[1] == 720:
## Define the grid points
## 0/3 1/3 2/3 3/3
gy = [0, int(HEIGHT/3.), int(HEIGHT*2.0/3.0), HEIGHT]
gx = [0, int( WIDTH/3.), int( WIDTH*2.0/3.0), WIDTH]
## Create zones based on the grid
zones = tc.BoundingBoxes('zones')
# ymin xmin ymax xmax
zones.addBBox([gy[0], gx[0], gy[2], gx[2]]) # Top-left zone
zones.addBBox([gy[0], gx[1], gy[2], gx[3]]) # Top-right zone
zones.addBBox([gy[1], gx[0], gy[3], gx[2]]) # Bottom-left zone
zones.addBBox([gy[1], gx[1], gy[3], gx[3]]) # Bottom-right zone
else: # cropsize[1] == 360:
## Define the grid points
## 0/6 1/6 2/6 3/6 4/6 5/6 6/6
gy = [0, int(HEIGHT/6.), int(HEIGHT/3.), int(HEIGHT/2.), int(HEIGHT*2.0/3.0), int(HEIGHT*5.0/6.0), HEIGHT]
gx = [0, int( WIDTH/6.), int( WIDTH/3.), int( WIDTH/2.), int( WIDTH*2.0/3.0), int( WIDTH*5.0/6.0), WIDTH]
## Create zones based on the grid
zones = tc.BoundingBoxes('zones')
for y in range(len(gy)-2):
for x in range(len(gx)-2):
zones.addBBox([gy[y], gx[x], gy[y+2], gx[x+2]])
annnames = glob("{}/*.xml".format(i_anndir))
annnames = [os.path.basename(i) for i in annnames]
annnames.sort() # Sort files to pick frames in order. It is assumed that xml/images are named likewise
if len(annnames) < 3:
print("This VOC Base has less than 3 annotations. Skipping.")
continue
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE,(4,4))
i = 2 ## Index
for annfile in annnames[2:]:
annName_i = annnames[i]
annName_p1 = annnames[i-1]
annName_p2 = annnames[i-2]
i += 1
fnum = int(re.sub(r'.*[-_](\d+).xml', r'\1', annName_i))
eannName_i = fprefix + ntemplate.format(fnum) + '.xml'
eannName_p1 = fprefix + ntemplate.format(fnum-1) + '.xml'
eannName_p2 = fprefix + ntemplate.format(fnum-2) + '.xml'
if annName_i != eannName_i or annName_p1 != eannName_p1 or annName_p2 != eannName_p2:
# Not a continuous series of three frames including previous two, we skip this frame
if 1: #DEBUG>=1:
print("Skipping. Frame sequence not found for {}. ".format(annName_i))
continue # Get next image/ann
else:
if DEBUG>=1:
print("Processing {}".format(annName_i))
## Now that we got a three sequential frames, let's read annotations and get uboxes
## uboxes = union of bboxes for each of the 'ball' or 'racket' bbox in all three images
## We are assuming only one 'ball' annotation per image. However, it is easy to handle
## multiple balls per image too. Not needed for our dataset.
annfiles = [fprefix + ntemplate.format(fn) + '.xml' for fn in [fnum, fnum-1, fnum-2]]
anns = [tc.getAnnotations(os.path.join(i_anndir, annfile)) for annfile in annfiles]
seq = True
for ann_ in anns:
objs = ann_.findall('.//object/name')
if 'ball' not in objs:
seq = False
break # don't check other anns
if not seq:
if 1: # DEBUG>=1:
print("\tSkipping. 3 ball labels sequence not found for {}".format(annName_i))
continue # Get next image/ann
ballUBox, _ = tc.getUBoxes(anns[1:]) # Find union bbox for ball label from two previous frames
assert(ballUBox is not None),"Error! Cannot find union of previous two balls bounding boxes"
## Add this as a new label. We call this label 'pballs' for 'previous balls'
tc.addAnnotation(anns[0], 'pballs', ballUBox)
w = anns[0].size.width
## Scale input to WIDTHxHEIGHT fixed dimensions if input size is different
if w != WIDTH:
scale = float(WIDTH) / float(w)
## Scale annotations
anns = [tc.scaleAnnotations(ann, scale) for ann in anns]
else:
scale = 1.0
ballUBox, racketUBox = tc.getUBoxes(anns)
## Find best enclosing zone for ball and racket UBoxes
zid_b = zones.findEnclosing(ballUBox)
zid_r = zones.findEnclosing(racketUBox)
crop_zids = []
if zid_b == zid_r: ## Both ball and racket are in the same zone
if zid_b is not None:
crop_zids.append(zid_b)
else:
for zid in [zid_b, zid_r]:
if zid is not None:
crop_zids.append(zid)
if DEBUG>=1:
print("Crop Zones: {}".format(crop_zids))
#assert(len(crop_zids) != 0), "No zones found for cropping. This means that the frame doesn't have ball or racket"
if len(crop_zids) == 0:
print("No zones found for cropping. This means that the frame doesn't have ball or racket. Skipped")
continue
## load images as grayscale
img_i, img_p1, img_p2 = [fprefix + ntemplate.format(fn) + '.jpg' for fn in [fnum, fnum-1, fnum-2]]
_cvimg_c = cv.imread(os.path.join(i_imgdir, img_i), cv.IMREAD_COLOR)
_cvimg = cv.cvtColor(_cvimg_c, cv.COLOR_BGR2GRAY)
_cvimg1 = cv.imread(os.path.join(i_imgdir, img_p1), cv.IMREAD_GRAYSCALE)
_cvimg2 = cv.imread(os.path.join(i_imgdir, img_p2), cv.IMREAD_GRAYSCALE)
if w != WIDTH:
## Resize if scale is different
cvimg_c = cv.resize(_cvimg_c, (WIDTH, HEIGHT), interpolation = cv.INTER_CUBIC)
cvimg = cv.resize(_cvimg, (WIDTH, HEIGHT), interpolation = cv.INTER_CUBIC)
cvimg1 = cv.resize(_cvimg1, (WIDTH, HEIGHT), interpolation = cv.INTER_CUBIC)
cvimg2 = cv.resize(_cvimg2, (WIDTH, HEIGHT), interpolation = cv.INTER_CUBIC)
else:
cvimg_c = _cvimg_c
cvimg = _cvimg
cvimg1 = _cvimg1
cvimg2 = _cvimg2
if MOTION_TYPE == '3FRAMES':
# Merge the 3 grayscale motion frames into the B, G and R channels
cvimg_n = cv.merge([cvimg, cvimg1, cvimg2])
elif MOTION_TYPE == 'FRAMESDIFF':
## Create a frame-diff based background-subtracted image with a trail of three balls.
## We keep the trail on purpose: it gives the network some reference in the case
## where the ball is not visible in the current frame but was visible in the
## previous frames.
diff_p1p2 = cv.absdiff(cvimg1, cvimg2)
diff_cp1 = cv.absdiff(cvimg, cvimg1)
image_b = cv.bitwise_or(diff_p1p2, diff_cp1) ## This will create the trail of three objects
#bring back? =>#image_diff= cv.dilate(image_b, kernel) ## enlarge the blobs
# Replace the blue channel with the frame diff. The blue channel is the least important
# for tennis since the ball is greenish yellow -- most of the information is in the red and green channels.
cvimg_n = cvimg_c.copy()
cvimg_n[:,:,0] = image_b #image_diff
else:
print("Unsupported motion type {}".format(MOTION_TYPE))
sys.exit(1)
## Crop images and annotations as per selected zones
imgfilenames = []
annfilenames = []
outimgs = []
outanns = []
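## For each selected zone, crop both the annotation and the motion image; the image RoI is copied so that drawing boxes on the crops later (when SHOW_IMAGES is set) does not modify cvimg_n.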
for zid in crop_zids:
imgbase = fprefix + ntemplate.format(fnum) + '-z{:02d}'.format(zid)
imgname = imgbase + '.jpg'
annname = imgbase + '.xml'
imgfilenames.append(imgname)
annfilenames.append(annname)
roi = zones.getBBox(zid)
outann = tc.cropAnnotations(anns[0], roi, imgname, 6)
outimg = zones.getImgRoI(zid, cvimg_n).copy()
outanns.append(outann)
outimgs.append(outimg)
if DEBUG>=3 and len(crop_zids) > 1:
obj_xml = etree.tostring(outann, pretty_print=True, xml_declaration=False)
print("Annotation {}\n{}".format(annname, obj_xml))
######################################################################################
## Write output files
######################################################################################
for index in range(len(outimgs)):
## Write annotation files
tc.cleanUpAnnotations(outanns[index], ['ball', 'racket', 'pballs'])
tc.writeAnnotation(outanns[index], os.path.join(o_anndir, annfilenames[index]))
## Write cropped motion images
imgfile = os.path.join(o_imgdir, imgfilenames[index])
if DEBUG>=2:
print("Writing {}".format(imgfile))
cv.imwrite(imgfile, outimgs[index])
if SHOW_IMAGES:
for zid in crop_zids:
cvimg_n = drawZone(cvimg_n, zones, zid, cropsize)
for index in range(len(outimgs)):
img = outimgs[index]
for obj in outanns[index].iter('object'):
bbox = [obj.bndbox.ymin, obj.bndbox.xmin, obj.bndbox.ymax, obj.bndbox.xmax]
outimgs[index] = tc.drawBoundingBox(outimgs[index], bbox, tc.LBL_IDS[obj.name])
## Draw bounding boxes
if ballUBox is not None:
cvimg_n = tc.drawBoundingBox(cvimg_n, ballUBox, 1)
if racketUBox is not None:
cvimg_n = tc.drawBoundingBox(cvimg_n, racketUBox, 2)
show_imgs(cvimg_c, cvimg_n, outimgs)
#if (cnt >= 50):
# assert(False), "Temp forced exit to check work. Remove later."
cnt += 1
cv.destroyAllWindows()
print("Done. Motion Dataset created with {} annotations and images".format(cnt))
| 46.570707
| 131
| 0.612244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8,258
| 0.447782
|
18b5cd9e5d6c9c3f826dbcf798680d452eb2f577
| 5,454
|
py
|
Python
|
tests/unit/core/test_core_config.py
|
Mbompr/fromconfig
|
eb34582c79a9a9e3b9e60d41fec2ac6a619e9c27
|
[
"Apache-2.0"
] | 19
|
2021-03-18T16:48:03.000Z
|
2022-03-02T13:09:21.000Z
|
tests/unit/core/test_core_config.py
|
Mbompr/fromconfig
|
eb34582c79a9a9e3b9e60d41fec2ac6a619e9c27
|
[
"Apache-2.0"
] | 3
|
2021-04-23T23:03:29.000Z
|
2021-05-11T14:09:16.000Z
|
tests/unit/core/test_core_config.py
|
Mbompr/fromconfig
|
eb34582c79a9a9e3b9e60d41fec2ac6a619e9c27
|
[
"Apache-2.0"
] | 3
|
2021-04-19T22:05:34.000Z
|
2022-02-21T11:32:16.000Z
|
"""Tests for core.config."""
import json
import yaml
from pathlib import Path
import pytest
import fromconfig
def test_core_config_no_jsonnet(tmpdir, monkeypatch):
"""Test jsonnet missing handling."""
monkeypatch.setattr(fromconfig.core.config, "_jsonnet", None)
# No issue to dump even if missing
config = {"x": 2}
fromconfig.dump(config, str(tmpdir.join("config.jsonnet")))
fromconfig.dump(config, str(tmpdir.join("config.json")))
fromconfig.dump(config, str(tmpdir.join("config.yaml")))
fromconfig.dump(config, str(tmpdir.join("config.yml")))
# No issue to load non-jsonnet files
assert fromconfig.load(str(tmpdir.join("config.json"))) == config
assert fromconfig.load(str(tmpdir.join("config.yaml"))) == config
assert fromconfig.load(str(tmpdir.join("config.yml"))) == config
# Raise import error if reloading from jsonnet
with pytest.raises(ImportError):
fromconfig.load(str(tmpdir.join("config.jsonnet")))
def test_core_config():
"""Test Config."""
config = fromconfig.Config(x=1)
assert config["x"] == 1
assert list(config) == ["x"]
config["x"] = 2
assert config["x"] == 2
def test_core_config_is_json_serializable():
"""Test that Config is json serializable."""
config = fromconfig.Config(x=1)
assert json.dumps(config) == '{"x": 1}'
@pytest.mark.parametrize(
"path,serializer",
[
pytest.param("config.json", json),
pytest.param("config.jsonnet", json),
pytest.param("config.yaml", yaml),
pytest.param("config.yml", yaml),
pytest.param("config.xml", None),
],
)
def test_core_config_load_dump(path, serializer, tmpdir):
"""Test Config.load."""
config = {"x": 1}
path = str(tmpdir.join(path))
if serializer is None:
# Incorrect path (not supported)
with pytest.raises(ValueError):
fromconfig.dump(config, path)
with pytest.raises(ValueError):
fromconfig.load(path)
else:
# Dump config to file
with Path(path).open("w") as file:
if serializer is json:
serializer.dump(config, file, indent=4)
else:
serializer.dump(config, file)
# Read content of the dump
with Path(path).open() as file:
content = file.read()
# Reload
reloaded = fromconfig.load(path)
assert reloaded == config
# Dump with config method and check content is the same as before
fromconfig.dump(reloaded, path)
with Path(path).open() as file:
assert file.read() == content
@pytest.mark.parametrize("config, expected", [pytest.param("foo: bar", {"foo": "bar"})])
def test_core_config_include_loader_on_string(config, expected):
"""Test IncludeLoader."""
assert expected == yaml.load(config, fromconfig.core.config.IncludeLoader)
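# The cases below exercise the custom "!include" tag (inlining another YAML file) together with the YAML merge key "<<"; the nested cases check that includes are resolved relative to the including file.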
@pytest.mark.parametrize(
"files, expected",
[
pytest.param(
{"config.yaml": "foo: 1\nbar: !include bar.yaml", "bar.yaml": "2"}, {"foo": 1, "bar": 2}, id="simple"
),
pytest.param(
{"config.yaml": "foo: 1\n<<: !include bar.yaml", "bar.yaml": "bar: 2"},
{"foo": 1, "bar": 2},
id="simple-merge",
),
pytest.param(
{"config.yaml": "foo: 1\n<<: !include bar.yaml", "bar.yaml": "2"}, None, id="simple-merge-invalid"
),
pytest.param(
{"config.yaml": "foo: 1\nbar: !include bar/bar.yaml", "bar/bar.yaml": "2"},
{"foo": 1, "bar": 2},
id="nested",
),
pytest.param(
{"config.yaml": "foo: 1\n<<: !include bar/bar.yaml", "bar/bar.yaml": "bar: 2"},
{"foo": 1, "bar": 2},
id="nested-merge",
),
pytest.param(
{
"config.yaml": "foo: 1\nbar: !include bar/bar.yaml",
"bar/bar.yaml": "!include baz.yaml",
"bar/baz.yaml": "2",
},
{"foo": 1, "bar": 2},
id="nested-twice",
),
pytest.param(
{
"config.yaml": "foo: 1\n<<: !include bar/bar.yaml",
"bar/bar.yaml": "<<: !include baz.yaml",
"bar/baz.yaml": "bar: 2",
},
{"foo": 1, "bar": 2},
id="nested-twice-merge",
),
],
)
def test_core_config_load_include_merge(files, expected, tmpdir):
"""Test include and merge functionality."""
for p, content in files.items():
Path(tmpdir, p).parent.mkdir(parents=True, exist_ok=True)
with Path(tmpdir, p).open("w") as file:
file.write(content)
assert fromconfig.load(Path(tmpdir, "config.yaml")) == expected
@pytest.mark.parametrize(
"config, expected",
[
pytest.param(
{"_attr_": "str", "_args_": "hello"}, fromconfig.Config(_attr_="str", _args_="hello"), id="simple"
),
pytest.param(
{"_config_": {"_attr_": "str", "_args_": "hello"}},
fromconfig.Config(_attr_="str", _args_="hello"),
id="config",
),
pytest.param(
[("_attr_", "str"), ("_args_", "hello")], fromconfig.Config(_attr_="str", _args_="hello"), id="list",
),
],
)
def test_core_config_fromconfig(config, expected):
"""Test Config.fromconfig."""
assert fromconfig.Config.fromconfig(config) == expected
| 31.894737
| 113
| 0.565457
| 0
| 0
| 0
| 0
| 4,088
| 0.749542
| 0
| 0
| 1,666
| 0.305464
|
18b6001fed8371bb91ce9e52ae604dbe21d1ea14
| 5,353
|
py
|
Python
|
release.py
|
dhleong/beholder
|
1459c67907c436f6abc2abcd82c817e177fcd85f
|
[
"MIT"
] | 4
|
2020-03-11T01:35:42.000Z
|
2021-08-31T20:18:22.000Z
|
release.py
|
dhleong/beholder
|
1459c67907c436f6abc2abcd82c817e177fcd85f
|
[
"MIT"
] | 15
|
2018-04-29T20:25:14.000Z
|
2020-03-14T13:44:59.000Z
|
release.py
|
dhleong/beholder
|
1459c67907c436f6abc2abcd82c817e177fcd85f
|
[
"MIT"
] | 1
|
2020-10-27T22:43:46.000Z
|
2020-10-27T22:43:46.000Z
|
#!/usr/bin/env python
#
# Release script for beholder
#
import hashlib
import re  # used below when updating the homebrew formula
import urllib
from collections import OrderedDict
try:
from hostage import * #pylint: disable=unused-wildcard-import,wildcard-import
except ImportError:
print "!! Release library unavailable."
print "!! Use `pip install hostage` to fix."
print "!! You will also need an API token in .github.token,"
print "!! a .hubrrc config, or `brew install hub` configured."
print "!! A $GITHUB_TOKEN env variable will also work."
exit(1)
#
# Globals
#
notes = File(".last-release-notes")
latestTag = git.Tag.latest()
def sha256(fileUrl, blockSize=65536):
# based on: https://gist.github.com/rji/b38c7238128edf53a181
hasher = hashlib.sha256()
shafp = urllib.urlopen(fileUrl)
for block in iter(lambda: shafp.read(blockSize), b''):
hasher.update(block)
shafp.close()
return hasher.hexdigest()
def formatIssue(issue):
return "- {title} (#{number})\n".format(
number=issue.number,
title=issue.title)
def buildLabeled(labelsToTitles):
"""Given a set of (label, title) tuples, produces an
OrderedDict whose keys are `label`, and whose values are
dictionaries containing 'title' -> `title`, and
'content' -> string. The iteration order of the dictionary
will preserve the ordering of the provided tuples
"""
result = OrderedDict()
for k, v in labelsToTitles:
result[k] = {'title': v, 'content': ''}
return result
def buildDefaultNotes(_):
if not latestTag: return ''
logParams = {
'path': latestTag.name + "..HEAD",
'grep': ["Fix #", "Fixes #", "Closes #"],
'pretty': "format:- %s"}
logParams["invertGrep"] = True
msgs = git.Log(**logParams).output()
contents = ''
lastReleaseDate = latestTag.get_created_date()
if lastReleaseDate.tzinfo:
# pygithub doesn't respect tzinfo, so we have to do it ourselves
lastReleaseDate -= lastReleaseDate.tzinfo.utcoffset(lastReleaseDate)
lastReleaseDate = lastReleaseDate.replace(tzinfo=None)
closedIssues = github.find_issues(state='closed', since=lastReleaseDate)
labeled = buildLabeled([
['feature', "New Features"],
['enhancement', "Enhancements"],
['bug', "Bug Fixes"],
['_default', "Other resolved tickets"],
])
if closedIssues:
for issue in closedIssues:
found = False
for label in labeled.iterkeys():
if label in issue.labels:
labeled[label]['content'] += formatIssue(issue)
found = True
break
if not found:
labeled['_default']['content'] += formatIssue(issue)
for labeledIssueInfo in labeled.itervalues():
if labeledIssueInfo['content']:
contents += "\n**{title}**:\n{content}".format(**labeledIssueInfo)
if msgs: contents += "\n**Notes**:\n" + msgs
return contents.strip()
#
# Verify
#
verify(Grep("stopship", inDir="src").foundAny(silent=False)) \
.then(echoAndDie("I don't think so"))
version = verify(File("src/beholder.go")
.filtersTo(RegexFilter('const Version = "(.*)"'))
).valueElse(echoAndDie("No version!?"))
versionTag = git.Tag(version)
verify(versionTag.exists())\
.then(echoAndDie("Version `%s` already exists!" % version))
#
# Make sure all the tests pass
#
# this syntax recursively checks all subpackages for tests
verify(Execute("go test ./... -v")).succeeds(silent=False).orElse(die())
#
# Build the release notes
#
initialNotes = verify(notes.contents()).valueElse(buildDefaultNotes)
notes.delete()
verify(Edit(notes, withContent=initialNotes).didCreate())\
.orElse(echoAndDie("Aborted due to empty message"))
releaseNotes = notes.contents()
#
# Compile
#
versions = [
# (label, os, arch) tuples
("macOS", "darwin", "amd64"),
("windows-x64", "windows", "amd64"),
]
compiled = []
for (buildLabel, os, arch) in versions:
f = 'bin/beholder-%s-%s' % (version, buildLabel)
if os == "windows":
f += ".exe"
print "Compiling:", f
cmd = 'env GOOS=%s GOARCH=%s go build -v -o %s' % (os, arch, f)
verify(Execute(cmd)).succeeds(silent=False)
compiled.append(f)
#
# Upload to github
#
print "Uploading to Github..."
verify(versionTag).create()
verify(versionTag).push("origin")
gitRelease = github.Release(version)
verify(gitRelease).create(body=releaseNotes)
for f in compiled:
print "Uploading", f
verify(gitRelease).uploadFile(f, 'application/octet-stream')
#
# Update homebrew repo
#
print "Updating homebrew..."
tarUrl = 'https://github.com/dhleong/beholder/archive/%s.tar.gz' % version
tarSha = sha256(tarUrl)
homebrewConfig = github.Config("dhleong/homebrew-tap")
formulaFile = github.RepoFile("/Formula/beholder.rb", config=homebrewConfig)
oldContents = formulaFile.read()
newContents = oldContents
newContents = re.sub('url "[^"]+"', 'url "%s"' % tarUrl, newContents)
newContents = re.sub('sha256 "[^"]+"', 'sha256 "%s"' % tarSha, newContents)
print " url <-", tarUrl
print " sha256 <-", tarSha
commit = 'Update for v%s' % version
verify(formulaFile).write(newContents, commitMessage=commit)
#
# Success! Now, just cleanup and we're done!
#
notes.delete()
print "Done! Published %s" % version
| 27.172589
| 82
| 0.64618
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,964
| 0.366897
|
18b65fdb2a140d38c3ae1d51c5156e9061a7ade5
| 881
|
py
|
Python
|
cmsplugin_cascade/migrations/0003_inlinecascadeelement.py
|
aDENTinTIME/djangocms-cascade
|
c38c1c5ad052dbe233b50fb833ad8e9a919014f2
|
[
"MIT"
] | null | null | null |
cmsplugin_cascade/migrations/0003_inlinecascadeelement.py
|
aDENTinTIME/djangocms-cascade
|
c38c1c5ad052dbe233b50fb833ad8e9a919014f2
|
[
"MIT"
] | null | null | null |
cmsplugin_cascade/migrations/0003_inlinecascadeelement.py
|
aDENTinTIME/djangocms-cascade
|
c38c1c5ad052dbe233b50fb833ad8e9a919014f2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('cmsplugin_cascade', '0002_auto_20150530_1018'),
]
operations = [
migrations.CreateModel(
name='InlineCascadeElement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('glossary', jsonfield.fields.JSONField(default={}, blank=True)),
('cascade_element', models.ForeignKey(related_name='inline_elements', to='cmsplugin_cascade.CascadeElement', on_delete=models.CASCADE)),
],
options={
'db_table': 'cmsplugin_cascade_inline',
},
bases=(models.Model,),
),
]
| 31.464286
| 152
| 0.611805
| 748
| 0.849035
| 0
| 0
| 0
| 0
| 0
| 0
| 211
| 0.239501
|
18b6ab1df2a80e856e7bccdd1594333d60103c4a
| 366
|
py
|
Python
|
SmartWaiterAPI/API/collections/goeswellwith_operations.py
|
KyrumX/project78-api
|
334b4781a4488cf53b360f75b9f3265e40ebf8b4
|
[
"MIT"
] | null | null | null |
SmartWaiterAPI/API/collections/goeswellwith_operations.py
|
KyrumX/project78-api
|
334b4781a4488cf53b360f75b9f3265e40ebf8b4
|
[
"MIT"
] | null | null | null |
SmartWaiterAPI/API/collections/goeswellwith_operations.py
|
KyrumX/project78-api
|
334b4781a4488cf53b360f75b9f3265e40ebf8b4
|
[
"MIT"
] | null | null | null |
from API.models import GoesWellWith, Menu
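# get_goeswellwith_items returns the names of menu items paired with the given item via GoesWellWith, or ['None'] when no pairings exist.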
def get_goeswellwith_items(menuitem1):
entries = GoesWellWith.objects.filter(menuitem1=menuitem1)
result = []
if entries.count() <= 0:
result.append('None')
return result
else:
for e in entries:
result.append(Menu.objects.get(id=e.menuitem2_id).name)
return result
| 24.4
| 67
| 0.661202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.016393
|
18b77fe12dbcd84b5d365548128c4a03151b1396
| 3,949
|
py
|
Python
|
src/simulator/simulator.py
|
ed741/PathBench
|
50fe138eb1f824f49fe1a862705e435a1c3ec3ae
|
[
"BSD-3-Clause"
] | 46
|
2020-12-25T04:09:15.000Z
|
2022-03-25T12:32:42.000Z
|
src/simulator/simulator.py
|
ed741/PathBench
|
50fe138eb1f824f49fe1a862705e435a1c3ec3ae
|
[
"BSD-3-Clause"
] | 36
|
2020-12-21T16:10:02.000Z
|
2022-01-03T01:42:01.000Z
|
src/simulator/simulator.py
|
judicaelclair/PathBenchURO
|
101e67674efdfa8e27e1cf7787dac9fdf99552fe
|
[
"BSD-3-Clause"
] | 11
|
2021-01-06T23:34:12.000Z
|
2022-03-21T17:21:47.000Z
|
from typing import Optional
from algorithms.basic_testing import BasicTesting
from simulator.controllers.main_controller import MainController
from simulator.controllers.map.map_controller import MapController
from simulator.controllers.gui.gui_controller import GuiController
from simulator.models.main_model import MainModel
from simulator.models.map_model import MapModel
from simulator.services.debug import DebugLevel
from simulator.services.services import Services
from simulator.services.event_manager.events.event import Event
from simulator.services.event_manager.events.reinit_event import ReinitEvent
from simulator.views.main_view import MainView
from simulator.views.map.map_view import MapView
from simulator.views.gui.gui_view import GuiView
from structures import Size
"""
Implementation is done after https://github.com/wesleywerner/mvc-game-design
"""
class Simulator:
"""
The main simulator class
"""
__services: Services
__main: MainModel
__map: MapModel
__main_controller: MainController
__map_controller: MapController
__gui_controller: GuiController
__main_view: MainView
__map_view: MapView
__gui_view: GuiView
def __init__(self, services: Services) -> None:
# init services
self.__services = services
self.__services.ev_manager.register_listener(self)
self.__main = None
self.__map = None
self.__main_controller = None
self.__map_controller = None
self.__gui_controller = None
self.__main_view = None
self.__map_view = None
def start(self) -> Optional[BasicTesting]:
"""
Starts the simulator
:return The testing results if any
"""
if self.__services.settings.simulator_graphics:
return self.__start_with_graphics()
else:
return self.__start_without_graphics()
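# __try_setup_map_graphics (below) recreates the map model/view/controller once an algorithm instance is available, tearing down any previous map view and controller first.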
def __try_setup_map_graphics(self) -> None:
if self.__services.algorithm.instance is not None:
if self.__map_controller is not None:
self.__map_controller.destroy()
if self.__map_view is not None:
self.__map_view.destroy()
self.__map = MapModel(self.__services)
self.__map_view = MapView(self.__services, self.__map, self.__main_view)
self.__map_controller = MapController(self.__map_view, self.__services, self.__map)
def __start_with_graphics(self) -> None:
"""
Starts simulator with graphics
"""
# init models, views, controllers
self.__main = MainModel(self.__services)
# init views
self.__main_view = MainView(self.__services, self.__main, None)
self.__gui_view = GuiView(self.__services, None, self.__main_view)
# init controllers
self.__main_controller = MainController(self.__services, self.__main)
self.__gui_controller = GuiController(self.__gui_view, self.__services, self.__main)
self.__try_setup_map_graphics()
self.__main.run()
def __start_without_graphics(self) -> Optional[BasicTesting]:
"""
Starts simulator without graphics
:return: The test results
"""
self.__services.algorithm.instance.find_path()
return self.__services.algorithm.instance.testing
def notify(self, event: Event) -> None:
if isinstance(event, ReinitEvent):
if self.__map:
"""
self.__map.stop_algorithm()
if self.__map.last_thread:
self.__map.last_thread.join()
"""
self.__map.reset()
self.__services.ev_manager.unregister_listener(self.__map)
self.__services.ev_manager.unregister_tick_listener(self.__map)
self.__try_setup_map_graphics()
@property
def services(self) -> Services:
return self.__services
| 34.640351
| 95
| 0.683971
| 3,072
| 0.777918
| 0
| 0
| 76
| 0.019245
| 0
| 0
| 594
| 0.150418
|
18b7fbb4733a21ef838f96c25af5f53f3a7b8f73
| 1,445
|
py
|
Python
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/mrp_byproduct/models/mrp_subproduct.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | 1
|
2019-12-19T01:53:13.000Z
|
2019-12-19T01:53:13.000Z
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/mrp_byproduct/models/mrp_subproduct.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/mrp_byproduct/models/mrp_subproduct.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.addons import decimal_precision as dp
class MrpSubProduct(models.Model):
_name = 'mrp.subproduct'
_description = 'Byproduct'
product_id = fields.Many2one('product.product', 'Product', required=True)
product_qty = fields.Float(
'Product Qty',
default=1.0, digits=dp.get_precision('Product Unit of Measure'), required=True)
product_uom_id = fields.Many2one('product.uom', 'Unit of Measure', required=True)
bom_id = fields.Many2one('mrp.bom', 'BoM', ondelete='cascade')
operation_id = fields.Many2one('mrp.routing.workcenter', 'Produced at Operation')
@api.onchange('product_id')
def onchange_product_id(self):
""" Changes UoM if product_id changes. """
if self.product_id:
self.product_uom_id = self.product_id.uom_id.id
@api.onchange('product_uom_id')
def onchange_uom(self):
res = {}
if self.product_uom_id and self.product_id and self.product_uom_id.category_id != self.product_id.uom_id.category_id:
res['warning'] = {
'title': _('Warning'),
'message': _('The Product Unit of Measure you chose has a different category than in the product form.')
}
self.product_uom_id = self.product_id.uom_id.id
return res
| 40.138889
| 125
| 0.665744
| 1,254
| 0.86782
| 0
| 0
| 687
| 0.475433
| 0
| 0
| 482
| 0.333564
|
18b95560e12ae1f8ecbf164d50ad646b8d18c3b3
| 126
|
py
|
Python
|
contacts/urls.py
|
HaraDev001/RealEstate-Backend
|
db2ae8d143bd15fbb49432ae8b14fd3bf8e6dd1c
|
[
"MIT"
] | 2
|
2021-05-17T18:02:36.000Z
|
2021-05-17T18:02:44.000Z
|
contacts/urls.py
|
HaraDev001/RealEstate-Backend
|
db2ae8d143bd15fbb49432ae8b14fd3bf8e6dd1c
|
[
"MIT"
] | null | null | null |
contacts/urls.py
|
HaraDev001/RealEstate-Backend
|
db2ae8d143bd15fbb49432ae8b14fd3bf8e6dd1c
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import ContactCreateView
urlpatterns = [
path('', ContactCreateView.as_view()),
]
| 21
| 41
| 0.753968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0.015873
|
18b9e35412962cc6d7d17f54bab50f62ce2c5c9d
| 410
|
py
|
Python
|
Python_do_zero_Guanabara/04_CondiçõesEmPython/aula/aula15.py
|
HenriqueSOliver/Projetos_Python
|
f18c5a343ad1b746a12bd372298b2debe9bc65ec
|
[
"MIT"
] | null | null | null |
Python_do_zero_Guanabara/04_CondiçõesEmPython/aula/aula15.py
|
HenriqueSOliver/Projetos_Python
|
f18c5a343ad1b746a12bd372298b2debe9bc65ec
|
[
"MIT"
] | null | null | null |
Python_do_zero_Guanabara/04_CondiçõesEmPython/aula/aula15.py
|
HenriqueSOliver/Projetos_Python
|
f18c5a343ad1b746a12bd372298b2debe9bc65ec
|
[
"MIT"
] | null | null | null |
# previous model - while "cont <= 10" is true, the block below repeats
cont = 1
while cont <= 10:
print(cont, ' ...', end='')
cont += 1
print('FIM')
# Using "while True" it would repeat forever, so we must add a stop condition (BREAK)
n = s = 0
while True:
n = int(input('Digite um número: [Digite 999 para PARAR] '))
if n == 999:
break
s += n
print(f'A soma vale {s}')
| 27.333333
| 98
| 0.62439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 245
| 0.590361
|
18bb4104d3cd6b1e910557e18aee65ea9222b8ce
| 1,124
|
py
|
Python
|
internal/handlers/lebanon.py
|
fillingthemoon/cartogram-web
|
58b645bca0c22b9bccdb2a5a8213a5a24a7e5958
|
[
"MIT"
] | null | null | null |
internal/handlers/lebanon.py
|
fillingthemoon/cartogram-web
|
58b645bca0c22b9bccdb2a5a8213a5a24a7e5958
|
[
"MIT"
] | 20
|
2019-10-20T11:27:38.000Z
|
2022-03-12T00:28:17.000Z
|
internal/handlers/lebanon.py
|
fillingthemoon/cartogram-web
|
58b645bca0c22b9bccdb2a5a8213a5a24a7e5958
|
[
"MIT"
] | 16
|
2019-08-22T04:49:44.000Z
|
2021-06-09T04:44:57.000Z
|
import settings
import handlers.base_handler
import csv
class CartogramHandler(handlers.base_handler.BaseCartogramHandler):
def get_name(self):
return "Lebanon"
def get_gen_file(self):
return "{}/lbn_processedmap.json".format(settings.CARTOGRAM_DATA_DIR)
def validate_values(self, values):
if len(values) != 8:
return False
for v in values:
if type(v) != float:
return False
return True
def gen_area_data(self, values):
return """1 {} Akkar
2 {} Baalbak-Hermel
3 {} Beirut
4 {} Beqaa
5 {} Mount Lebanon
6 {} Nabatieh
7 {} North
8 {} South""".format(*values)
def expect_geojson_output(self):
return True
def csv_to_area_string_and_colors(self, csvfile):
return self.order_by_example(csv.reader(csvfile), "Governorate", 0, 1, 2, 3, ["Akkar","Baalbak-Hermel","Beirut","Beqaa","Mount Lebanon","Nabatieh","North","South"], [0.0 for i in range(0,8)], {"Akkar":"1","Baalbak-Hermel":"2","Beirut":"3","Beqaa":"4","Mount Lebanon":"5","Nabatieh":"6","North":"7","South":"8"})
| 28.1
| 319
| 0.623665
| 1,066
| 0.948399
| 0
| 0
| 0
| 0
| 0
| 0
| 340
| 0.302491
|
18bbd1f2f3931ba0aa7f9a0bc9c67949e29e02ad
| 11,184
|
py
|
Python
|
routes/GetFeed/insta_crawling 복사본/ScrollFeed.py
|
akalswl14/styltebox_manageweb
|
5d0e33435a7456387d28b6b58762912d0552a717
|
[
"MIT"
] | null | null | null |
routes/GetFeed/insta_crawling 복사본/ScrollFeed.py
|
akalswl14/styltebox_manageweb
|
5d0e33435a7456387d28b6b58762912d0552a717
|
[
"MIT"
] | 2
|
2021-03-31T20:20:47.000Z
|
2021-12-13T20:50:07.000Z
|
routes/GetFeed/insta_crawling 복사본/ScrollFeed.py
|
akalswl14/styltebox_manageweb
|
5d0e33435a7456387d28b6b58762912d0552a717
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import urllib.request
from urllib.request import urlopen # package for opening internet URLs
from urllib.parse import quote_plus # converts Korean (Hangul) text into a unicode/URL-encoded form
from bs4 import BeautifulSoup
from selenium import webdriver # import the webdriver
import time # package used for waiting during crawling
from time import sleep
import warnings # package for suppressing warning messages
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
from MakeExcel import MakeFollowerExcel
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
warnings.filterwarnings(action='ignore') # suppress warning messages
# Build the Instagram base URL
baseUrl = "https://www.instagram.com/"
SCROLL_PAUSE_TIME = 1.0
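# Seconds to wait after each scroll so that lazily loaded content has time to render.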
def Login(driver):
# login_section = '//*[@id="react-root"]/section/nav/div/div/div[2]/div/div/div/a[1]'
# driver.find_element_by_xpath(login_section).click()
time.sleep(2)
elem_login = driver.find_element_by_name("username")
elem_login.clear()
elem_login.send_keys('PUT YOUR ID HERE')
elem_login = driver.find_element_by_name('password')
elem_login.clear()
elem_login.send_keys('PUT YOUR PASSWORD HERE')
time.sleep(1)
xpath = '//*[@id="react-root"]/section/main/article/div/div/div/form/div[7]/button'
driver.find_element_by_xpath(xpath).click()
time.sleep(3)
# try:
xpath = '//*[@id="react-root"]/section/main/div/div/div/button'
driver.find_element_by_xpath(xpath).click()
# except:
# pass
time.sleep(4)
def GetFollowers(driver,instaId):
url = baseUrl + instaId
driver.find_element(By.XPATH,'//*[@id="react-root"]/section/main/div/ul/li[2]/a').click()
time.sleep(3)
driver.find_element(By.XPATH,'/html/body/div[5]/div/div[2]/div/div/div/div[3]/a').click()
Login(driver)
driver.find_element(By.XPATH,'//*[@id="react-root"]/section/main/div/ul/li[2]/a').click()
time.sleep(3)
FollowerList = []
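# Scroll until the page height stops growing (checked twice to allow for slow loading), collecting follower names from the '.d7ByH' elements on every pass; duplicates are removed with set().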
while True:
print('스크롤 하면서 Follower페이지의 끝을 찾는 중입니다.')
pageString = driver.page_source
soup = BeautifulSoup(pageString, "lxml")
FollowerElementList = soup.select('.d7ByH')
for follower in FollowerElementList :
FollowerList.append(follower.text)
last_height = driver.execute_script("return document.body.scrollHeight")
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
sleep(SCROLL_PAUSE_TIME)
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
sleep(SCROLL_PAUSE_TIME)
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
FollowerList = list(set(FollowerList))
print(str(len(FollowerList))+"개의 팔로워 수집")
break
else:
last_height = new_height
continue
driver.get(url)
# MakeFollowerExcel(FollowerList)
return FollowerList
def ScrollFeed(driver, instaId):
url = baseUrl + instaId
driver.get(url)
time.sleep(3)
try:
xpath = '//*[@id="link_profile"]/a'
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, xpath))
)
driver.find_element(By.XPATH,xpath).click()
except :
pass
Login(driver)
try :
xpath = '//*[@id="react-root"]/section/nav/div/div/section/div/div[2]/div[4]/button'
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, xpath))
)
driver.find_element(By.XPATH,xpath).click()
except :
pass
pageString = driver.page_source
soup = BeautifulSoup(pageString, "lxml")
OriginalFollowerNum = soup.select('.g47SY.lOXF2')[1].attrs['title']
OriginalFollowerNum = int(OriginalFollowerNum.replace(",",""))
OriginalPostNum = soup.select('.g47SY.lOXF2')[0].text
OriginalPostNum = int(OriginalPostNum.replace(",",""))
print("팔로워 수는 원래 " + str(OriginalFollowerNum)+"개 입니다.")
# FollowerList = GetFollowers(driver,instaId)
time.sleep(3)
reallink = [] # list of post URLs
pageString = driver.page_source
soup = BeautifulSoup(pageString, "lxml")
print("포스트 갯수는 원래 " + str(OriginalPostNum)+"개 입니다.")
# EX_FeedElementSet = set()
OnScroll = False
while True:
try :
xpath = '//*[@id="react-root"]/section/nav/div/div/section/div/div[2]/div[4]/button'
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, xpath))
)
driver.find_element(By.XPATH,xpath).click()
except :
pass
print('스크롤 하면서 페이지의 끝을 찾는 중입니다.')
pageString = driver.page_source
bsObj = BeautifulSoup(pageString, "lxml")
if OnScroll == False :
FeedElementList = bsObj.select(".v1Nh3.kIKUG._bz0w a")
for EachFeed in FeedElementList :
reallink.append(EachFeed.attrs['href'])
OnScroll = True
else :
FeedElementList = bsObj.select(".v1Nh3.kIKUG._bz0w a")
ListSize = len(FeedElementList)
if ListSize > 12 :
NewStartPoint = ListSize-12
FeedElementList = FeedElementList[NewStartPoint:]
for EachFeed in FeedElementList :
reallink.append(EachFeed.attrs['href'])
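## After the first pass only the tail of the thumbnail list is appended (the grid appears to keep about 12 freshly loaded items); any duplicates are removed later with set() once scrolling stops.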
last_height = driver.execute_script("return document.body.scrollHeight")
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
sleep(SCROLL_PAUSE_TIME)
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
reallink = list(set(reallink))
if(len(reallink) != OriginalPostNum):
print("현재 모은 url 개수는 "+ str(len(reallink)))
while new_height == last_height :
print("last_height:"+str(last_height)+"/new_height:"+str(new_height))
print('게시글 개수만큼 크롤링되지 않아서 무한 로딩중...!')
last_height = driver.execute_script("return document.body.scrollHeight")
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
sleep(SCROLL_PAUSE_TIME)
new_height = driver.execute_script("return document.body.scrollHeight")
else :
break
else:
last_height = new_height
continue
reallinknum = len(reallink)
print("총"+str(reallinknum)+"개의 데이터.")
# Save the list of post URLs to a txt file
f = open('urllist.txt', 'w')
f.write(str(reallink))
f.close()
print("txt저장성공")
# Logout(driver)
return reallink
def Logout(driver):
driver.find_element(By.XPATH,'//*[@id="react-root"]/section/nav/div/div/div[2]/div/div/div[5]/a').click()
sleep(2)
xpath = '//*[@id="react-root"]/section/nav[1]/div/header/div/div[1]/button'
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, xpath))
)
driver.find_element(By.XPATH,xpath).click()
xpath = '//*[@id="react-root"]/section/nav[1]/div/section/div[3]/div/div[4]/div/div/a'
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, xpath))
)
driver.find_element(By.XPATH,xpath).click()
xpath = '/html/body/div[4]/div/div/div[2]/button[1]'
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, xpath))
)
driver.find_element(By.XPATH,xpath).click()
sleep(2)
def Scroll_SomeFeed(driver, brand):
rtndata = []
instaId = brand['instaID']
dataFeedNum = brand['FeedNum']
url = baseUrl + instaId
driver.get(url)
time.sleep(3)
try :
xpath = '//*[@id="react-root"]/section/nav/div/div/section/div/div[2]/div[4]/button'
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, xpath))
)
driver.find_element(By.XPATH,xpath).click()
except :
pass
pageString = driver.page_source
soup = BeautifulSoup(pageString, "lxml")
OriginalFollowerNum = soup.select('.g47SY.lOXF2')[1].attrs['title']
OriginalFollowerNum = int(OriginalFollowerNum.replace(",",""))
OriginalPostNum = soup.select('.g47SY.lOXF2')[0].text
OriginalPostNum = int(OriginalPostNum.replace(",",""))
print("팔로워 수는 원래 " + str(OriginalFollowerNum)+"개 입니다.")
# Save the follower count
brand['FollowerNum'] = OriginalFollowerNum
NewFeedNum = OriginalPostNum-dataFeedNum
# On the first crawl only 20 posts will be collected.
if dataFeedNum == 0:
if(OriginalPostNum<20):
NewFeedNum = OriginalPostNum
else:
NewFeedNum = 20
# Save the number of new posts
ReviewStatus = brand['ReviewStatus']
if ReviewStatus == 'N':
brand['NewFeedNum'] += NewFeedNum
else:
brand['NewFeedNum'] = NewFeedNum
# Save the post count
brand['FeedNum'] = OriginalPostNum
if NewFeedNum == 0:
rtndata = [brand,[]]
return rtndata
# FollowerList = GetFollowers(driver,instaId)
time.sleep(3)
reallink = [] # list of post URLs
pageString = driver.page_source
soup = BeautifulSoup(pageString, "lxml")
print("포스트 갯수는 원래 " + str(OriginalPostNum)+"개 입니다.")
try :
xpath = '//*[@id="react-root"]/section/nav/div/div/section/div/div[2]/div[4]/button'
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, xpath))
)
driver.find_element(By.XPATH,xpath).click()
except :
pass
try:
xpath = '//*[@id="react-root"]/section/main/div/div[4]/div[1]/div/button'
driver.find_element(By.XPATH,xpath).click()
sleep(2)
except:
pass
pageString = driver.page_source
bsObj = BeautifulSoup(pageString, "lxml")
FeedElementList = bsObj.select(".v1Nh3.kIKUG._bz0w a")
while len(FeedElementList) < NewFeedNum:
try :
xpath= '//*[@id="react-root"]/section/main/div/div[3]/div[1]/div/button'
driver.find_element(By.XPATH,xpath).click()
sleep(1)
except:
pass
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
sleep(SCROLL_PAUSE_TIME)
new_height = driver.execute_script("return document.body.scrollHeight")
pageString = driver.page_source
bsObj = BeautifulSoup(pageString, "lxml")
FeedElementList = bsObj.select(".v1Nh3.kIKUG._bz0w a")
cnt = 0
for EachFeed in FeedElementList :
# Only the top NewFeedNum posts are collected (20 on the first crawl)
if cnt == NewFeedNum :
break
reallink.append(EachFeed.attrs['href'])
cnt += 1
reallinknum = len(reallink)
print("총"+str(reallinknum)+"개의 데이터.")
rtndata = [brand,reallink]
return rtndata
| 36.429967
| 109
| 0.630097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,386
| 0.288465
|
18bca4227b43e8db0e3b74e9fc679d7c822dc33c
| 358
|
py
|
Python
|
option.py
|
ujiro99/python_cli_sample
|
34e39e05722ebba3b539861b6567aeecb93a818f
|
[
"MIT"
] | null | null | null |
option.py
|
ujiro99/python_cli_sample
|
34e39e05722ebba3b539861b6567aeecb93a818f
|
[
"MIT"
] | null | null | null |
option.py
|
ujiro99/python_cli_sample
|
34e39e05722ebba3b539861b6567aeecb93a818f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
@click.command()
@click.option('-n', '--name', default='World', help='Greeting partner')
def cmd(name):
"""
Show greeting message.
:type name: str
"""
msg = 'Hello, {name}!'.format(name=name)
click.echo(msg)
def main():
cmd()
if __name__ == '__main__':
main()
| 14.916667
| 71
| 0.572626
| 0
| 0
| 0
| 0
| 231
| 0.645251
| 0
| 0
| 165
| 0.460894
|
18bcc995a7294c17a7102d9ddff9a88a24d958f1
| 27
|
py
|
Python
|
itsnp/__init__.py
|
CaffeineDuck/itsnp-discord-bot
|
73d8fddc282c0fbc3cdaef81eef3efa9dccacfd8
|
[
"MIT"
] | null | null | null |
itsnp/__init__.py
|
CaffeineDuck/itsnp-discord-bot
|
73d8fddc282c0fbc3cdaef81eef3efa9dccacfd8
|
[
"MIT"
] | null | null | null |
itsnp/__init__.py
|
CaffeineDuck/itsnp-discord-bot
|
73d8fddc282c0fbc3cdaef81eef3efa9dccacfd8
|
[
"MIT"
] | null | null | null |
from .bot import ItsnpBot
| 13.5
| 26
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
18be667bef982c766e8e51b2444d4138ae324879
| 7,182
|
py
|
Python
|
mojo/public/tools/bindings/pylib/parse/mojo_lexer_unittest.py
|
Acidburn0zzz/chromium-1
|
4c08f442d2588a2c7cfaa117a55bd87d2ac32f9a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
mojo/public/tools/bindings/pylib/parse/mojo_lexer_unittest.py
|
Acidburn0zzz/chromium-1
|
4c08f442d2588a2c7cfaa117a55bd87d2ac32f9a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
mojo/public/tools/bindings/pylib/parse/mojo_lexer_unittest.py
|
Acidburn0zzz/chromium-1
|
4c08f442d2588a2c7cfaa117a55bd87d2ac32f9a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import mojo_lexer
import unittest
# Try to load the ply module, if not, then assume it is in the third_party
# directory.
try:
# Disable lint check which fails to find the ply module.
# pylint: disable=F0401
from ply import lex
except ImportError:
# This assumes this file is in src/mojo/public/tools/bindings/pylib/parse/.
module_path, module_name = os.path.split(__file__)
third_party = os.path.join(module_path, os.pardir, os.pardir, os.pardir,
os.pardir, os.pardir, os.pardir, 'third_party')
sys.path.append(third_party)
# pylint: disable=F0401
from ply import lex
# This (monkey-patching LexToken to make comparison value-based) is evil, but
# we'll do it anyway. (I'm pretty sure ply's lexer never cares about comparing
# for object identity.)
def _LexTokenEq(self, other):
return self.type == other.type and self.value == other.value and \
self.lineno == other.lineno and self.lexpos == other.lexpos
setattr(lex.LexToken, '__eq__', _LexTokenEq)
def _MakeLexToken(type, value, lineno=1, lexpos=0):
"""Makes a LexToken with the given parameters. (Note that lineno is 1-based,
but lexpos is 0-based.)"""
rv = lex.LexToken()
rv.type, rv.value, rv.lineno, rv.lexpos = type, value, lineno, lexpos
return rv
def _MakeLexTokenForKeyword(keyword, **kwargs):
"""Makes a LexToken for the given keyword."""
return _MakeLexToken(keyword.upper(), keyword.lower(), **kwargs)
class MojoLexerTest(unittest.TestCase):
"""Tests mojo_lexer (in particular, Lexer)."""
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
# Clone all lexer instances from this one, since making a lexer is slow.
self._zygote_lexer = lex.lex(mojo_lexer.Lexer("my_file.mojom"))
def testValidSingleKeywords(self):
"""Tests valid, single keywords."""
self.assertEquals(self._SingleTokenForInput("handle"),
_MakeLexTokenForKeyword("handle"))
self.assertEquals(self._SingleTokenForInput("data_pipe_consumer"),
_MakeLexTokenForKeyword("data_pipe_consumer"))
self.assertEquals(self._SingleTokenForInput("data_pipe_producer"),
_MakeLexTokenForKeyword("data_pipe_producer"))
self.assertEquals(self._SingleTokenForInput("message_pipe"),
_MakeLexTokenForKeyword("message_pipe"))
self.assertEquals(self._SingleTokenForInput("import"),
_MakeLexTokenForKeyword("import"))
self.assertEquals(self._SingleTokenForInput("module"),
_MakeLexTokenForKeyword("module"))
self.assertEquals(self._SingleTokenForInput("struct"),
_MakeLexTokenForKeyword("struct"))
self.assertEquals(self._SingleTokenForInput("interface"),
_MakeLexTokenForKeyword("interface"))
self.assertEquals(self._SingleTokenForInput("enum"),
_MakeLexTokenForKeyword("enum"))
def testValidSingleTokens(self):
"""Tests valid, single (non-keyword) tokens."""
self.assertEquals(self._SingleTokenForInput("asdf"),
_MakeLexToken("NAME", "asdf"))
self.assertEquals(self._SingleTokenForInput("@123"),
_MakeLexToken("ORDINAL", "@123"))
self.assertEquals(self._SingleTokenForInput("456"),
_MakeLexToken("INT_CONST_DEC", "456"))
self.assertEquals(self._SingleTokenForInput("0765"),
_MakeLexToken("INT_CONST_OCT", "0765"))
self.assertEquals(self._SingleTokenForInput("0x01aB2eF3"),
_MakeLexToken("INT_CONST_HEX", "0x01aB2eF3"))
self.assertEquals(self._SingleTokenForInput("123.456"),
_MakeLexToken("FLOAT_CONST", "123.456"))
self.assertEquals(self._SingleTokenForInput("'x'"),
_MakeLexToken("CHAR_CONST", "'x'"))
self.assertEquals(self._SingleTokenForInput("\"hello\""),
_MakeLexToken("STRING_LITERAL", "\"hello\""))
self.assertEquals(self._SingleTokenForInput("+"),
_MakeLexToken("PLUS", "+"))
self.assertEquals(self._SingleTokenForInput("-"),
_MakeLexToken("MINUS", "-"))
self.assertEquals(self._SingleTokenForInput("*"),
_MakeLexToken("TIMES", "*"))
self.assertEquals(self._SingleTokenForInput("/"),
_MakeLexToken("DIVIDE", "/"))
self.assertEquals(self._SingleTokenForInput("%"),
_MakeLexToken("MOD", "%"))
self.assertEquals(self._SingleTokenForInput("|"),
_MakeLexToken("OR", "|"))
self.assertEquals(self._SingleTokenForInput("~"),
_MakeLexToken("NOT", "~"))
self.assertEquals(self._SingleTokenForInput("^"),
_MakeLexToken("XOR", "^"))
self.assertEquals(self._SingleTokenForInput("<<"),
_MakeLexToken("LSHIFT", "<<"))
self.assertEquals(self._SingleTokenForInput(">>"),
_MakeLexToken("RSHIFT", ">>"))
self.assertEquals(self._SingleTokenForInput("="),
_MakeLexToken("EQUALS", "="))
self.assertEquals(self._SingleTokenForInput("=>"),
_MakeLexToken("RESPONSE", "=>"))
self.assertEquals(self._SingleTokenForInput("("),
_MakeLexToken("LPAREN", "("))
self.assertEquals(self._SingleTokenForInput(")"),
_MakeLexToken("RPAREN", ")"))
self.assertEquals(self._SingleTokenForInput("["),
_MakeLexToken("LBRACKET", "["))
self.assertEquals(self._SingleTokenForInput("]"),
_MakeLexToken("RBRACKET", "]"))
self.assertEquals(self._SingleTokenForInput("{"),
_MakeLexToken("LBRACE", "{"))
self.assertEquals(self._SingleTokenForInput("}"),
_MakeLexToken("RBRACE", "}"))
self.assertEquals(self._SingleTokenForInput("<"),
_MakeLexToken("LANGLE", "<"))
self.assertEquals(self._SingleTokenForInput(">"),
_MakeLexToken("RANGLE", ">"))
self.assertEquals(self._SingleTokenForInput(";"),
_MakeLexToken("SEMI", ";"))
self.assertEquals(self._SingleTokenForInput(","),
_MakeLexToken("COMMA", ","))
self.assertEquals(self._SingleTokenForInput("."),
_MakeLexToken("DOT", "."))
def _TokensForInput(self, input):
"""Gets a list of tokens for the given input string."""
lexer = self._zygote_lexer.clone()
lexer.input(input)
rv = []
while True:
tok = lexer.token()
if not tok:
return rv
rv.append(tok)
def _SingleTokenForInput(self, input):
"""Gets the single token for the given input string. (Raises an exception if
the input string does not result in exactly one token.)"""
toks = self._TokensForInput(input)
assert len(toks) == 1
return toks[0]
if __name__ == "__main__":
unittest.main()
| 44.608696
| 80
| 0.632693
| 5,523
| 0.769006
| 0
| 0
| 0
| 0
| 0
| 0
| 1,928
| 0.268449
|
18c2d8a09f275424cdb15f2a256534524b3fa369
| 59
|
py
|
Python
|
glue/admin.py
|
Valchris/AngularJS-Django-Template
|
10c90087984dcd9e6d29380eb4380824e65bcecf
|
[
"MIT"
] | 1
|
2015-07-29T04:28:26.000Z
|
2015-07-29T04:28:26.000Z
|
glue/admin.py
|
Valchris/AngularJS-Django-Template
|
10c90087984dcd9e6d29380eb4380824e65bcecf
|
[
"MIT"
] | null | null | null |
glue/admin.py
|
Valchris/AngularJS-Django-Template
|
10c90087984dcd9e6d29380eb4380824e65bcecf
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from glue.models import *
| 19.666667
| 32
| 0.813559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
18c72218e5a46e6e788b195ce2de8f4c86c23159
| 444
|
py
|
Python
|
qmt/geometry/geo_data_base.py
|
basnijholt/qmt
|
68f781ff489fd9f5ddc817dacfc8ff3a8fdeb2b4
|
[
"MIT"
] | null | null | null |
qmt/geometry/geo_data_base.py
|
basnijholt/qmt
|
68f781ff489fd9f5ddc817dacfc8ff3a8fdeb2b4
|
[
"MIT"
] | null | null | null |
qmt/geometry/geo_data_base.py
|
basnijholt/qmt
|
68f781ff489fd9f5ddc817dacfc8ff3a8fdeb2b4
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict, List
from qmt.infrastructure import WithParts
class GeoData(WithParts):
def __init__(self, lunit: str = "nm"):
"""Base class for geometry data objects.
Parameters
----------
lunit : str, optional
Length unit for this geometry, by default "nm"
"""
self.lunit: str = lunit
self.build_order: List[str] = []
super().__init__()
| 26.117647
| 58
| 0.572072
| 365
| 0.822072
| 0
| 0
| 0
| 0
| 0
| 0
| 192
| 0.432432
|
18c9fc293f4846928246ba71ec2d917b2627fc7c
| 20,166
|
py
|
Python
|
ANSIBLE/library/eos_routemap.py
|
ayosef/pynet_test
|
1b750a62467fbbcb2436c035ce49d41b435f45ba
|
[
"Apache-2.0"
] | null | null | null |
ANSIBLE/library/eos_routemap.py
|
ayosef/pynet_test
|
1b750a62467fbbcb2436c035ce49d41b435f45ba
|
[
"Apache-2.0"
] | null | null | null |
ANSIBLE/library/eos_routemap.py
|
ayosef/pynet_test
|
1b750a62467fbbcb2436c035ce49d41b435f45ba
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
DOCUMENTATION = """
---
module: eos_routemap
short_description: Manage EOS routemap resources
description:
- This module will manage routemap entries on EOS nodes
version_added: 1.2.0
category: Route Policy
author: Arista EOS+
requirements:
- Arista EOS 4.13.7M or later with command API enabled
- Python Client for eAPI 0.4.0 or later
notes:
- All configuration is idempotent unless otherwise specified
- Supports eos metaparameters for using the eAPI transport
- Supports stateful resource configuration.
options:
name:
description:
- The name of the routemap to manage.
required: true
default: null
choices: []
aliases: []
version_added: 1.2.0
action:
description:
- The action associated with the routemap name.
required: true
default: 'permit'
choices: ['permit','deny']
aliases: []
version_added: 1.2.0
seqno:
description:
- The sequence number of the rule that this entry corresponds to.
required: true
default: null
choices: []
aliases: []
version_added: 1.2.0
description:
description:
- The description for this routemap entry.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
match:
description:
- The list of match statements that define the routemap entry. The
match statements should be a comma separated list of match statements
without the word match at the beginning of the string. See the example
below for more information.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
set:
description:
- The list of set statements that define the routemap entry. The
set statements should be a comma separated list of set statements
without the word set at the beginning of the string. See the example
below for more information.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
continue:
description:
- The statement defines the next routemap clause to evaluate.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
"""
EXAMPLES = """
- eos_routemap: name=rm1 action=permit seqno=10
description='this is a great routemap'
match='as 50,interface Ethernet2'
set='tag 100,weight 1000'
continue=20
"""
#<<EOS_COMMON_MODULE_START>>
import syslog
import collections
from ansible.module_utils.basic import *
try:
import pyeapi
PYEAPI_AVAILABLE = True
except ImportError:
PYEAPI_AVAILABLE = False
DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE
DEFAULT_CONNECTION = 'localhost'
TRANSPORTS = ['socket', 'http', 'https', 'http_local']
class EosConnection(object):
__attributes__ = ['username', 'password', 'host', 'transport', 'port']
def __init__(self, **kwargs):
self.connection = kwargs['connection']
self.transport = kwargs.get('transport')
self.username = kwargs.get('username')
self.password = kwargs.get('password')
self.host = kwargs.get('host')
self.port = kwargs.get('port')
self.config = kwargs.get('config')
def connect(self):
if self.config is not None:
pyeapi.load_config(self.config)
config = dict()
if self.connection is not None:
config = pyeapi.config_for(self.connection)
if not config:
msg = 'Connection name "{}" not found'.format(self.connection)
raise ValueError(msg)
for key in self.__attributes__:
if getattr(self, key) is not None:
config[key] = getattr(self, key)
if 'transport' not in config:
raise ValueError('Connection must define a transport')
connection = pyeapi.client.make_connection(**config)
node = pyeapi.client.Node(connection, **config)
try:
node.enable('show version')
except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
raise ValueError('unable to connect to {}'.format(node))
return node
class EosAnsibleModule(AnsibleModule):
meta_args = {
'config': dict(),
'username': dict(),
'password': dict(),
'host': dict(),
'connection': dict(default=DEFAULT_CONNECTION),
'transport': dict(choices=TRANSPORTS),
'port': dict(),
'debug': dict(type='bool', default='false'),
'logging': dict(type='bool', default='true')
}
stateful_args = {
'state': dict(default='present', choices=['present', 'absent']),
}
def __init__(self, stateful=True, autorefresh=False, *args, **kwargs):
kwargs['argument_spec'].update(self.meta_args)
self._stateful = stateful
if stateful:
kwargs['argument_spec'].update(self.stateful_args)
## Ok, so in Ansible 2.0,
## AnsibleModule.__init__() sets self.params and then
## calls self.log()
## (through self._log_invocation())
##
## However, self.log() (overridden in EosAnsibleModule)
## references self._logging
## and self._logging (defined in EosAnsibleModule)
## references self.params.
##
## So ... I'm defining self._logging without "or self.params['logging']"
## *before* AnsibleModule.__init__() to avoid a "ref before def".
##
## I verified that this works with Ansible 1.9.4 and 2.0.0.2.
## The only caveat is that the first log message in
## AnsibleModule.__init__() won't be subject to the value of
## self.params['logging'].
self._logging = kwargs.get('logging')
super(EosAnsibleModule, self).__init__(*args, **kwargs)
self.result = dict(changed=False, changes=dict())
self._debug = kwargs.get('debug') or self.boolean(self.params['debug'])
self._logging = kwargs.get('logging') or self.params['logging']
self.log('DEBUG flag is %s' % self._debug)
self.debug('pyeapi_version', self.check_pyeapi())
self.debug('stateful', self._stateful)
self.debug('params', self.params)
self._attributes = self.map_argument_spec()
self.validate()
self._autorefresh = autorefresh
self._node = EosConnection(**self.params)
self._node.connect()
self._node = self.connect()
self._instance = None
self.desired_state = self.params['state'] if self._stateful else None
self.exit_after_flush = kwargs.get('exit_after_flush')
@property
def instance(self):
if self._instance:
return self._instance
func = self.func('instance')
if not func:
self.fail('Module does not support "instance"')
try:
self._instance = func(self)
except Exception as exc:
self.fail('instance[error]: %s' % exc.message)
self.log("called instance: %s" % self._instance)
return self._instance
@property
def attributes(self):
return self._attributes
@property
def node(self):
return self._node
def check_pyeapi(self):
if not PYEAPI_AVAILABLE:
self.fail('Unable to import pyeapi, is it installed?')
return pyeapi.__version__
def map_argument_spec(self):
"""map_argument_spec maps only the module argument spec to attrs
This method will map the argumentspec minus the meta_args to attrs
and return the attrs. This returns a dict object that includes only
the original argspec plus the stateful_args (if self._stateful=True)
Returns:
dict: Returns a dict object that includes the original
argument_spec plus stateful_args with values minus meta_args
"""
keys = set(self.params).difference(self.meta_args)
attrs = dict()
attrs = dict([(k, self.params[k]) for k in self.params if k in keys])
if 'CHECKMODE' in attrs:
del attrs['CHECKMODE']
return attrs
def validate(self):
for key, value in self.attributes.iteritems():
func = self.func('validate_%s' % key)
if func:
self.attributes[key] = func(value)
def create(self):
if not self.check_mode:
func = self.func('create')
if not func:
self.fail('Module must define "create" function')
return self.invoke(func, self)
def remove(self):
if not self.check_mode:
func = self.func('remove')
if not func:
self.fail('Module most define "remove" function')
return self.invoke(func, self)
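# flush() reconciles the desired state with the current instance: it creates or removes the resource as needed, pushes attribute-level changes through the per-attribute set_<name> handlers, and finally calls the module-level flush hook if one is defined.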
def flush(self, exit_after_flush=False):
self.exit_after_flush = exit_after_flush
if self.desired_state == 'present' or not self._stateful:
if self.instance.get('state') == 'absent':
changed = self.create()
self.result['changed'] = changed or True
self.refresh()
# After a create command, flush the running-config
# so we get the latest for any other attributes
self._node._running_config = None
changeset = self.attributes.viewitems() - self.instance.viewitems()
if self._debug:
self.debug('desired_state', self.attributes)
self.debug('current_state', self.instance)
changes = self.update(changeset)
if changes:
self.result['changes'] = changes
self.result['changed'] = True
self._attributes.update(changes)
flush = self.func('flush')
if flush:
self.invoke(flush, self)
elif self.desired_state == 'absent' and self._stateful:
if self.instance.get('state') == 'present':
changed = self.remove()
self.result['changed'] = changed or True
elif self._stateful:
if self.desired_state != self.instance.get('state'):
func = self.func(self.desired_state)
changed = self.invoke(func, self)
self.result['changed'] = changed or True
self.refresh()
# By calling self.instance here we trigger another show running-config
# all which causes delay. Only if debug is enabled do we call this
# since it will display the latest state of the object.
if self._debug:
self.result['instance'] = self.instance
if self.exit_after_flush:
self.exit()
def update(self, changeset):
changes = dict()
for key, value in changeset:
if value is not None:
changes[key] = value
func = self.func('set_%s' % key)
if func and not self.check_mode:
try:
self.invoke(func, self)
except Exception as exc:
self.fail(exc.message)
return changes
def connect(self):
if self.params['config']:
pyeapi.load_config(self.params['config'])
config = dict()
if self.params['connection']:
config = pyeapi.config_for(self.params['connection'])
if not config:
msg = 'Connection name "%s" not found' % self.params['connection']
self.fail(msg)
if self.params['username']:
config['username'] = self.params['username']
if self.params['password']:
config['password'] = self.params['password']
if self.params['transport']:
config['transport'] = self.params['transport']
if self.params['port']:
config['port'] = self.params['port']
if self.params['host']:
config['host'] = self.params['host']
if 'transport' not in config:
self.fail('Connection must define a transport')
connection = pyeapi.client.make_connection(**config)
self.log('Creating connection with autorefresh=%s' % self._autorefresh)
node = pyeapi.client.Node(connection, autorefresh=self._autorefresh,
**config)
try:
resp = node.enable('show version')
self.debug('eos_version', resp[0]['result']['version'])
self.debug('eos_model', resp[0]['result']['modelName'])
except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
self.fail('unable to connect to %s' % node)
else:
self.log('Connected to node %s' % node)
self.debug('node', str(node))
return node
def config(self, commands):
self.result['changed'] = True
if not self.check_mode:
self.node.config(commands)
def api(self, module):
return self.node.api(module)
def func(self, name):
return globals().get(name)
def invoke(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
self.fail(exc.message)
def invoke_function(self, name, *args, **kwargs):
func = self.func(name)
if func:
return self.invoke(func, *args, **kwargs)
def fail(self, msg):
self.invoke_function('on_fail', self)
self.log('ERROR: %s' % msg, priority=syslog.LOG_ERR)
self.fail_json(msg=msg)
def exit(self):
self.invoke_function('on_exit', self)
self.log('Module completed successfully')
self.exit_json(**self.result)
def refresh(self):
self._instance = None
def debug(self, key, value):
if self._debug:
if 'debug' not in self.result:
self.result['debug'] = dict()
self.result['debug'][key] = value
def log(self, message, log_args=None, priority=None):
if self._logging:
syslog.openlog('ansible-eos')
priority = priority or DEFAULT_SYSLOG_PRIORITY
syslog.syslog(priority, str(message))
@classmethod
def add_state(cls, name):
cls.stateful_args['state']['choices'].append(name)
#<<EOS_COMMON_MODULE_END>>
def instance(module):
""" Returns an instance of Routemaps based on name, action and sequence
number.
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
_instance = dict(name=name, action=action, seqno=seqno, state='absent')
try:
result = module.api('routemaps').get(name)[action][seqno]
except Exception:
result = None
if result:
_instance['state'] = 'present'
_instance['seqno'] = str(seqno)
_instance['set'] = ','.join(result['set'])
desc = result['description']
_instance['description'] = desc if desc else ''
_instance['match'] = ','.join(result['match'])
cont = result['continue']
_instance['continue'] = str(cont) if cont else ''
return _instance
def create(module):
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
module.log('Invoked create for eos_routemap[%s %s %s]'
% (name, action, seqno))
module.api('routemaps').create(name, action, seqno)
def remove(module):
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
module.log('Invoked remove for eos_routemap[%s %s %s]'
% (name, action, seqno))
module.api('routemaps').delete(name, action, seqno)
def set_description(module):
""" Configures the description for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
value = module.attributes['description']
module.log('Invoked set_description with %s for eos_routemap[%s %s %s]'
% (value, name, action, seqno))
if value == '':
module.node.api('routemaps').set_description(name, action, seqno,
disable=True)
else:
module.node.api('routemaps').set_description(name, action, seqno, value)
def set_continue(module):
""" Configures the continue value for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
try:
value = int(module.attributes['continue'])
except Exception:
value = None
module.log('Invoked set_continue for eos_routemap[%s %s %s]'
% (name, action, seqno))
if value is None:
module.node.api('routemaps').set_continue(name, action, seqno,
disable=True)
else:
module.node.api('routemaps').set_continue(name, action, seqno, value)
def set_match(module):
""" Configures the match statements for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
statements = module.attributes['match'].split(',')
module.log('Invoked set_match for eos_routemap[%s %s %s]'
% (name, action, seqno))
module.node.api('routemaps').set_match_statements(name, action, seqno,
statements)
def set_set(module):
""" Configures the set statements for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
statements = module.attributes['set'].split(',')
module.log('Invoked set_set for eos_routemap[%s %s %s]'
% (name, action, seqno))
module.node.api('routemaps').set_set_statements(name, action, seqno,
statements)
def main():
""" The main module routine called when the module is run by Ansible
"""
argument_spec = dict(
name=dict(required=True),
action=dict(default='permit', choices=['permit', 'deny']),
seqno=dict(required=True),
description=dict(),
match=dict(),
set=dict()
)
argument_spec['continue'] = dict()
module = EosAnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
module.flush(True)
main()
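# --- Illustrative sketch (not part of the original module) ---
# EosAnsibleModule.flush() builds its changeset as the set difference between
# the desired attributes and the current instance via dict item views
# (viewitems() above, since this module targets Python 2). A standalone sketch
# of the same idea in Python 3, with hypothetical routemap attribute values:
def compute_changeset(desired, current):
    """Return the (key, value) pairs of `desired` that differ from `current`."""
    return dict(desired.items() - current.items())

_desired = {'description': 'to-spine', 'continue': '20'}
_current = {'description': '', 'continue': '20'}
# Only the changed attribute survives the difference:
assert compute_changeset(_desired, _current) == {'description': 'to-spine'}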
| 33.1133
| 82
| 0.611475
| 11,540
| 0.57225
| 0
| 0
| 670
| 0.033224
| 0
| 0
| 8,028
| 0.398096
|
18cbef6584ee81c511138c2578efbf19d3e08e5c
| 890
|
py
|
Python
|
setup.py
|
colinfrei/furystoolbox
|
2a8613393a46ad6ae2ad2c2fa86fd255fea96796
|
[
"MIT"
] | 1
|
2020-01-03T00:32:35.000Z
|
2020-01-03T00:32:35.000Z
|
setup.py
|
colinfrei/furystoolbox
|
2a8613393a46ad6ae2ad2c2fa86fd255fea96796
|
[
"MIT"
] | 1
|
2020-02-08T08:54:31.000Z
|
2020-02-08T09:31:30.000Z
|
setup.py
|
colinfrei/furystoolbox
|
2a8613393a46ad6ae2ad2c2fa86fd255fea96796
|
[
"MIT"
] | 1
|
2020-02-08T06:54:29.000Z
|
2020-02-08T06:54:29.000Z
|
"""Setup configuration."""
import setuptools
from furystoolbox import __version__
with open("README.md", "r") as fh:
LONG = fh.read()
REQUIRES = ['click>=7.0',
'requests>=2.21.0',
'PyGithub>=1.43.4']
setuptools.setup(
name="furystoolbox",
version=__version__,
author="Joakim Sorensen",
author_email="ludeeus@gmail.com",
description="A collection of tools.",
long_description=LONG,
long_description_content_type="text/markdown",
url="https://github.com/ludeeus/furystoolbox",
install_requires=REQUIRES,
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
entry_points={
'console_scripts': [
'fury = furystoolbox.cli.cli:CLI'
]
}
)
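# --- Illustrative sketch (not part of the original setup.py) ---
# The console_scripts entry point above maps the `fury` command to
# furystoolbox.cli.cli:CLI. A minimal way to inspect that mapping at runtime
# (Python 3.10+ importlib.metadata API; the package must be installed for the
# lookup to return anything):
from importlib.metadata import entry_points

for ep in entry_points(group='console_scripts'):
    if ep.name == 'fury':
        # ep.value is the 'module:attr' string; ep.load() would import and
        # return the CLI object itself.
        print(ep.name, '->', ep.value)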
| 26.176471
| 50
| 0.62809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 381
| 0.42809
|
18cca0ce2ddedc77fe6c967bfef7de9a4fb88942
| 2,120
|
py
|
Python
|
pythran/tests/cases/sobelfilter.py
|
SylvainCorlay/pythran
|
908ec070d837baf77d828d01c3e35e2f4bfa2bfa
|
[
"BSD-3-Clause"
] | 1
|
2018-03-24T00:33:03.000Z
|
2018-03-24T00:33:03.000Z
|
pythran/tests/cases/sobelfilter.py
|
SylvainCorlay/pythran
|
908ec070d837baf77d828d01c3e35e2f4bfa2bfa
|
[
"BSD-3-Clause"
] | null | null | null |
pythran/tests/cases/sobelfilter.py
|
SylvainCorlay/pythran
|
908ec070d837baf77d828d01c3e35e2f4bfa2bfa
|
[
"BSD-3-Clause"
] | 1
|
2017-03-12T20:32:36.000Z
|
2017-03-12T20:32:36.000Z
|
#skip.runas import Image; im = Image.open("Scribus.gif"); image_list = list(im.getdata()); cols, rows = im.size; res = range(len(image_list)); sobelFilter(image_list, res, cols, rows)
#runas cols = 100; rows = 100 ;image_list=[x%10+y%20 for x in xrange(cols) for y in xrange(rows)]; sobelFilter(image_list, cols, rows)
#bench cols = 1000; rows = 500 ;image_list=[x%10+y%20 for x in xrange(cols) for y in xrange(rows)]; sobelFilter(image_list, cols, rows)
#pythran export sobelFilter(int list, int, int)
def sobelFilter(original_image, cols, rows):
edge_image = range(len(original_image))
for i in xrange(rows):
edge_image[i * cols] = 255
edge_image[((i + 1) * cols) - 1] = 255
for i in xrange(1, cols - 1):
edge_image[i] = 255
edge_image[i + ((rows - 1) * cols)] = 255
for iy in xrange(1, rows - 1):
for ix in xrange(1, cols - 1):
sum_x = 0
sum_y = 0
sum = 0
#x gradient approximation
sum_x += original_image[ix - 1 + (iy - 1) * cols] * -1
sum_x += original_image[ix + (iy - 1) * cols] * -2
sum_x += original_image[ix + 1 + (iy - 1) * cols] * -1
sum_x += original_image[ix - 1 + (iy + 1) * cols] * 1
sum_x += original_image[ix + (iy + 1) * cols] * 2
sum_x += original_image[ix + 1 + (iy + 1) * cols] * 1
sum_x = min(255, max(0, sum_x))
#y gradient approximation
sum_y += original_image[ix - 1 + (iy - 1) * cols] * 1
sum_y += original_image[ix + 1 + (iy - 1) * cols] * -1
sum_y += original_image[ix - 1 + (iy) * cols] * 2
sum_y += original_image[ix + 1 + (iy) * cols] * -2
sum_y += original_image[ix - 1 + (iy + 1) * cols] * 1
sum_y += original_image[ix + 1 + (iy + 1) * cols] * -1
sum_y = min(255, max(0, sum_y))
#GRADIENT MAGNITUDE APPROXIMATION
sum = abs(sum_x) + abs(sum_y)
#make edges black and background white
edge_image[ix + iy * cols] = 255 - (255 & sum)
return edge_image
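# --- Illustrative sketch (not part of the original pythran test case) ---
# The unrolled sums above are the classic 3x3 Sobel kernels applied to a
# row-major flattened image. A compact array equivalent for the interior
# pixels, assuming numpy/scipy are available (the original deliberately
# avoids them so pythran can compile plain Python):
import numpy as np
from scipy.ndimage import correlate

def sobel_filter_np(image):
    """image: 2D integer array; returns edges as black (0) on white (255)."""
    kx = np.array([[-1, -2, -1],
                   [ 0,  0,  0],
                   [ 1,  2,  1]])  # matches the sum_x terms (rows iy-1 and iy+1)
    ky = np.array([[ 1,  0, -1],
                   [ 2,  0, -2],
                   [ 1,  0, -1]])  # matches the sum_y terms (columns ix-1 and ix+1)
    gx = np.clip(correlate(image, kx), 0, 255)  # same clamp as min(255, max(0, sum_x))
    gy = np.clip(correlate(image, ky), 0, 255)
    mag = (np.abs(gx) + np.abs(gy)).astype(int)
    return 255 - (255 & mag)  # same invert-and-mask step; border handling differs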
| 49.302326
| 183
| 0.544811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 619
| 0.291981
|
18cd66ae12672c4f05fb7afeb5ea83419646d0b9
| 7,110
|
py
|
Python
|
occam_utils/occam_datasets.py
|
dschinagl/occam
|
f001cc3a0bf56687dc4c4bb79385f5d010cdd43e
|
[
"BSD-3-Clause"
] | 1
|
2022-03-29T07:05:23.000Z
|
2022-03-29T07:05:23.000Z
|
occam_utils/occam_datasets.py
|
dschinagl/occam
|
f001cc3a0bf56687dc4c4bb79385f5d010cdd43e
|
[
"BSD-3-Clause"
] | null | null | null |
occam_utils/occam_datasets.py
|
dschinagl/occam
|
f001cc3a0bf56687dc4c4bb79385f5d010cdd43e
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import torch
from spconv.pytorch.utils import PointToVoxel
from scipy.spatial.transform import Rotation
from pcdet.datasets import DatasetTemplate
class BaseDataset(DatasetTemplate):
"""
OpenPCDet dataset to load and preprocess the point cloud
"""
def __init__(self, data_config, class_names, occam_config):
"""
Parameters
----------
data_config : EasyDict
dataset cfg including data preprocessing properties (OpenPCDet)
class_names :
list of class names (OpenPCDet)
occam_config: EasyDict
sampling properties for attribution map generation, see cfg file
"""
super().__init__(dataset_cfg=data_config, class_names=class_names,
training=False)
self.occam_config = occam_config
def load_and_preprocess_pcl(self, source_file_path):
"""
load the given point cloud file and preprocess the data according to the OpenPCDet cfg
Parameters
----------
source_file_path : str
path to point cloud to analyze (bin or npy)
Returns
-------
pcl : ndarray (N, 4)
preprocessed point cloud (x, y, z, intensity)
"""
if source_file_path.split('.')[-1] == 'bin':
points = np.fromfile(source_file_path, dtype=np.float32)
points = points.reshape(-1, 4)
elif source_file_path.split('.')[-1] == 'npy':
points = np.load(source_file_path)
else:
raise NotImplementedError
# FOV crop is usually done using the image
if self.occam_config.FOV_CROP:
angles = np.abs(np.degrees(np.arctan2(points[:, 1], points[:, 0])))
mask = angles <= self.occam_config.FOV_ANGLE
points = points[mask, :]
input_dict = {
'points': points
}
data_dict = self.prepare_data(data_dict=input_dict)
pcl = data_dict['points']
return pcl
class OccamInferenceDataset(DatasetTemplate):
"""
OpenPCDet dataset for occam inference; in each iteration a sub-sampled
point cloud is generated according to the occam config
"""
def __init__(self, data_config, class_names, occam_config, pcl, nr_it, logger):
"""
Parameters
----------
data_config : EasyDict
dataset cfg including data preprocessing properties (OpenPCDet)
class_names :
list of class names (OpenPCDet)
occam_config: EasyDict
sampling properties for attribution map generation, see cfg file
pcl : ndarray (N, 4)
preprocessed full point cloud
nr_it : int
number of sub-sampling iterations
logger : Logger
"""
super().__init__(
dataset_cfg=data_config, class_names=class_names, training=False,
root_path=None, logger=logger
)
self.occam_config = occam_config
self.pcl = pcl
self.logger = logger
self.nr_it = nr_it
self.sampling_rand_rot = self.occam_config.SAMPLING.RANDOM_ROT
self.sampling_vx_size = np.array(self.occam_config.SAMPLING.VOXEL_SIZE)
self.lbda = self.occam_config.SAMPLING.LAMBDA # see paper
self.sampling_density_coeff = np.array(
self.occam_config.SAMPLING.DENSITY_DISTR_COEFF)
self.sampling_range = self.get_sampling_range(
rand_rot=self.sampling_rand_rot,
pcl=self.pcl,
vx_size=self.sampling_vx_size
)
self.voxel_generator = PointToVoxel(
vsize_xyz=list(self.sampling_vx_size),
coors_range_xyz=list(self.sampling_range),
num_point_features=3,
max_num_points_per_voxel=self.occam_config.SAMPLING.MAX_PTS_PER_VOXEL,
max_num_voxels=self.occam_config.SAMPLING.MAX_VOXELS
)
def get_sampling_range(self, rand_rot, pcl, vx_size):
"""
compute min/max sampling range for given random rotation
Parameters
----------
rand_rot : float
max random rotation before sampling (+/-) in degrees
pcl : ndarray (N, 4)
full point cloud
vx_size : ndarray (3)
voxel size for sampling in x, y, z
Returns
-------
sampling_range : ndarray (6)
min/max sampling range for given rotation
"""
rotmat_pos = Rotation.from_rotvec([0, 0, rand_rot], degrees=True)
rotmat_neg = Rotation.from_rotvec([0, 0, -rand_rot], degrees=True)
rot_pts = np.concatenate(
(np.matmul(rotmat_pos.as_matrix(), pcl[:, :3].T),
np.matmul(rotmat_neg.as_matrix(), pcl[:, :3].T)), axis=1)
min_grid = np.floor(np.min(rot_pts, axis=1) / vx_size) * vx_size - vx_size
max_grid = np.ceil(np.max(rot_pts, axis=1) / vx_size) * vx_size + vx_size
sampling_range = np.concatenate((min_grid, max_grid))
return sampling_range
def __len__(self):
return self.nr_it
def __getitem__(self, index):
if index == self.nr_it:
raise IndexError
# randomly rotate and translate full pcl
rand_transl = np.random.rand(1, 3) * (self.sampling_vx_size[None, :])
rand_transl -= self.sampling_vx_size[None, :] / 2
rand_rot_ = np.random.rand(1) * self.sampling_rand_rot * 2 \
- self.sampling_rand_rot
rand_rot_mat = Rotation.from_rotvec([0, 0, rand_rot_[0]], degrees=True)
rand_rot_mat = rand_rot_mat.as_matrix()
rand_rot_pcl = np.matmul(rand_rot_mat, self.pcl[:, :3].T).T
rand_rot_transl_pcl = rand_rot_pcl + rand_transl
rand_rot_transl_pcl = np.ascontiguousarray(rand_rot_transl_pcl)
# voxelize full pcl
_, vx_coord, _, pt_vx_id = self.voxel_generator.generate_voxel_with_id(
torch.from_numpy(rand_rot_transl_pcl))
vx_coord, pt_vx_id = vx_coord.numpy(), pt_vx_id.numpy()
vx_coord = vx_coord[:, [2, 1, 0]]
# compute voxel center in original pcl
vx_orig_coord = vx_coord * self.sampling_vx_size[None, :]
vx_orig_coord += self.sampling_range[:3][None, :]
vx_orig_coord += self.sampling_vx_size[None, :] / 2
vx_orig_coord -= rand_transl
vx_orig_coord = np.matmul(np.linalg.inv(rand_rot_mat), vx_orig_coord.T).T
vx_dist = np.linalg.norm(vx_orig_coord, axis=1)
vx_keep_prob = self.lbda * (
np.power(vx_dist, 2) * self.sampling_density_coeff[0]
+ vx_dist * self.sampling_density_coeff[1]
+ self.sampling_density_coeff[2])
vx_keep_ids = np.where(np.random.rand(vx_keep_prob.shape[0]) < vx_keep_prob)[0]
pt_keep_mask = np.in1d(pt_vx_id, vx_keep_ids)
input_dict = {
'points': self.pcl[pt_keep_mask, :],
'mask': pt_keep_mask
}
data_dict = self.prepare_data(data_dict=input_dict)
return data_dict
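# --- Illustrative sketch (not part of the original dataset classes) ---
# __getitem__ keeps each voxel with a distance-dependent probability
# p(d) = lambda * (c2*d**2 + c1*d + c0), where d is the distance of the voxel
# centre from the sensor and (c2, c1, c0) come from
# occam_config.SAMPLING.DENSITY_DISTR_COEFF. A standalone sketch of that
# sampling step with made-up coefficients:
import numpy as np

def voxel_keep_mask(vx_centers, lbda, coeff):
    """vx_centers: (M, 3) voxel centres; coeff: (c2, c1, c0) of the density polynomial."""
    d = np.linalg.norm(vx_centers, axis=1)
    keep_prob = lbda * (coeff[0] * d ** 2 + coeff[1] * d + coeff[2])
    return np.random.rand(d.shape[0]) < keep_prob

# Hypothetical voxel centres and coefficients, purely for illustration:
_centers = np.array([[5.0, 0.0, 0.0], [20.0, 5.0, -1.0], [60.0, -10.0, 2.0]])
print(voxel_keep_mask(_centers, lbda=3.0, coeff=(1e-4, 1e-3, 5e-2)))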
| 35.909091
| 87
| 0.610689
| 6,936
| 0.975527
| 0
| 0
| 0
| 0
| 0
| 0
| 2,112
| 0.297046
|
18ceea770cb8f269d967cd89240a6533d6cf62a5
| 5,840
|
py
|
Python
|
utils/calibration_module.py
|
choushunn/holography_test
|
79100f8b955683afd47e63e2762d6945d6b14e34
|
[
"CC-BY-3.0"
] | null | null | null |
utils/calibration_module.py
|
choushunn/holography_test
|
79100f8b955683afd47e63e2762d6945d6b14e34
|
[
"CC-BY-3.0"
] | null | null | null |
utils/calibration_module.py
|
choushunn/holography_test
|
79100f8b955683afd47e63e2762d6945d6b14e34
|
[
"CC-BY-3.0"
] | 1
|
2021-12-24T04:18:22.000Z
|
2021-12-24T04:18:22.000Z
|
"""
This is the script containing the calibration module, basically calculating homography matrix.
This code and data is released under the Creative Commons Attribution-NonCommercial 4.0 International license (CC BY-NC.) In a nutshell:
# The license is only for non-commercial use (commercial licenses can be obtained from Stanford).
# The material is provided as-is, with no warranties whatsoever.
# If you publish any code, data, or scientific work based on this, please cite our work.
Technical Paper:
Y. Peng, S. Choi, N. Padmanaban, G. Wetzstein. Neural Holography with Camera-in-the-loop Training. ACM TOG (SIGGRAPH Asia), 2020.
"""
import cv2
import matplotlib.pyplot as plt
import numpy as np
def circle_detect(captured_img, num_circles, spacing, pad_pixels=(0., 0.), show_preview=True):
"""
Detects the circles of a circle board pattern
:param captured_img: captured image
:param num_circles: a tuple of integers, (num_circle_x, num_circle_y)
:param spacing: a tuple of integers, in pixels, (space between circles in x, space btw circs in y direction)
:param show_preview: boolean, default True
:param pad_pixels: coordinate of the top-left corner of the warped image;
the same amount of padding is assumed on the other side.
:return: a tuple, (found_dots, H)
found_dots: boolean, indicating success of calibration
H: a 3x3 homography matrix (numpy)
"""
# Binarization
# org_copy = org.copy() # Otherwise, we write on the original image!
img = (captured_img.copy() * 255).astype(np.uint8)
if len(img.shape) > 2:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.medianBlur(img, 15)
img_gray = img.copy()
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 121, 0)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
img = 255 - img
# Blob detection
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.filterByColor = True
params.minThreshold = 128
# Filter by Area.
params.filterByArea = True
params.minArea = 50
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.785
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.87
# Filter by Inertia
params.filterByInertia = False
params.minInertiaRatio = 0.01
detector = cv2.SimpleBlobDetector_create(params)
# Detecting keypoints
# this is redundant for what comes next, but gives us access to the detected dots for debug
keypoints = detector.detect(img)
found_dots, centers = cv2.findCirclesGrid(img, num_circles,
blobDetector=detector, flags=cv2.CALIB_CB_SYMMETRIC_GRID)
# Drawing the keypoints
cv2.drawChessboardCorners(captured_img, num_circles, centers, found_dots)
img_gray = cv2.drawKeypoints(img_gray, keypoints, np.array([]), (0, 255, 0),
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Find transformation
H = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]], dtype=np.float32)
if found_dots:
# Generate reference points to compute the homography
ref_pts = np.zeros((num_circles[0] * num_circles[1], 1, 2), np.float32)
pos = 0
for i in range(0, num_circles[1]):
for j in range(0, num_circles[0]):
ref_pts[pos, 0, :] = spacing * np.array([j, i]) + np.array(pad_pixels)
pos += 1
H, mask = cv2.findHomography(centers, ref_pts, cv2.RANSAC, 1)
if show_preview:
dsize = [int((num_circs - 1) * space + 2 * pad_pixs)
for num_circs, space, pad_pixs in zip(num_circles, spacing, pad_pixels)]
captured_img_warp = cv2.warpPerspective(captured_img, H, tuple(dsize))
if show_preview:
fig = plt.figure()
ax = fig.add_subplot(223)
ax.imshow(img_gray, cmap='gray')
ax2 = fig.add_subplot(221)
ax2.imshow(img, cmap='gray')
ax3 = fig.add_subplot(222)
ax3.imshow(captured_img, cmap='gray')
if found_dots:
ax4 = fig.add_subplot(224)
ax4.imshow(captured_img_warp, cmap='gray')
plt.show()
return found_dots, H
class Calibration:
def __init__(self, num_circles=(21, 12), spacing_size=(80, 80), pad_pixels=(0, 0)):
self.num_circles = num_circles
self.spacing_size = spacing_size
self.pad_pixels = pad_pixels
self.h_transform = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
def calibrate(self, img, show_preview=True):
found_corners, self.h_transform = circle_detect(img, self.num_circles,
self.spacing_size, self.pad_pixels, show_preview)
return found_corners
def get_transform(self):
return self.h_transform
def __call__(self, input_img, img_size=None):
"""
This forward pass returns the warped image.
:param input_img: A numpy grayscale image shape of [H, W].
:param img_size: output size, default None.
:return: output_img: warped image with pre-calculated homography and destination size.
"""
if img_size is None:
img_size = [int((num_circs - 1) * space + 2 * pad_pixs)
for num_circs, space, pad_pixs in zip(self.num_circles, self.spacing_size, self.pad_pixels)]
output_img = cv2.warpPerspective(input_img, self.h_transform, tuple(img_size))
return output_img
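# --- Illustrative sketch (not part of the original module) ---
# Typical use of the Calibration wrapper above: detect the circle grid once on
# a captured pattern, then reuse the homography to warp later captures.
# The file name below is hypothetical; cv2/np come from the module imports above.
if __name__ == '__main__':
    captured = cv2.imread('captured_calibration_pattern.png', cv2.IMREAD_GRAYSCALE)
    if captured is None:
        raise SystemExit('calibration image not found')
    captured = captured.astype(np.float32) / 255.0  # circle_detect expects values in [0, 1]

    calib = Calibration(num_circles=(21, 12), spacing_size=(80, 80), pad_pixels=(0, 0))
    if calib.calibrate(captured, show_preview=False):
        warped = calib(captured)  # warp with the estimated homography
        print('homography:\n', calib.get_transform())
        print('warped shape:', warped.shape)
    else:
        print('circle grid not found; check exposure/thresholding')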
| 37.677419
| 136
| 0.638185
| 1,374
| 0.235274
| 0
| 0
| 0
| 0
| 0
| 0
| 2,004
| 0.343151
|
18ceea954bda99122d17bf7b1a926a3bf8227da9
| 270
|
py
|
Python
|
Main/apps.py
|
Naretto95/Django-Vault
|
36fac69873c844bf72732ff635513f0204b7d61a
|
[
"MIT"
] | null | null | null |
Main/apps.py
|
Naretto95/Django-Vault
|
36fac69873c844bf72732ff635513f0204b7d61a
|
[
"MIT"
] | null | null | null |
Main/apps.py
|
Naretto95/Django-Vault
|
36fac69873c844bf72732ff635513f0204b7d61a
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.contrib.admin.apps import AdminConfig
class AdminSiteConfig(AdminConfig):
default_site = 'Main.admin.MyAdminSite'
class MainConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'Main'
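# --- Illustrative sketch (not part of the original apps.py) ---
# For AdminSiteConfig to replace Django's default admin site, the project
# settings reference it instead of the stock 'django.contrib.admin' entry.
# A minimal settings fragment (the surrounding entries are the usual Django
# defaults, shown only for context):
INSTALLED_APPS = [
    'Main.apps.AdminSiteConfig',  # custom AdminConfig -> installs Main.admin.MyAdminSite
    'Main.apps.MainConfig',       # the project app itself
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]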
| 27
| 56
| 0.781481
| 182
| 0.674074
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.225926
|
18d163664110bd63d5393ef2d5efd9b345f52613
| 38
|
py
|
Python
|
researchutils/task/__init__.py
|
yuishihara/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | 1
|
2018-09-06T00:54:49.000Z
|
2018-09-06T00:54:49.000Z
|
researchutils/task/__init__.py
|
yuishihara/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | 28
|
2018-08-25T03:54:30.000Z
|
2018-10-14T12:09:47.000Z
|
researchutils/task/__init__.py
|
yuishihara/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | null | null | null |
from researchutils.task import plotter
| 38
| 38
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
18d43cd8f5f88ffb19e9b4a5bb9e768fb2646c67
| 220,532
|
py
|
Python
|
venv/lib/python3.8/site-packages/aws_cdk/aws_kinesis/__init__.py
|
harun-vit/aws-cdk-pipelines-demo
|
7e7faeee112c3dca718613fa8a1fba80d2116bac
|
[
"MIT-0"
] | null | null | null |
venv/lib/python3.8/site-packages/aws_cdk/aws_kinesis/__init__.py
|
harun-vit/aws-cdk-pipelines-demo
|
7e7faeee112c3dca718613fa8a1fba80d2116bac
|
[
"MIT-0"
] | null | null | null |
venv/lib/python3.8/site-packages/aws_cdk/aws_kinesis/__init__.py
|
harun-vit/aws-cdk-pipelines-demo
|
7e7faeee112c3dca718613fa8a1fba80d2116bac
|
[
"MIT-0"
] | null | null | null |
'''
# Amazon Kinesis Construct Library
<!--BEGIN STABILITY BANNER-->---


---
<!--END STABILITY BANNER-->
[Amazon Kinesis](https://docs.aws.amazon.com/streams/latest/dev/introduction.html) provides collection and processing of large
[streams](https://aws.amazon.com/streaming-data/) of data records in real time. Kinesis data streams can be used for rapid and continuous data
intake and aggregation.
## Table Of Contents
* [Streams](#streams)
* [Encryption](#encryption)
* [Import](#import)
* [Permission Grants](#permission-grants)
* [Read Permissions](#read-permissions)
* [Write Permissions](#write-permissions)
* [Custom Permissions](#custom-permissions)
* [Metrics](#metrics)
## Streams
Amazon Kinesis Data Streams ingests a large amount of data in real time, durably stores the data, and makes the data available for consumption.
Using the CDK, a new Kinesis stream can be created as part of the stack using the construct's constructor. You may specify the `streamName` to give
your own identifier to the stream. If not, CloudFormation will generate a name.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyFirstStream",
stream_name="my-awesome-stream"
)
```
You can also specify properties such as `shardCount` to indicate how many shards the stream should use and a `retentionPeriod`
to specify how long the data in the shards should remain accessible.
Read more at [Creating and Managing Streams](https://docs.aws.amazon.com/streams/latest/dev/working-with-streams.html)
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyFirstStream",
stream_name="my-awesome-stream",
shard_count=3,
retention_period=Duration.hours(48)
)
```
### Encryption
[Stream encryption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesis-stream-streamencryption.html) enables
server-side encryption using an AWS KMS key for a specified stream.
Encryption is enabled by default on your stream with the master key owned by Kinesis Data Streams in regions where it is supported.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyEncryptedStream")
```
You can enable encryption on your stream with a user-managed key by specifying the `encryption` property.
A KMS key will be created for you and associated with the stream.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS
)
```
You can also supply your own external KMS key to use for stream encryption by specifying the `encryptionKey` property.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
import aws_cdk.aws_kms as kms
key = kms.Key(self, "MyKey")
Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS,
encryption_key=key
)
```
### Import
Any Kinesis stream that has been created outside the stack can be imported into your CDK app.
Streams can be imported by their ARN via the `Stream.fromStreamArn()` API
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
stack = Stack(app, "MyStack")
imported_stream = Stream.from_stream_arn(stack, "ImportedStream", "arn:aws:kinesis:us-east-2:123456789012:stream/f3j09j2230j")
```
Encrypted Streams can also be imported by their attributes via the `Stream.fromStreamAttributes()` API
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
from aws_cdk.aws_kms import Key
stack = Stack(app, "MyStack")
imported_stream = Stream.from_stream_attributes(stack, "ImportedEncryptedStream",
stream_arn="arn:aws:kinesis:us-east-2:123456789012:stream/f3j09j2230j",
encryption_key=kms.Key.from_key_arn("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012")
)
```
### Permission Grants
IAM roles, users or groups which need to be able to work with Amazon Kinesis streams at runtime should be granted IAM permissions.
Any object that implements the `IGrantable` interface (has an associated principal) can be granted permissions by calling:
* `grantRead(principal)` - grants the principal read access
* `grantWrite(principal)` - grants the principal write permissions to a Stream
* `grantReadWrite(principal)` - grants principal read and write permissions
#### Read Permissions
Grant `read` access to a stream by calling the `grantRead()` API.
If the stream has an encryption key, read permissions will also be granted to the key.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
lambda_role = iam.Role(self, "Role",
assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
description="Example role..."
)
stream = Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS
)
# give lambda permissions to read stream
stream.grant_read(lambda_role)
```
The following read permissions are provided to a service principal by the `grantRead()` API:
* `kinesis:DescribeStreamSummary`
* `kinesis:GetRecords`
* `kinesis:GetShardIterator`
* `kinesis:ListShards`
* `kinesis:SubscribeToShard`
#### Write Permissions
Grant `write` permissions to a stream by calling the `grantWrite()` API.
If the stream has an encryption key, write permissions will also be granted to the key.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
lambda_role = iam.Role(self, "Role",
assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
description="Example role..."
)
stream = Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS
)
# give lambda permissions to write to stream
stream.grant_write(lambda_role)
```
The following write permissions are provided to a service principal by the `grantWrite()` API:
* `kinesis:ListShards`
* `kinesis:PutRecord`
* `kinesis:PutRecords`
#### Custom Permissions
You can add any set of permissions to a stream by calling the `grant()` API.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
user = iam.User(stack, "MyUser")
stream = Stream(stack, "MyStream")
# give my user permissions to list shards
stream.grant(user, "kinesis:ListShards")
```
### Metrics
You can use common metrics from your stream to create alarms and/or dashboards. The `stream.metric('MetricName')` method creates a metric with the stream namespace and dimension. You can also use pre-defined methods like `stream.metricGetRecordsSuccess()`. To find out more about Kinesis metrics, check [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html).
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
stream = Stream(stack, "MyStream")
# Using base metric method passing the metric name
stream.metric("GetRecords.Success")
# using pre-defined metric method
stream.metric_get_records_success()
# using pre-defined and overriding the statistic
stream.metric_get_records_success(statistic="Maximum")
```
'''
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import aws_cdk.aws_cloudwatch
import aws_cdk.aws_iam
import aws_cdk.aws_kms
import aws_cdk.core
import constructs
@jsii.implements(aws_cdk.core.IInspectable)
class CfnStream(
aws_cdk.core.CfnResource,
metaclass=jsii.JSIIMeta,
jsii_type="@aws-cdk/aws-kinesis.CfnStream",
):
'''A CloudFormation ``AWS::Kinesis::Stream``.
:cloudformationResource: AWS::Kinesis::Stream
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html
'''
def __init__(
self,
scope: aws_cdk.core.Construct,
id: builtins.str,
*,
shard_count: jsii.Number,
name: typing.Optional[builtins.str] = None,
retention_period_hours: typing.Optional[jsii.Number] = None,
stream_encryption: typing.Optional[typing.Union["CfnStream.StreamEncryptionProperty", aws_cdk.core.IResolvable]] = None,
tags: typing.Optional[typing.Sequence[aws_cdk.core.CfnTag]] = None,
) -> None:
'''Create a new ``AWS::Kinesis::Stream``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param shard_count: ``AWS::Kinesis::Stream.ShardCount``.
:param name: ``AWS::Kinesis::Stream.Name``.
:param retention_period_hours: ``AWS::Kinesis::Stream.RetentionPeriodHours``.
:param stream_encryption: ``AWS::Kinesis::Stream.StreamEncryption``.
:param tags: ``AWS::Kinesis::Stream.Tags``.
'''
props = CfnStreamProps(
shard_count=shard_count,
name=name,
retention_period_hours=retention_period_hours,
stream_encryption=stream_encryption,
tags=tags,
)
jsii.create(CfnStream, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
'''Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
'''
return typing.cast(None, jsii.invoke(self, "inspect", [inspector]))
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
'''
:param props: -
'''
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
@jsii.python.classproperty # type: ignore[misc]
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
'''The CloudFormation resource type name for this resource class.'''
return typing.cast(builtins.str, jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrArn")
def attr_arn(self) -> builtins.str:
'''
:cloudformationAttribute: Arn
'''
return typing.cast(builtins.str, jsii.get(self, "attrArn"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "cfnProperties"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="tags")
def tags(self) -> aws_cdk.core.TagManager:
'''``AWS::Kinesis::Stream.Tags``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-tags
'''
return typing.cast(aws_cdk.core.TagManager, jsii.get(self, "tags"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="shardCount")
def shard_count(self) -> jsii.Number:
'''``AWS::Kinesis::Stream.ShardCount``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-shardcount
'''
return typing.cast(jsii.Number, jsii.get(self, "shardCount"))
@shard_count.setter
def shard_count(self, value: jsii.Number) -> None:
jsii.set(self, "shardCount", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="name")
def name(self) -> typing.Optional[builtins.str]:
'''``AWS::Kinesis::Stream.Name``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-name
'''
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "name"))
@name.setter
def name(self, value: typing.Optional[builtins.str]) -> None:
jsii.set(self, "name", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="retentionPeriodHours")
def retention_period_hours(self) -> typing.Optional[jsii.Number]:
'''``AWS::Kinesis::Stream.RetentionPeriodHours``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-retentionperiodhours
'''
return typing.cast(typing.Optional[jsii.Number], jsii.get(self, "retentionPeriodHours"))
@retention_period_hours.setter
def retention_period_hours(self, value: typing.Optional[jsii.Number]) -> None:
jsii.set(self, "retentionPeriodHours", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="streamEncryption")
def stream_encryption(
self,
) -> typing.Optional[typing.Union["CfnStream.StreamEncryptionProperty", aws_cdk.core.IResolvable]]:
'''``AWS::Kinesis::Stream.StreamEncryption``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-streamencryption
'''
return typing.cast(typing.Optional[typing.Union["CfnStream.StreamEncryptionProperty", aws_cdk.core.IResolvable]], jsii.get(self, "streamEncryption"))
@stream_encryption.setter
def stream_encryption(
self,
value: typing.Optional[typing.Union["CfnStream.StreamEncryptionProperty", aws_cdk.core.IResolvable]],
) -> None:
jsii.set(self, "streamEncryption", value)
@jsii.data_type(
jsii_type="@aws-cdk/aws-kinesis.CfnStream.StreamEncryptionProperty",
jsii_struct_bases=[],
name_mapping={"encryption_type": "encryptionType", "key_id": "keyId"},
)
class StreamEncryptionProperty:
def __init__(
self,
*,
encryption_type: builtins.str,
key_id: builtins.str,
) -> None:
'''
:param encryption_type: ``CfnStream.StreamEncryptionProperty.EncryptionType``.
:param key_id: ``CfnStream.StreamEncryptionProperty.KeyId``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesis-stream-streamencryption.html
'''
self._values: typing.Dict[str, typing.Any] = {
"encryption_type": encryption_type,
"key_id": key_id,
}
@builtins.property
def encryption_type(self) -> builtins.str:
'''``CfnStream.StreamEncryptionProperty.EncryptionType``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesis-stream-streamencryption.html#cfn-kinesis-stream-streamencryption-encryptiontype
'''
result = self._values.get("encryption_type")
assert result is not None, "Required property 'encryption_type' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def key_id(self) -> builtins.str:
'''``CfnStream.StreamEncryptionProperty.KeyId``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesis-stream-streamencryption.html#cfn-kinesis-stream-streamencryption-keyid
'''
result = self._values.get("key_id")
assert result is not None, "Required property 'key_id' is missing"
return typing.cast(builtins.str, result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "StreamEncryptionProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.implements(aws_cdk.core.IInspectable)
class CfnStreamConsumer(
aws_cdk.core.CfnResource,
metaclass=jsii.JSIIMeta,
jsii_type="@aws-cdk/aws-kinesis.CfnStreamConsumer",
):
'''A CloudFormation ``AWS::Kinesis::StreamConsumer``.
:cloudformationResource: AWS::Kinesis::StreamConsumer
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html
'''
def __init__(
self,
scope: aws_cdk.core.Construct,
id: builtins.str,
*,
consumer_name: builtins.str,
stream_arn: builtins.str,
) -> None:
'''Create a new ``AWS::Kinesis::StreamConsumer``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param consumer_name: ``AWS::Kinesis::StreamConsumer.ConsumerName``.
:param stream_arn: ``AWS::Kinesis::StreamConsumer.StreamARN``.
'''
props = CfnStreamConsumerProps(
consumer_name=consumer_name, stream_arn=stream_arn
)
jsii.create(CfnStreamConsumer, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
'''Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
'''
return typing.cast(None, jsii.invoke(self, "inspect", [inspector]))
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
'''
:param props: -
'''
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
@jsii.python.classproperty # type: ignore[misc]
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
'''The CloudFormation resource type name for this resource class.'''
return typing.cast(builtins.str, jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrConsumerArn")
def attr_consumer_arn(self) -> builtins.str:
'''
:cloudformationAttribute: ConsumerARN
'''
return typing.cast(builtins.str, jsii.get(self, "attrConsumerArn"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrConsumerCreationTimestamp")
def attr_consumer_creation_timestamp(self) -> builtins.str:
'''
:cloudformationAttribute: ConsumerCreationTimestamp
'''
return typing.cast(builtins.str, jsii.get(self, "attrConsumerCreationTimestamp"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrConsumerName")
def attr_consumer_name(self) -> builtins.str:
'''
:cloudformationAttribute: ConsumerName
'''
return typing.cast(builtins.str, jsii.get(self, "attrConsumerName"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrConsumerStatus")
def attr_consumer_status(self) -> builtins.str:
'''
:cloudformationAttribute: ConsumerStatus
'''
return typing.cast(builtins.str, jsii.get(self, "attrConsumerStatus"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="attrStreamArn")
def attr_stream_arn(self) -> builtins.str:
'''
:cloudformationAttribute: StreamARN
'''
return typing.cast(builtins.str, jsii.get(self, "attrStreamArn"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "cfnProperties"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="consumerName")
def consumer_name(self) -> builtins.str:
'''``AWS::Kinesis::StreamConsumer.ConsumerName``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html#cfn-kinesis-streamconsumer-consumername
'''
return typing.cast(builtins.str, jsii.get(self, "consumerName"))
@consumer_name.setter
def consumer_name(self, value: builtins.str) -> None:
jsii.set(self, "consumerName", value)
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="streamArn")
def stream_arn(self) -> builtins.str:
'''``AWS::Kinesis::StreamConsumer.StreamARN``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html#cfn-kinesis-streamconsumer-streamarn
'''
return typing.cast(builtins.str, jsii.get(self, "streamArn"))
@stream_arn.setter
def stream_arn(self, value: builtins.str) -> None:
jsii.set(self, "streamArn", value)
@jsii.data_type(
jsii_type="@aws-cdk/aws-kinesis.CfnStreamConsumerProps",
jsii_struct_bases=[],
name_mapping={"consumer_name": "consumerName", "stream_arn": "streamArn"},
)
class CfnStreamConsumerProps:
def __init__(
self,
*,
consumer_name: builtins.str,
stream_arn: builtins.str,
) -> None:
'''Properties for defining a ``AWS::Kinesis::StreamConsumer``.
:param consumer_name: ``AWS::Kinesis::StreamConsumer.ConsumerName``.
:param stream_arn: ``AWS::Kinesis::StreamConsumer.StreamARN``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html
'''
self._values: typing.Dict[str, typing.Any] = {
"consumer_name": consumer_name,
"stream_arn": stream_arn,
}
@builtins.property
def consumer_name(self) -> builtins.str:
'''``AWS::Kinesis::StreamConsumer.ConsumerName``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html#cfn-kinesis-streamconsumer-consumername
'''
result = self._values.get("consumer_name")
assert result is not None, "Required property 'consumer_name' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def stream_arn(self) -> builtins.str:
'''``AWS::Kinesis::StreamConsumer.StreamARN``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html#cfn-kinesis-streamconsumer-streamarn
'''
result = self._values.get("stream_arn")
assert result is not None, "Required property 'stream_arn' is missing"
return typing.cast(builtins.str, result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnStreamConsumerProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="@aws-cdk/aws-kinesis.CfnStreamProps",
jsii_struct_bases=[],
name_mapping={
"shard_count": "shardCount",
"name": "name",
"retention_period_hours": "retentionPeriodHours",
"stream_encryption": "streamEncryption",
"tags": "tags",
},
)
class CfnStreamProps:
def __init__(
self,
*,
shard_count: jsii.Number,
name: typing.Optional[builtins.str] = None,
retention_period_hours: typing.Optional[jsii.Number] = None,
stream_encryption: typing.Optional[typing.Union[CfnStream.StreamEncryptionProperty, aws_cdk.core.IResolvable]] = None,
tags: typing.Optional[typing.Sequence[aws_cdk.core.CfnTag]] = None,
) -> None:
'''Properties for defining a ``AWS::Kinesis::Stream``.
:param shard_count: ``AWS::Kinesis::Stream.ShardCount``.
:param name: ``AWS::Kinesis::Stream.Name``.
:param retention_period_hours: ``AWS::Kinesis::Stream.RetentionPeriodHours``.
:param stream_encryption: ``AWS::Kinesis::Stream.StreamEncryption``.
:param tags: ``AWS::Kinesis::Stream.Tags``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html
'''
self._values: typing.Dict[str, typing.Any] = {
"shard_count": shard_count,
}
if name is not None:
self._values["name"] = name
if retention_period_hours is not None:
self._values["retention_period_hours"] = retention_period_hours
if stream_encryption is not None:
self._values["stream_encryption"] = stream_encryption
if tags is not None:
self._values["tags"] = tags
@builtins.property
def shard_count(self) -> jsii.Number:
'''``AWS::Kinesis::Stream.ShardCount``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-shardcount
'''
result = self._values.get("shard_count")
assert result is not None, "Required property 'shard_count' is missing"
return typing.cast(jsii.Number, result)
@builtins.property
def name(self) -> typing.Optional[builtins.str]:
'''``AWS::Kinesis::Stream.Name``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-name
'''
result = self._values.get("name")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def retention_period_hours(self) -> typing.Optional[jsii.Number]:
'''``AWS::Kinesis::Stream.RetentionPeriodHours``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-retentionperiodhours
'''
result = self._values.get("retention_period_hours")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def stream_encryption(
self,
) -> typing.Optional[typing.Union[CfnStream.StreamEncryptionProperty, aws_cdk.core.IResolvable]]:
'''``AWS::Kinesis::Stream.StreamEncryption``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-streamencryption
'''
result = self._values.get("stream_encryption")
return typing.cast(typing.Optional[typing.Union[CfnStream.StreamEncryptionProperty, aws_cdk.core.IResolvable]], result)
@builtins.property
def tags(self) -> typing.Optional[typing.List[aws_cdk.core.CfnTag]]:
'''``AWS::Kinesis::Stream.Tags``.
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-tags
'''
result = self._values.get("tags")
return typing.cast(typing.Optional[typing.List[aws_cdk.core.CfnTag]], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnStreamProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.interface(jsii_type="@aws-cdk/aws-kinesis.IStream")
class IStream(aws_cdk.core.IResource, typing_extensions.Protocol):
'''A Kinesis Stream.'''
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="streamArn")
def stream_arn(self) -> builtins.str:
'''The ARN of the stream.
:attribute: true
'''
...
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="streamName")
def stream_name(self) -> builtins.str:
'''The name of the stream.
:attribute: true
'''
...
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="encryptionKey")
def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:
'''Optional KMS encryption key associated with this stream.'''
...
@jsii.member(jsii_name="grant")
def grant(
self,
grantee: aws_cdk.aws_iam.IGrantable,
*actions: builtins.str,
) -> aws_cdk.aws_iam.Grant:
'''Grant the indicated permissions on this stream to the provided IAM principal.
:param grantee: -
:param actions: -
'''
...
@jsii.member(jsii_name="grantRead")
def grant_read(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
'''Grant read permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to use the key to decrypt the
contents of the stream will also be granted.
:param grantee: -
'''
...
@jsii.member(jsii_name="grantReadWrite")
def grant_read_write(
self,
grantee: aws_cdk.aws_iam.IGrantable,
) -> aws_cdk.aws_iam.Grant:
'''Grants read/write permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to use the key for
encrypt/decrypt will also be granted.
:param grantee: -
'''
...
@jsii.member(jsii_name="grantWrite")
def grant_write(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
'''Grant write permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to use the key to encrypt the
contents of the stream will also be granted.
:param grantee: -
'''
...
@jsii.member(jsii_name="metric")
def metric(
self,
metric_name: builtins.str,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''Return stream metric based from its metric name.
:param metric_name: name of the stream metric.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricGetRecords")
def metric_get_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records retrieved from the shard, measured over the specified time period.
Minimum, Maximum, and
Average statistics represent the records in a single GetRecords operation for the stream in the specified time
period.
The metric defaults to average over 5 minutes; it can be changed by passing the ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricGetRecordsBytes")
def metric_get_records_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes retrieved from the Kinesis stream, measured over the specified time period.
Minimum, Maximum,
and Average statistics represent the bytes in a single GetRecords operation for the stream in the specified time
period.
The metric defaults to average over 5 minutes; it can be changed by passing the ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricGetRecordsIteratorAgeMilliseconds")
def metric_get_records_iterator_age_milliseconds(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The age of the last record in all GetRecords calls made against a Kinesis stream, measured over the specified time period.
Age is the difference between the current time and when the last record of the GetRecords call was written
to the stream. The Minimum and Maximum statistics can be used to track the progress of Kinesis consumer
applications. A value of zero indicates that the records being read are completely caught up with the stream.
The metric defaults to maximum over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricGetRecordsLatency")
def metric_get_records_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The time taken per GetRecords operation, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricGetRecordsSuccess")
def metric_get_records_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful GetRecords operations per stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricIncomingBytes")
def metric_incoming_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes successfully put to the Kinesis stream over the specified time period.
This metric includes
bytes from PutRecord and PutRecords operations. Minimum, Maximum, and Average statistics represent the bytes in a
single put operation for the stream in the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricIncomingRecords")
def metric_incoming_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records successfully put to the Kinesis stream over the specified time period.
This metric includes
record counts from PutRecord and PutRecords operations. Minimum, Maximum, and Average statistics represent the
records in a single put operation for the stream in the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
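# NOTE (editorial usage sketch, not part of the generated bindings): the incoming
# records/bytes metrics above are commonly graphed side by side to watch stream
# throughput against shard capacity. `stream` (a concrete IStream) and `dashboard`
# are assumed to exist elsewhere in the defining stack.
#
#   import aws_cdk.aws_cloudwatch as cloudwatch
#
#   dashboard.add_widgets(
#       cloudwatch.GraphWidget(
#           title="Stream throughput",
#           left=[stream.metric_incoming_records(), stream.metric_incoming_bytes()],
#       )
#   )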
@jsii.member(jsii_name="metricPutRecordBytes")
def metric_put_record_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes put to the Kinesis stream using the PutRecord operation over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricPutRecordLatency")
def metric_put_record_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The time taken per PutRecord operation, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricPutRecordsBytes")
def metric_put_records_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes put to the Kinesis stream using the PutRecords operation over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricPutRecordsFailedRecords")
def metric_put_records_failed_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to internal failures in a PutRecords operation per Kinesis data stream, measured over the specified time period.
Occasional internal failures are to be expected and should be retried.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
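# NOTE (editorial usage sketch, not part of the generated bindings): producers are
# expected to retry the failed subset of a PutRecords call, but a sustained failure
# count is often worth alarming on. `stream`, the enclosing `self` scope and the
# threshold of 100 are assumptions.
#
#   import aws_cdk.aws_cloudwatch as cloudwatch
#
#   cloudwatch.Alarm(
#       self, "PutRecordsInternalFailures",
#       metric=stream.metric_put_records_failed_records(statistic="sum"),
#       threshold=100,
#       evaluation_periods=2,
#   )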
@jsii.member(jsii_name="metricPutRecordsLatency")
def metric_put_records_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The time taken per PutRecords operation, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricPutRecordsSuccess")
def metric_put_records_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of PutRecords operations where at least one record succeeded, per Kinesis stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricPutRecordsSuccessfulRecords")
def metric_put_records_successful_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful records in a PutRecords operation per Kinesis data stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricPutRecordsThrottledRecords")
def metric_put_records_throttled_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to throttling in a PutRecords operation per Kinesis data stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricPutRecordsTotalRecords")
def metric_put_records_total_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The total number of records sent in a PutRecords operation per Kinesis data stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricPutRecordSuccess")
def metric_put_record_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful PutRecord operations per Kinesis stream, measured over the specified time period.
Average
reflects the percentage of successful writes to a stream.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricReadProvisionedThroughputExceeded")
def metric_read_provisioned_throughput_exceeded(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of GetRecords calls throttled for the stream over the specified time period.
The most commonly used
statistic for this metric is Average.
When the Minimum statistic has a value of 1, all records were throttled for the stream during the specified time
period.
When the Maximum statistic has a value of 0 (zero), no records were throttled for the stream during the specified
time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricWriteProvisionedThroughputExceeded")
def metric_write_provisioned_throughput_exceeded(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to throttling for the stream over the specified time period.
This metric
includes throttling from PutRecord and PutRecords operations.
When the Minimum statistic has a non-zero value, records were being throttled for the stream during the specified
time period.
When the Maximum statistic has a value of 0 (zero), no records were being throttled for the stream during the
specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
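# NOTE (editorial usage sketch, not part of the generated bindings): a concrete
# implementation of IStream exposes all of the metric helpers declared above, so
# throttled writes can be alarmed on roughly like this; `stream`, the enclosing
# `self` scope and the threshold are assumptions.
#
#   import aws_cdk.core
#   import aws_cdk.aws_cloudwatch as cloudwatch
#
#   cloudwatch.Alarm(
#       self, "StreamWriteThrottled",
#       metric=stream.metric_write_provisioned_throughput_exceeded(
#           statistic="sum",
#           period=aws_cdk.core.Duration.minutes(1),
#       ),
#       threshold=1,
#       evaluation_periods=1,
#   )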
class _IStreamProxy(
jsii.proxy_for(aws_cdk.core.IResource) # type: ignore[misc]
):
'''A Kinesis Stream.'''
__jsii_type__: typing.ClassVar[str] = "@aws-cdk/aws-kinesis.IStream"
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="streamArn")
def stream_arn(self) -> builtins.str:
'''The ARN of the stream.
:attribute: true
'''
return typing.cast(builtins.str, jsii.get(self, "streamArn"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="streamName")
def stream_name(self) -> builtins.str:
'''The name of the stream.
:attribute: true
'''
return typing.cast(builtins.str, jsii.get(self, "streamName"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="encryptionKey")
def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:
'''Optional KMS encryption key associated with this stream.'''
return typing.cast(typing.Optional[aws_cdk.aws_kms.IKey], jsii.get(self, "encryptionKey"))
@jsii.member(jsii_name="grant")
def grant(
self,
grantee: aws_cdk.aws_iam.IGrantable,
*actions: builtins.str,
) -> aws_cdk.aws_iam.Grant:
'''Grant the indicated permissions on this stream to the provided IAM principal.
:param grantee: -
:param actions: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grant", [grantee, *actions]))
@jsii.member(jsii_name="grantRead")
def grant_read(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
'''Grant read permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to use the key to decrypt the
contents of the stream will also be granted.
:param grantee: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grantRead", [grantee]))
@jsii.member(jsii_name="grantReadWrite")
def grant_read_write(
self,
grantee: aws_cdk.aws_iam.IGrantable,
) -> aws_cdk.aws_iam.Grant:
'''Grants read/write permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to use the key for
encrypt/decrypt will also be granted.
:param grantee: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grantReadWrite", [grantee]))
@jsii.member(jsii_name="grantWrite")
def grant_write(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
'''Grant write permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to use the key to encrypt the
contents of the stream will also be granted.
:param grantee: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grantWrite", [grantee]))
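# NOTE (editorial usage sketch, not part of the generated bindings): the grant
# helpers above are normally called with an IAM role or other grantable principal;
# `stream`, `consumer_role` and `producer_role` are assumptions.
#
#   stream.grant_read(consumer_role)    # read records (and decrypt, if KMS-encrypted)
#   stream.grant_write(producer_role)   # put records (and encrypt, if KMS-encrypted)
#   stream.grant(consumer_role, "kinesis:DescribeStreamSummary")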
@jsii.member(jsii_name="metric")
def metric(
self,
metric_name: builtins.str,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''Return the stream metric with the given metric name.
:param metric_name: name of the stream metric.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metric", [metric_name, props]))
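# NOTE (editorial usage sketch, not part of the generated bindings): the generic
# ``metric()`` escape hatch accepts any Kinesis stream-level CloudWatch metric name,
# which is useful when no dedicated helper exists. `stream` is an assumption and
# "PutRecords.Latency" is just one example of a valid metric name.
#
#   p99_put_latency = stream.metric("PutRecords.Latency", statistic="p99")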
@jsii.member(jsii_name="metricGetRecords")
def metric_get_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records retrieved from the shard, measured over the specified time period.
Minimum, Maximum, and
Average statistics represent the records in a single GetRecords operation for the stream in the specified time
period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecords", [props]))
@jsii.member(jsii_name="metricGetRecordsBytes")
def metric_get_records_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes retrieved from the Kinesis stream, measured over the specified time period.
Minimum, Maximum,
and Average statistics represent the bytes in a single GetRecords operation for the stream in the specified time
period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsBytes", [props]))
@jsii.member(jsii_name="metricGetRecordsIteratorAgeMilliseconds")
def metric_get_records_iterator_age_milliseconds(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The age of the last record in all GetRecords calls made against a Kinesis stream, measured over the specified time period.
Age is the difference between the current time and when the last record of the GetRecords call was written
to the stream. The Minimum and Maximum statistics can be used to track the progress of Kinesis consumer
applications. A value of zero indicates that the records being read are completely caught up with the stream.
The metric defaults to maximum over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsIteratorAgeMilliseconds", [props]))
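# NOTE (editorial usage sketch, not part of the generated bindings): consumer lag is
# commonly surfaced by alarming on the iterator age; `stream`, the enclosing `self`
# scope and the one-hour threshold are assumptions.
#
#   import aws_cdk.aws_cloudwatch as cloudwatch
#
#   cloudwatch.Alarm(
#       self, "ConsumerFallingBehind",
#       metric=stream.metric_get_records_iterator_age_milliseconds(),
#       threshold=3_600_000,  # one hour, in milliseconds
#       evaluation_periods=3,
#   )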
@jsii.member(jsii_name="metricGetRecordsLatency")
def metric_get_records_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The time taken per GetRecords operation, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsLatency", [props]))
@jsii.member(jsii_name="metricGetRecordsSuccess")
def metric_get_records_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful GetRecords operations per stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsSuccess", [props]))
@jsii.member(jsii_name="metricIncomingBytes")
def metric_incoming_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes successfully put to the Kinesis stream over the specified time period.
This metric includes
bytes from PutRecord and PutRecords operations. Minimum, Maximum, and Average statistics represent the bytes in a
single put operation for the stream in the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
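
Example (a minimal sketch; ``stream`` is assumed to be an ``IStream`` obtained elsewhere, and the statistic/period overrides are illustrative)::

    import aws_cdk.core as cdk

    # total bytes ingested per minute instead of the default 5-minute average
    incoming_bytes = stream.metric_incoming_bytes(
        statistic="Sum",
        period=cdk.Duration.minutes(1),
    )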
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricIncomingBytes", [props]))
@jsii.member(jsii_name="metricIncomingRecords")
def metric_incoming_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records successfully put to the Kinesis stream over the specified time period.
This metric includes
record counts from PutRecord and PutRecords operations. Minimum, Maximum, and Average statistics represent the
records in a single put operation for the stream in the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricIncomingRecords", [props]))
@jsii.member(jsii_name="metricPutRecordBytes")
def metric_put_record_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes put to the Kinesis stream using the PutRecord operation over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordBytes", [props]))
@jsii.member(jsii_name="metricPutRecordLatency")
def metric_put_record_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The time taken per PutRecord operation, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordLatency", [props]))
@jsii.member(jsii_name="metricPutRecordsBytes")
def metric_put_records_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes put to the Kinesis stream using the PutRecords operation over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsBytes", [props]))
@jsii.member(jsii_name="metricPutRecordsFailedRecords")
def metric_put_records_failed_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to internal failures in a PutRecords operation per Kinesis data stream, measured over the specified time period.
Occasional internal failures are to be expected and should be retried.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
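
Example (a minimal sketch; ``stream`` and ``scope`` are assumed to exist in the surrounding app, and the threshold is illustrative)::

    import aws_cdk.aws_cloudwatch as cloudwatch

    # alarm when any records fail due to internal errors within a 5-minute window
    cloudwatch.Alarm(scope, "PutRecordsFailedAlarm",
        metric=stream.metric_put_records_failed_records(statistic="Sum"),
        threshold=1,
        evaluation_periods=1,
    )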
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsFailedRecords", [props]))
@jsii.member(jsii_name="metricPutRecordsLatency")
def metric_put_records_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The time taken per PutRecords operation, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsLatency", [props]))
@jsii.member(jsii_name="metricPutRecordsSuccess")
def metric_put_records_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of PutRecords operations where at least one record succeeded, per Kinesis stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsSuccess", [props]))
@jsii.member(jsii_name="metricPutRecordsSuccessfulRecords")
def metric_put_records_successful_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful records in a PutRecords operation per Kinesis data stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsSuccessfulRecords", [props]))
@jsii.member(jsii_name="metricPutRecordsThrottledRecords")
def metric_put_records_throttled_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to throttling in a PutRecords operation per Kinesis data stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsThrottledRecords", [props]))
@jsii.member(jsii_name="metricPutRecordsTotalRecords")
def metric_put_records_total_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The total number of records sent in a PutRecords operation per Kinesis data stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsTotalRecords", [props]))
@jsii.member(jsii_name="metricPutRecordSuccess")
def metric_put_record_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful PutRecord operations per Kinesis stream, measured over the specified time period.
Average
reflects the percentage of successful writes to a stream.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordSuccess", [props]))
@jsii.member(jsii_name="metricReadProvisionedThroughputExceeded")
def metric_read_provisioned_throughput_exceeded(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of GetRecords calls throttled for the stream over the specified time period.
The most commonly used
statistic for this metric is Average.
When the Minimum statistic has a value of 1, all records were throttled for the stream during the specified time
period.
When the Maximum statistic has a value of 0 (zero), no records were throttled for the stream during the specified
time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
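
Example (a minimal sketch; ``stream`` and ``dashboard`` are assumed to be created elsewhere in the app)::

    import aws_cdk.aws_cloudwatch as cloudwatch

    # visualize read throttling next to successful reads
    dashboard.add_widgets(cloudwatch.GraphWidget(
        title="GetRecords throttling",
        left=[stream.metric_read_provisioned_throughput_exceeded(statistic="Sum")],
        right=[stream.metric_get_records_success()],
    ))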
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricReadProvisionedThroughputExceeded", [props]))
@jsii.member(jsii_name="metricWriteProvisionedThroughputExceeded")
def metric_write_provisioned_throughput_exceeded(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to throttling for the stream over the specified time period.
This metric
includes throttling from PutRecord and PutRecords operations.
When the Minimum statistic has a non-zero value, records were being throttled for the stream during the specified
time period.
When the Maximum statistic has a value of 0 (zero), no records were being throttled for the stream during the
specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricWriteProvisionedThroughputExceeded", [props]))
# Adding a "__jsii_proxy_class__(): typing.Type" function to the interface
typing.cast(typing.Any, IStream).__jsii_proxy_class__ = lambda : _IStreamProxy
@jsii.implements(IStream)
class Stream(
aws_cdk.core.Resource,
metaclass=jsii.JSIIMeta,
jsii_type="@aws-cdk/aws-kinesis.Stream",
):
'''A Kinesis stream.
Can be encrypted with a KMS key.
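
Example (a minimal sketch; the app, stack and stream names are illustrative)::

    import aws_cdk.core as cdk
    import aws_cdk.aws_kinesis as kinesis

    app = cdk.App()
    stack = cdk.Stack(app, "ExampleStack")

    # an unencrypted stream with two shards and 48 hours of retention
    stream = kinesis.Stream(stack, "ExampleStream",
        shard_count=2,
        retention_period=cdk.Duration.hours(48),
    )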
'''
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
encryption: typing.Optional["StreamEncryption"] = None,
encryption_key: typing.Optional[aws_cdk.aws_kms.IKey] = None,
retention_period: typing.Optional[aws_cdk.core.Duration] = None,
shard_count: typing.Optional[jsii.Number] = None,
stream_name: typing.Optional[builtins.str] = None,
) -> None:
'''
:param scope: -
:param id: -
:param encryption: The kind of server-side encryption to apply to this stream. If you choose KMS, you can specify a KMS key via ``encryptionKey``. If encryption key is not specified, a key will automatically be created. Default: - StreamEncryption.KMS if encrypted Streams are supported in the region or StreamEncryption.UNENCRYPTED otherwise. StreamEncryption.KMS if an encryption key is supplied through the encryptionKey property
:param encryption_key: External KMS key to use for stream encryption. The 'encryption' property must be set to "Kms". Default: - Kinesis Data Streams master key ('/alias/aws/kinesis'). If encryption is set to StreamEncryption.KMS and this property is undefined, a new KMS key will be created and associated with this stream.
:param retention_period: The number of hours for the data records that are stored in shards to remain accessible. Default: Duration.hours(24)
:param shard_count: The number of shards for the stream. Default: 1
:param stream_name: Enforces a particular physical stream name. Default:
'''
props = StreamProps(
encryption=encryption,
encryption_key=encryption_key,
retention_period=retention_period,
shard_count=shard_count,
stream_name=stream_name,
)
jsii.create(Stream, self, [scope, id, props])
@jsii.member(jsii_name="fromStreamArn") # type: ignore[misc]
@builtins.classmethod
def from_stream_arn(
cls,
scope: constructs.Construct,
id: builtins.str,
stream_arn: builtins.str,
) -> IStream:
'''Import an existing Kinesis Stream provided an ARN.
:param scope: The parent creating construct (usually ``this``).
:param id: The construct's name.
:param stream_arn: Stream ARN (i.e. arn:aws:kinesis:::stream/Foo).
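
Example (a minimal sketch; the region, account id and stream name in the ARN are placeholders)::

    import aws_cdk.core as cdk
    import aws_cdk.aws_kinesis as kinesis

    stack = cdk.Stack(cdk.App(), "ImportStack")
    imported = kinesis.Stream.from_stream_arn(stack, "ImportedStream",
        "arn:aws:kinesis:us-east-1:123456789012:stream/Foo")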
'''
return typing.cast(IStream, jsii.sinvoke(cls, "fromStreamArn", [scope, id, stream_arn]))
@jsii.member(jsii_name="fromStreamAttributes") # type: ignore[misc]
@builtins.classmethod
def from_stream_attributes(
cls,
scope: constructs.Construct,
id: builtins.str,
*,
stream_arn: builtins.str,
encryption_key: typing.Optional[aws_cdk.aws_kms.IKey] = None,
) -> IStream:
'''Creates a Stream construct that represents an external stream.
:param scope: The parent creating construct (usually ``this``).
:param id: The construct's name.
:param stream_arn: The ARN of the stream.
:param encryption_key: The KMS key securing the contents of the stream if encryption is enabled. Default: - No encryption
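
Example (a minimal sketch; both ARNs are placeholders)::

    import aws_cdk.core as cdk
    import aws_cdk.aws_kinesis as kinesis
    import aws_cdk.aws_kms as kms

    stack = cdk.Stack(cdk.App(), "ImportStack")
    key = kms.Key.from_key_arn(stack, "ImportedKey",
        "arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555")
    imported = kinesis.Stream.from_stream_attributes(stack, "ImportedStream",
        stream_arn="arn:aws:kinesis:us-east-1:123456789012:stream/Foo",
        encryption_key=key)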
'''
attrs = StreamAttributes(stream_arn=stream_arn, encryption_key=encryption_key)
return typing.cast(IStream, jsii.sinvoke(cls, "fromStreamAttributes", [scope, id, attrs]))
@jsii.member(jsii_name="grant")
def grant(
self,
grantee: aws_cdk.aws_iam.IGrantable,
*actions: builtins.str,
) -> aws_cdk.aws_iam.Grant:
'''Grant the indicated permissions on this stream to the given IAM principal (Role/Group/User).
:param grantee: -
:param actions: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grant", [grantee, *actions]))
@jsii.member(jsii_name="grantRead")
def grant_read(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
'''Grant read permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to use the key to decrypt the
contents of the stream will also be granted.
:param grantee: -
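
Example (a minimal sketch; ``stream`` and ``stack`` are assumed to be defined elsewhere in the app, and the role name is illustrative)::

    import aws_cdk.aws_iam as iam

    consumer_role = iam.Role(stack, "ConsumerRole",
        assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))

    # read access to the records, plus kms:Decrypt when a customer key encrypts the stream
    stream.grant_read(consumer_role)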
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grantRead", [grantee]))
@jsii.member(jsii_name="grantReadWrite")
def grant_read_write(
self,
grantee: aws_cdk.aws_iam.IGrantable,
) -> aws_cdk.aws_iam.Grant:
'''Grants read/write permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to use the key for
encrypt/decrypt will also be granted.
:param grantee: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grantReadWrite", [grantee]))
@jsii.member(jsii_name="grantWrite")
def grant_write(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
'''Grant write permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to use the key to encrypt the
contents of the stream will also be granted.
:param grantee: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grantWrite", [grantee]))
@jsii.member(jsii_name="metric")
def metric(
self,
metric_name: builtins.str,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''Return a stream metric by its metric name.
:param metric_name: name of the stream metric.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
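
Example (a minimal sketch; ``stream`` is assumed to be a ``Stream`` instance, the metric name must be an actual Kinesis Data Streams metric, and the overrides are illustrative)::

    import aws_cdk.core as cdk

    incoming = stream.metric("IncomingRecords",
        statistic="Sum",
        period=cdk.Duration.minutes(1))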
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metric", [metric_name, props]))
@jsii.member(jsii_name="metricGetRecords")
def metric_get_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records retrieved from the shard, measured over the specified time period.
Minimum, Maximum, and
Average statistics represent the records in a single GetRecords operation for the stream in the specified time
period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecords", [props]))
@jsii.member(jsii_name="metricGetRecordsBytes")
def metric_get_records_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes retrieved from the Kinesis stream, measured over the specified time period.
Minimum, Maximum,
and Average statistics represent the bytes in a single GetRecords operation for the stream in the specified time
period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsBytes", [props]))
@jsii.member(jsii_name="metricGetRecordsIteratorAgeMilliseconds")
def metric_get_records_iterator_age_milliseconds(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The age of the last record in all GetRecords calls made against a Kinesis stream, measured over the specified time period.
Age is the difference between the current time and when the last record of the GetRecords call was written
to the stream. The Minimum and Maximum statistics can be used to track the progress of Kinesis consumer
applications. A value of zero indicates that the records being read are completely caught up with the stream.
The metric defaults to maximum over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
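
Example (a minimal sketch; ``stream`` and ``scope`` are assumed to exist elsewhere, and the one-minute threshold is illustrative)::

    import aws_cdk.aws_cloudwatch as cloudwatch

    # alert when consumers fall more than a minute behind the tip of the stream
    cloudwatch.Alarm(scope, "IteratorAgeAlarm",
        metric=stream.metric_get_records_iterator_age_milliseconds(),
        threshold=60 * 1000,  # milliseconds
        evaluation_periods=3,
    )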
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsIteratorAgeMilliseconds", [props]))
@jsii.member(jsii_name="metricGetRecordsLatency")
def metric_get_records_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
        '''The time taken per GetRecords operation, measured over the specified time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsLatency", [props]))
@jsii.member(jsii_name="metricGetRecordsSuccess")
def metric_get_records_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful GetRecords operations per stream, measured over the specified time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsSuccess", [props]))
@jsii.member(jsii_name="metricIncomingBytes")
def metric_incoming_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes successfully put to the Kinesis stream over the specified time period.
This metric includes
bytes from PutRecord and PutRecords operations. Minimum, Maximum, and Average statistics represent the bytes in a
single put operation for the stream in the specified time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricIncomingBytes", [props]))
@jsii.member(jsii_name="metricIncomingRecords")
def metric_incoming_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records successfully put to the Kinesis stream over the specified time period.
This metric includes
record counts from PutRecord and PutRecords operations. Minimum, Maximum, and Average statistics represent the
records in a single put operation for the stream in the specified time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricIncomingRecords", [props]))
@jsii.member(jsii_name="metricPutRecordBytes")
def metric_put_record_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes put to the Kinesis stream using the PutRecord operation over the specified time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordBytes", [props]))
@jsii.member(jsii_name="metricPutRecordLatency")
def metric_put_record_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The time taken per PutRecord operation, measured over the specified time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordLatency", [props]))
@jsii.member(jsii_name="metricPutRecordsBytes")
def metric_put_records_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes put to the Kinesis stream using the PutRecords operation over the specified time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsBytes", [props]))
@jsii.member(jsii_name="metricPutRecordsFailedRecords")
def metric_put_records_failed_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to internal failures in a PutRecords operation per Kinesis data stream, measured over the specified time period.
Occasional internal failures are to be expected and should be retried.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsFailedRecords", [props]))
@jsii.member(jsii_name="metricPutRecordsLatency")
def metric_put_records_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The time taken per PutRecords operation, measured over the specified time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsLatency", [props]))
@jsii.member(jsii_name="metricPutRecordsSuccess")
def metric_put_records_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of PutRecords operations where at least one record succeeded, per Kinesis stream, measured over the specified time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsSuccess", [props]))
@jsii.member(jsii_name="metricPutRecordsSuccessfulRecords")
def metric_put_records_successful_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful records in a PutRecords operation per Kinesis data stream, measured over the specified time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsSuccessfulRecords", [props]))
@jsii.member(jsii_name="metricPutRecordsThrottledRecords")
def metric_put_records_throttled_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to throttling in a PutRecords operation per Kinesis data stream, measured over the specified time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsThrottledRecords", [props]))
@jsii.member(jsii_name="metricPutRecordsTotalRecords")
def metric_put_records_total_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The total number of records sent in a PutRecords operation per Kinesis data stream, measured over the specified time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsTotalRecords", [props]))
@jsii.member(jsii_name="metricPutRecordSuccess")
def metric_put_record_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful PutRecord operations per Kinesis stream, measured over the specified time period.
Average
reflects the percentage of successful writes to a stream.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordSuccess", [props]))
@jsii.member(jsii_name="metricReadProvisionedThroughputExceeded")
def metric_read_provisioned_throughput_exceeded(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of GetRecords calls throttled for the stream over the specified time period.
The most commonly used
statistic for this metric is Average.
When the Minimum statistic has a value of 1, all records were throttled for the stream during the specified time
period.
When the Maximum statistic has a value of 0 (zero), no records were throttled for the stream during the specified
time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricReadProvisionedThroughputExceeded", [props]))
@jsii.member(jsii_name="metricWriteProvisionedThroughputExceeded")
def metric_write_provisioned_throughput_exceeded(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to throttling for the stream over the specified time period.
This metric
includes throttling from PutRecord and PutRecords operations.
When the Minimum statistic has a non-zero value, records were being throttled for the stream during the specified
time period.
When the Maximum statistic has a value of 0 (zero), no records were being throttled for the stream during the
specified time period.
        The metric defaults to average over 5 minutes; it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricWriteProvisionedThroughputExceeded", [props]))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="streamArn")
def stream_arn(self) -> builtins.str:
'''The ARN of the stream.'''
return typing.cast(builtins.str, jsii.get(self, "streamArn"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="streamName")
def stream_name(self) -> builtins.str:
'''The name of the stream.'''
return typing.cast(builtins.str, jsii.get(self, "streamName"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="encryptionKey")
def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:
'''Optional KMS encryption key associated with this stream.'''
return typing.cast(typing.Optional[aws_cdk.aws_kms.IKey], jsii.get(self, "encryptionKey"))
@jsii.data_type(
jsii_type="@aws-cdk/aws-kinesis.StreamAttributes",
jsii_struct_bases=[],
name_mapping={"stream_arn": "streamArn", "encryption_key": "encryptionKey"},
)
class StreamAttributes:
def __init__(
self,
*,
stream_arn: builtins.str,
encryption_key: typing.Optional[aws_cdk.aws_kms.IKey] = None,
) -> None:
'''A reference to a stream.
The easiest way to instantiate is to call
``stream.export()``. Then, the consumer can use ``Stream.import(this, ref)`` and
get a ``Stream``.
:param stream_arn: The ARN of the stream.
:param encryption_key: The KMS key securing the contents of the stream if encryption is enabled. Default: - No encryption
'''
self._values: typing.Dict[str, typing.Any] = {
"stream_arn": stream_arn,
}
if encryption_key is not None:
self._values["encryption_key"] = encryption_key
@builtins.property
def stream_arn(self) -> builtins.str:
'''The ARN of the stream.'''
result = self._values.get("stream_arn")
assert result is not None, "Required property 'stream_arn' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:
'''The KMS key securing the contents of the stream if encryption is enabled.
:default: - No encryption
'''
result = self._values.get("encryption_key")
return typing.cast(typing.Optional[aws_cdk.aws_kms.IKey], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "StreamAttributes(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.enum(jsii_type="@aws-cdk/aws-kinesis.StreamEncryption")
class StreamEncryption(enum.Enum):
'''What kind of server-side encryption to apply to this stream.'''
UNENCRYPTED = "UNENCRYPTED"
'''Records in the stream are not encrypted.'''
KMS = "KMS"
'''Server-side encryption with a KMS key managed by the user.
If ``encryptionKey`` is specified, this key will be used, otherwise, one will be defined.
'''
MANAGED = "MANAGED"
'''Server-side encryption with a master key managed by Amazon Kinesis.'''
@jsii.data_type(
jsii_type="@aws-cdk/aws-kinesis.StreamProps",
jsii_struct_bases=[],
name_mapping={
"encryption": "encryption",
"encryption_key": "encryptionKey",
"retention_period": "retentionPeriod",
"shard_count": "shardCount",
"stream_name": "streamName",
},
)
class StreamProps:
def __init__(
self,
*,
encryption: typing.Optional[StreamEncryption] = None,
encryption_key: typing.Optional[aws_cdk.aws_kms.IKey] = None,
retention_period: typing.Optional[aws_cdk.core.Duration] = None,
shard_count: typing.Optional[jsii.Number] = None,
stream_name: typing.Optional[builtins.str] = None,
) -> None:
'''Properties for a Kinesis Stream.
:param encryption: The kind of server-side encryption to apply to this stream. If you choose KMS, you can specify a KMS key via ``encryptionKey``. If encryption key is not specified, a key will automatically be created. Default: - StreamEncryption.KMS if encrypted Streams are supported in the region or StreamEncryption.UNENCRYPTED otherwise. StreamEncryption.KMS if an encryption key is supplied through the encryptionKey property
:param encryption_key: External KMS key to use for stream encryption. The 'encryption' property must be set to "Kms". Default: - Kinesis Data Streams master key ('/alias/aws/kinesis'). If encryption is set to StreamEncryption.KMS and this property is undefined, a new KMS key will be created and associated with this stream.
:param retention_period: The number of hours for the data records that are stored in shards to remain accessible. Default: Duration.hours(24)
:param shard_count: The number of shards for the stream. Default: 1
:param stream_name: Enforces a particular physical stream name. Default:
'''
self._values: typing.Dict[str, typing.Any] = {}
if encryption is not None:
self._values["encryption"] = encryption
if encryption_key is not None:
self._values["encryption_key"] = encryption_key
if retention_period is not None:
self._values["retention_period"] = retention_period
if shard_count is not None:
self._values["shard_count"] = shard_count
if stream_name is not None:
self._values["stream_name"] = stream_name
@builtins.property
def encryption(self) -> typing.Optional[StreamEncryption]:
'''The kind of server-side encryption to apply to this stream.
If you choose KMS, you can specify a KMS key via ``encryptionKey``. If
encryption key is not specified, a key will automatically be created.
:default:
- StreamEncryption.KMS if encrypted Streams are supported in the region
or StreamEncryption.UNENCRYPTED otherwise.
StreamEncryption.KMS if an encryption key is supplied through the encryptionKey property
'''
result = self._values.get("encryption")
return typing.cast(typing.Optional[StreamEncryption], result)
@builtins.property
def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:
'''External KMS key to use for stream encryption.
The 'encryption' property must be set to "Kms".
:default:
- Kinesis Data Streams master key ('/alias/aws/kinesis').
If encryption is set to StreamEncryption.KMS and this property is undefined, a new KMS key
will be created and associated with this stream.
'''
result = self._values.get("encryption_key")
return typing.cast(typing.Optional[aws_cdk.aws_kms.IKey], result)
@builtins.property
def retention_period(self) -> typing.Optional[aws_cdk.core.Duration]:
'''The number of hours for the data records that are stored in shards to remain accessible.
:default: Duration.hours(24)
'''
result = self._values.get("retention_period")
return typing.cast(typing.Optional[aws_cdk.core.Duration], result)
@builtins.property
def shard_count(self) -> typing.Optional[jsii.Number]:
'''The number of shards for the stream.
:default: 1
'''
result = self._values.get("shard_count")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def stream_name(self) -> typing.Optional[builtins.str]:
'''Enforces a particular physical stream name.
:default:
'''
result = self._values.get("stream_name")
return typing.cast(typing.Optional[builtins.str], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "StreamProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"CfnStream",
"CfnStreamConsumer",
"CfnStreamConsumerProps",
"CfnStreamProps",
"IStream",
"Stream",
"StreamAttributes",
"StreamEncryption",
"StreamProps",
]
publication.publish()
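# Hedged usage sketch (not part of the generated jsii bindings above): it shows
# how the Stream construct and the StreamProps documented in this module might
# be wired up inside an aws-cdk v1 stack, and how one of the CloudWatch metric
# helpers can be requested. The construct id "UsageSketchStream" and the chosen
# shard count, retention period, statistic, and period are illustrative
# assumptions only.
def _stream_usage_sketch(scope: aws_cdk.core.Construct) -> aws_cdk.aws_cloudwatch.Metric:
    # A 2-shard stream encrypted with the Kinesis-managed master key and a
    # 48-hour record retention period (see StreamProps above).
    stream = Stream(
        scope,
        "UsageSketchStream",
        shard_count=2,
        encryption=StreamEncryption.MANAGED,
        retention_period=aws_cdk.core.Duration.hours(48),
    )
    # Total records written to the stream, summed over 15-minute periods.
    return stream.metric_incoming_records(
        statistic="Sum",
        period=aws_cdk.core.Duration.minutes(15),
    )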
| 60.635689
| 468
| 0.68824
| 210,958
| 0.956587
| 0
| 0
| 211,844
| 0.960604
| 0
| 0
| 138,807
| 0.629419
|
18d5365ed6c594ed06788598b0b869b72340bab9
| 2,752
|
py
|
Python
|
model.py
|
nupurbaghel/Image_Captioning_CV
|
2af5abe1464006113e38a911ace62faacb9cbbd4
|
[
"MIT"
] | null | null | null |
model.py
|
nupurbaghel/Image_Captioning_CV
|
2af5abe1464006113e38a911ace62faacb9cbbd4
|
[
"MIT"
] | null | null | null |
model.py
|
nupurbaghel/Image_Captioning_CV
|
2af5abe1464006113e38a911ace62faacb9cbbd4
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
super(EncoderCNN, self).__init__()
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
param.requires_grad_(False)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.embed = nn.Linear(resnet.fc.in_features, embed_size)
self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)
def forward(self, images):
features = self.resnet(images)
features = features.view(features.size(0), -1)
features = self.bn(self.embed(features))
return features
class DecoderRNN(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
super(DecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.word_embeddings = nn.Embedding(vocab_size, embed_size)
self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
self.linear = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
captions = captions[:, :-1]
#batch_size
batch_size = features.size(0)
        # hidden state and cell state, created on the same device as the input features
        hidden_state = torch.zeros((1, batch_size, self.hidden_size), device=features.device)
        cell_state = torch.zeros((1, batch_size, self.hidden_size), device=features.device)
# create embedding
embeds = self.word_embeddings(captions)
embeds = torch.cat((features.unsqueeze(1), embeds), dim=1)
# embeddings new shape : (batch_size, captions length - 1, embed_size)
lstm_out, _ = self.lstm(embeds, (hidden_state, cell_state))
outputs = self.linear(lstm_out)
return outputs
def sample(self, inputs, states=None, max_len=20):
" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
sampled_ids = []
for i in range(max_len): # maximum sampling length
hiddens, states = self.lstm(inputs, states) # (batch_size, 1, hidden_size),
outputs = self.linear(hiddens.squeeze(1)) # (batch_size, vocab_size)
predicted = outputs.max(1)[1]
if predicted.item() == 1:
break
sampled_ids.append(predicted)
inputs = self.word_embeddings(predicted)
inputs = inputs.unsqueeze(1) # (batch_size, 1, embed_size)
return [pred.item() for pred in sampled_ids]
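# Hedged usage sketch (not part of the original project code): it wires
# EncoderCNN and DecoderRNN together for a single caption-sampling pass on a
# random image tensor. The embedding/hidden/vocabulary sizes and the dummy
# input are illustrative assumptions; constructing EncoderCNN downloads the
# pretrained resnet50 weights the first time it runs.
if __name__ == "__main__":
    embed_size, hidden_size, vocab_size = 256, 512, 1000
    encoder = EncoderCNN(embed_size).eval()      # eval mode so BatchNorm uses running stats
    decoder = DecoderRNN(embed_size, hidden_size, vocab_size).eval()
    with torch.no_grad():
        images = torch.randn(1, 3, 224, 224)     # one fake RGB image
        features = encoder(images)               # shape: (1, embed_size)
        word_ids = decoder.sample(features.unsqueeze(1))
    print(word_ids)                              # list of predicted token ids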
| 43
| 125
| 0.62936
| 2,620
| 0.952035
| 0
| 0
| 0
| 0
| 0
| 0
| 356
| 0.12936
|
18d56845b92528becf4631678e4c6ca21b008e41
| 965
|
py
|
Python
|
BaseTest/click_button_chrome.py
|
lloydtawanda/AzurePriceListWebScrapper
|
0d6e7a38af13cb780a7b04a8832b67a22727e3bc
|
[
"Apache-2.0"
] | 2
|
2019-07-16T13:49:35.000Z
|
2021-06-17T22:21:17.000Z
|
BaseTest/click_button_chrome.py
|
lloydtawanda/AzurePriceListWebScrapper
|
0d6e7a38af13cb780a7b04a8832b67a22727e3bc
|
[
"Apache-2.0"
] | null | null | null |
BaseTest/click_button_chrome.py
|
lloydtawanda/AzurePriceListWebScrapper
|
0d6e7a38af13cb780a7b04a8832b67a22727e3bc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 14:36:46 2019
@author: Tawanda
"""
import sys
import argparse
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--driver", help="path to chrome driver")
args = parser.parse_args()
if not args.driver:
print("Please enter a valid path to the chrome driver ( --driver argument )")
sys.exit(1)
browser = webdriver.Chrome(executable_path=args.driver)
browser.implicitly_wait(10)
browser.maximize_window()
try:
browser.get('https://www.oursky.com/')
button = browser.find_element_by_class_name('btn-header')
button.click()
print('=======Button Click test was successful=======')
except NoSuchElementException as ex:
print(f'Error :: No such element : {ex}')
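# Hedged usage note: example invocation of this test script. The chromedriver
# path below is a placeholder; substitute the location of your local binary.
#   python click_button_chrome.py --driver /path/to/chromedriver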
| 28.382353
| 85
| 0.660104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 338
| 0.350259
|
18d5b7387f5bbbe02061b184773c4b0590414bd7
| 22,854
|
py
|
Python
|
hymo/swmmreport.py
|
lucashtnguyen/hymo
|
956661401b2ac5220a83349ed15bc1d4bb7d60f4
|
[
"BSD-3-Clause"
] | 4
|
2017-12-18T17:43:54.000Z
|
2021-09-29T01:05:33.000Z
|
hymo/swmmreport.py
|
lucashtnguyen/hymo
|
956661401b2ac5220a83349ed15bc1d4bb7d60f4
|
[
"BSD-3-Clause"
] | 30
|
2017-09-26T22:23:33.000Z
|
2021-09-03T16:38:18.000Z
|
hymo/swmmreport.py
|
lucashtnguyen/hymo
|
956661401b2ac5220a83349ed15bc1d4bb7d60f4
|
[
"BSD-3-Clause"
] | 2
|
2017-10-03T01:41:16.000Z
|
2019-12-17T23:42:42.000Z
|
from .base_reader import BaseReader
import pandas as pd
class SWMMReportFile(BaseReader):
"""
A class to read a SWMM model report file.
"""
def __init__(self, path):
"""
Requires:
- path: str, the full file path to the existing SWMM model .inp.
"""
BaseReader.__init__(self, path)
# check units
self.unit = self.orig_file[self.find_line_num('Flow Units')].split('.')[-1].strip().upper()
# check swmm version
self.version = self.orig_file[self.find_line_num('VERSION')].split(' - ')[1].split(' ')[1]
self._headers = _ReportHeaders(self.unit)
# INPUTS == YES Blocks
self._element_count = None
self._raingage_summary = None
self._subcatchment_summary = None
self._node_summary = None
self._link_summary = None
self._cross_section_summary = None
# Continuity Data Blocks
self._runoff_quantity_continuity = None
self._flow_routing_continuity = None
# Results Blocks
self._subcatchment_runoff_results = None
self._node_depth_results = None
self._node_inflow_results = None
self._node_surcharge_results = None
self._node_flooding_results = None
self._storage_volume_results = None
self._outfall_loading_results = None
self._link_flow_results = None
self._flow_classification_results = None
self._conduit_surcharge_results = None
self._link_pollutant_load_results = None
self._startlines = {
#dict = {'block_name': ('rpt_header', n_comment_lines)}
'element_count': ('Element Count', 2),
'raingage_summary': ('Raingage Summary', 5),
'subcatchment_summary': ('Subcatchment Summary', 5),
'node_summary': ('Node Summary', 5),
'link_summary': ('Link Summary', 4),
'cross_section_summary': ('Cross Section Summary', 5),
'subcatchment_runoff': ('Subcatchment Runoff Summary', 8),
'node_depth': ('Node Depth Summary', 8),
'node_inflow': ('Node Inflow Summary', 9),
'node_surcharge': ('Node Surcharge Summary', 9),
'node_flooding': ('Node Flooding Summary', 10),
'storage_volume': ('Storage Volume Summary', 8),
'outfall_loading': ('Outfall Loading Summary', 8), #special conditions at end of block
'link_flow': ('Link Flow Summary', 8),
'flow_classification': ('Flow Classification Summary', 8),
'conduit_surcharge': ('Conduit Surcharge Summary', 8), #special conditions EOF
'link_pollutant_load': ('Link Pollutant Load Summary', 7)
}
@property
def element_count(self):
"""
The number of elements used in your simulation.
Created by INPUTS = YES in [REPORT] section of input file
"""
if self._element_count is None:
names, dtype = self._headers.element_count
self._element_count = self._make_df('element_count', sep='\.+', header=None, index_col=[0], dtype=str, engine='python')
self._element_count.set_index(pd.Index(names), drop=True, inplace=True) # Replace old row names w/ headers
self._element_count.rename(columns={self._element_count.columns.values[0]: 'num_elements'}, inplace=True)
# self._element_count = self._element_count.transpose()
return self._element_count
@property
def raingage_summary(self):
if self._raingage_summary is None:
names, dtype = self._headers.raingage_summary
self._raingage_summary = self._make_df('raingage_summary', sep='\s+', header=None, names=names, index_col=[0], dtype=dtype)
return self._raingage_summary
@property
def subcatchment_summary(self):
#TODO There is a bug in the SWMM Report File generator that doesn't put a space between the Area and Width
# if the Area is too large. We need to split it based on two places after the decimal point.
if self._subcatchment_summary is None:
names, dtype = self._headers.subcatchment_summary
self._subcatchment_summary = self._make_df('subcatchment_summary', sep='\s+', header=None, names=names, index_col=[0], dtype=dtype)
return self._subcatchment_summary
@property
def node_summary(self):
if self._node_summary is None:
names, dtype = self._headers.node_summary
self._node_summary = self._make_df('node_summary', sep='\s+', header=None, names=names, index_col=[0], dtype=dtype)
return self._node_summary
@property
def link_summary(self):
if self._link_summary is None:
names, dtype = self._headers.link_summary
self._link_summary = self._make_df('link_summary', sep='\s+', header=None, names=names, index_col=[0], dtype=dtype)
return self._link_summary
@property
def cross_section_summary(self):
if self._cross_section_summary is None:
names, dtype = self._headers.cross_section_summary
self._cross_section_summary = self._make_df('cross_section_summary', sep='\s+', header=None, names=names, index_col=[0], dtype=dtype)
return self._cross_section_summary
@property
def runoff_quantity_continuity(self):
if self._runoff_quantity_continuity is None:
names, dtype = self._headers.runoff_quantity_continuity
var_conversion = {'Total Precipitation': 'Total_Precipitation', 'Evaporation Loss': 'Evaporation_Loss',
'Infiltration Loss': 'Infiltration_Loss', 'Surface Runoff': 'Surface_Runoff',
'Final Storage': 'Final_Storage', 'Continuity Error (%)': 'Continuity_Error_pcnt'}
self._runoff_quantity_continuity = pd.DataFrame(columns=names)
for var in var_conversion:
line_number = self.find_line_num(var)
data = self.orig_file[line_number].split()
if var != 'Continuity Error (%)':
data = pd.Series([data[3], data[4]], index=[names[0], names[1]], name = var_conversion[var])
else:
data = pd.Series([data[4], data[4]], index=[names[0], names[1]], name = var_conversion[var])
self._runoff_quantity_continuity = self._runoff_quantity_continuity.append(data)
return self._runoff_quantity_continuity
@property
def flow_routing_continuity(self):
if self._flow_routing_continuity is None:
names, dtype = self._headers.flow_routing_continuity
var_conversion = {'Dry Weather Inflow': 'Dry_Weather_Inflow',
'Wet Weather Inflow': 'Wet_Weather_Inflow',
'Groundwater Inflow': 'Groundwater_Inflow',
'RDII Inflow': 'RDII_Inflow',
'External Inflow': 'External_Inflow',
'External Outflow': 'External_Outflow',
'Flooding Loss': 'Flooding_Loss',
'Evaporation Loss': 'Evaporation_Loss',
'Exfiltration Loss': 'Exfiltration_Loss',
                              'Initial Stored Volume': 'Initial_Stored_Volume',
'Final Stored Volume': 'Final_Stored_Volume',
'Continuity Error (%)': 'Continuity_Error_pcnt'
}
self._flow_routing_continuity = pd.DataFrame(columns=names)
for var in var_conversion:
line_number = self.find_line_num(var)
# There are two 'Evaporation Loss' sections: This will find the second one
if var == 'Evaporation Loss':
subdata = self.orig_file[line_number+1:]
line_number = self.find_line_num(var, lookup=subdata) + line_number
data = list(filter(lambda x: '.' in x, self.orig_file[line_number].split()))
if var != 'Continuity Error (%)':
data = pd.Series([data[1], data[2]], index=[names[0], names[1]], name=var_conversion[var])
# Write the continuity error twice since it has no units
else:
data = pd.Series([data[1], data[1]], index=[names[0], names[1]], name=var_conversion[var])
self._flow_routing_continuity = self._flow_routing_continuity.append(data)
return self._flow_routing_continuity
@property
def subcatchment_runoff_results(self):
"""
The parsed node depth results as a pandas DataFrame
"""
if self._subcatchment_runoff_results is None:
names, dtype = self._headers.subcatchment_runoff_results
self._subcatchment_runoff_results = self._make_df(
'subcatchment_runoff', sep='\s+', header=None, names=names,
index_col=[0], dtype=dtype)
return self._subcatchment_runoff_results
@property
def node_depth_results(self):
"""
The parsed node depth results as a pandas DataFrame
"""
if self._node_depth_results is None:
#TODO check names and make consistent with new properties
names, dtype = self._headers.node_depth_results
self._node_depth_results = self._make_df(
'node_depth', sep='\s+', header=None, names=names,
index_col=[0], dtype=dtype)
return self._node_depth_results
@property
def node_inflow_results(self):
"""
The parsed node inflow results as a pandas DataFrame
"""
if self._node_inflow_results is None:
names, dtype = self._headers.node_inflow_results
self._node_inflow_results = self._make_df(
'node_inflow', sep='\s+', header=None, names=names,
index_col=[0], dtype=dtype)
return self._node_inflow_results
@property
def node_surcharge_results(self):
"""
The parsed node surcharge results as a pandas DataFrame
"""
if self._node_surcharge_results is None:
#TODO check names and make consistent with new properties
names, dtype = self._headers.node_surcharge_results
self._node_surcharge_results = self._make_df(
'node_surcharge', sep='\s+', header=None, names=names,
index_col=[0], dtype=dtype)
return self._node_surcharge_results
@property
def node_flooding_results(self):
if self._node_flooding_results is None:
names, dtype = self._headers.node_flooding_results
self._node_flooding_results = self._make_df(
'node_flooding', sep='\s+', header=None, names=names,
index_col=[0], dtype=dtype)
return self._node_flooding_results
@property
def storage_volume_results(self):
if self._storage_volume_results is None:
names, dtype = self._headers.storage_volume_results
self._storage_volume_results = self._make_df(
'storage_volume', sep='\s+', header=None, names=names,
index_col=[0], dtype=dtype)
return self._storage_volume_results
@property
def outfall_loading_results(self):
if self._outfall_loading_results is None:
# special conditions at end of block
# summary stats -> parse all and drop sep '---'
start_line_str = 'Outfall Loading Summary'
blank_space = 3
n_lines = 3
names = self.infer_columns(start_line_str, blank_space, n_lines)
# "Outfall Node" needs to be joined
n = '_'.join(names[:2])
_ = names.pop(0)
names[0] = n
dtype = {'Outfall_Node': str}
df = self._make_df('outfall_loading', sep='\s+',
header=None, names=names, index_col=[0], dtype=dtype)
# drop sep
drop_from_index = [_ for _ in df.index if '-------------------' in _]
df = df.drop(drop_from_index)
self._outfall_loading_results = df
return self._outfall_loading_results
@property
def link_flow_results(self):
if self._link_flow_results is None:
names, dtype = self._headers.link_flow_results
self._link_flow_results = self._make_df(
'link_flow', sep='\s+', header=None, names=names,
index_col=[0], dtype=dtype)
return self._link_flow_results
@property
def flow_classification_results(self):
if self._flow_classification_results is None:
names, dtype = self._headers.flow_classification_results
self._flow_classification_results = self._make_df(
'flow_classification', sep='\s+', header=None, names=names,
index_col=[0], dtype=dtype)
return self._flow_classification_results
@property
def conduit_surcharge_results(self):
if self._conduit_surcharge_results is None:
# There are some EOF lines that we need to exclude.
# For now the _find_end function detects the end of
# block because of the 2xSpace+return.
names, dtype = self._headers.conduit_surcharge_results
self._conduit_surcharge_results = self._make_df(
'conduit_surcharge', sep='\s+', header=None, names=names,
index_col=[0], dtype=dtype)
return self._conduit_surcharge_results
@property
def link_pollutant_load_results(self):
if self._link_pollutant_load_results is None:
# there will be more than one pollutant
# we will need to think about a proper
# name parser.
start_line_str = 'Link Pollutant Load Summary'
blank_space = 3
n_lines = 2
dtype = {'Link': str}
names = self.infer_columns(start_line_str, blank_space, n_lines)
self._link_pollutant_load_results = self._make_df(
'link_pollutant_load', sep='\s+', header=None, names=names,
index_col=[0], dtype=dtype)
return self._link_pollutant_load_results
class _ReportHeaders(object):
"""
_ReportHeaders: What is my purpose?
Dev: You make headers
_ReportHeaders: Oh my god
"""
def __init__(self, ftype):
self.ftype = ftype.upper().strip()
if self.ftype not in ['CFS', 'LPS']:
e = 'Only "CFS" and "LPS" supported.'
raise ValueError(e)
@property
def element_count(self):
# names are the same for both CFS and LPS
names = [
'Rain_gages', 'Subcatchments',
'Nodes', 'Links',
'Pollutants', 'Land_uses'
]
dtype = {'Rain_gages': str}
return names, dtype
@property
def raingage_summary(self):
names = [ 'Name', 'Data_Source',
'Data_Type', 'Recording_Interval_time',
'Recording_Interval_units'
]
dtype = {'Name': str}
return names, dtype
@property
def subcatchment_summary(self):
names = [ 'Name', 'Area',
'Width', '%Imperv',
'%Slope', 'Rain_Gage',
'Outlet'
]
dtype = {'Name': str}
return names, dtype
@property
def node_summary(self):
names = [ 'Name', 'Type',
'Invert Elev.', 'Max. Depth',
'Ponded_Area', 'External_Inflow'
]
dtype = {'Name': str}
return names, dtype
@property
def link_summary(self):
names = [ 'Name', 'From_Node',
'To_Node', 'Type',
'Length', '%Slope',
'Roughness'
]
dtype = {'Name': str}
return names, dtype
@property
def cross_section_summary(self):
names = ['Conduit', 'Shape',
'Full_Depth', 'Full_Area',
'Hyd._Rad.', 'Max_Width',
'No_of_Barrels', 'Full_Flow'
]
dtype = {'Conduit': str}
return names, dtype
@property
def runoff_quantity_continuity(self):
if self.ftype == 'CFS':
names = ['Volume_acre_feet', 'Depth_inches']
elif self.ftype == 'LPS':
names = ['Volume_hectare_feet', 'Depth_mm']
dtype = {'Volume_acre_feet': str}
return names, dtype
@property
def flow_routing_continuity(self):
if self.ftype == 'CFS':
names = ['Volume_acre_feet', 'Depth_inches']
        elif self.ftype == 'LPS':
names = ['Volume_hectare_feet', 'Depth_mm']
dtype = {'Volume_acre_feet': str}
return names, dtype
@property
def subcatchment_runoff_results(self):
if self.ftype == 'CFS':
names = [
'Subcatchment', 'Total_Precip_in',
'Total_Runon_in', 'Total_Evap_in',
'Total_Infil_in', 'Imperv_Runoff_in',
'Perv_Runoff_in', 'Total_Runoff_in',
'Total_Runoff_mgal', 'Peak_Runoff_CFS',
'Runoff_Coeff']
elif self.ftype == 'LPS':
names = [
'Subcatchment', 'Total_Precip_mm',
'Total_Runon_mm', 'Total_Evap_mm',
'Total_Infil_mm', 'Imperv_Runoff_mm',
'Perv_Runoff_mm', 'Total_Runoff_mm',
'Total_Runoff_mltr', 'Peak_Runoff_LPS',
'Runoff_Coeff']
dtype = {'Subcatchment': str}
return names, dtype
@property
def node_depth_results(self):
if self.ftype == 'CFS':
names = [
'Node', 'Type',
'Average_Depth_Feet', 'Maximum_Depth_Feet',
'Maximum_HGL_Feet', 'Time_of_Max_Occurrence_days',
'Time_of_Max_Occurrence_hours', 'Reported_Max_Depth_Feet'
]
elif self.ftype == 'LPS':
names = [
'Node', 'Type',
'Average_Depth_Meters', 'Maximum_Depth_Meters',
'Maximum_HGL_Meters', 'Time_of_Max_Occurrence_days',
'Time_of_Max_Occurrence_hours', 'Reported_Max_Depth_Meters'
]
dtype = {'Node': str}
return names, dtype
@property
def node_inflow_results(self):
if self.ftype == 'CFS':
names = [
'Node', 'Type',
'Maximum_Lateral_Inflow_CFS', 'Maximum_Total_Inflow_CFS',
'Time_of_Max_Occurrence_days', 'Time_of_Max_Occurrence_hours',
'Lateral_Inflow_Volume_mgals', 'Total_Inflow_Volume_mgals',
'Flow_Balance_Error_Percent', 'flag'
]
elif self.ftype == 'LPS':
names = [
'Node', 'Type',
'Maximum_Lateral_Inflow_LPS', 'Maximum_Total_Inflow_LPS',
'Time_of_Max_Occurrence_days', 'Time_of_Max_Occurrence_hours',
'Lateral_Inflow_Volume_mltr', 'Total_Inflow_Volume_mltr',
'Flow_Balance_Error_Percent', 'flag'
]
dtype = {'Node': str}
return names, dtype
@property
def node_surcharge_results(self):
if self.ftype == 'CFS':
names = [
'Node', 'Type',
'Hours_Surcharged', 'Max_Height_Above_Crown_Feet',
'Min_Depth_Below_Rim_Feet'
]
elif self.ftype == 'LPS':
names = [
'Node', 'Type',
'Hours_Surcharged', 'Max_Height_Above_Crown_Meters',
'Min_Depth_Below_Rim_Meters'
]
dtype = {'Node': str}
return names, dtype
@property
def node_flooding_results(self):
if self.ftype == 'CFS':
names = [
'Node',
'Hours_Flooded', 'Maximum_Rate_CFS',
'Time_of_Max_Occurrence_days', 'Time_of_Max_Occurrence_hours',
'Total_Flood_Volume_mgal', 'Maximum_Ponded_Depth_Feet'
]
elif self.ftype == 'LPS':
names = [
'Node',
'Hours_Flooded', 'Maximum_Rate_LPS',
'Time_of_Max_Occurrence_days', 'Time_of_Max_Occurrence_hours',
'Total_Flood_Volume_mltr', 'Maximum_Ponded_Depth_Meters'
]
dtype = {'Node': str}
return names, dtype
@property
def storage_volume_results(self):
if self.ftype == 'CFS':
names = [
'Storage_Unit', 'Average_Volume_1000_ft3',
'Avg_Pcnt_Full', 'Evap_Pcnt_Loss',
'Exfil_Pcnt_Loss', 'Maximum_Volume_1000_ft3',
'Max_Pcnt_Full', 'Time_of_Max_Occurrence_days',
'Time_of_Max_Occurrence_hours', 'Maximum_Outflow_CFS'
]
elif self.ftype == 'LPS':
names = [
'Storage_Unit', 'Average_Volume_1000_m3',
'Avg_Pcnt_Full', 'Evap_Pcnt_Loss',
'Exfil_Pcnt_Loss', 'Maximum_Volume_1000_m3',
'Max_Pcnt_Full', 'Time_of_Max_Occurrence_days',
'Time_of_Max_Occurrence_hours', 'Maximum_Outflow_LPS'
]
dtype = {'Storage_Unit': str}
return names, dtype
@property
def link_flow_results(self):
if self.ftype == 'CFS':
names = [
'Link', 'Type',
'Maximum_Flow_CFS', 'Time_of_Max_Occurrence_days',
'Time_of_Max_Occurrence_hours', 'Maximum_Veloc_ftsec',
'Max_Full_Flow', 'Max_Full_Depth'
]
elif self.ftype == 'LPS':
names = [
'Link', 'Type',
'Maximum_Flow_LPS', 'Time_of_Max_Occurrence_days',
'Time_of_Max_Occurrence_hours', 'Maximum_Veloc_msec',
'Max_Full_Flow', 'Max_Full_Depth'
]
dtype = {'Link': str}
return names, dtype
@property
def flow_classification_results(self):
names = [
'Conduit', 'Adjusted_Actual_Length',
'Fraction_of_Time_Dry', 'Fraction_of_Time_Up_Dry',
'Fraction_of_Time_Down_Dry', 'Fraction_of_Time_Sub_Crit',
'Fraction_of_Time_Sup_Crit', 'Fraction_of_Time_Up_Crit',
'Fraction_of_Time_Down_Crit', 'Fraction_of_Time_Norm_Ltd',
'Fraction_of_Time_Inlet_Ctrl',
]
dtype = {'Conduit': str}
return names, dtype
@property
def conduit_surcharge_results(self):
names = [
'Conduit', 'Hours_Full_Both_Ends',
'Hours_Full_Upstream', 'Hours_Full_Dnstream',
'Hours_Above_Full_Normal_Flow', 'Hours_Capacity_Limited',
]
dtype = {'Conduit': str}
return names, dtype
| 36.801932
| 141
| 0.586899
| 22,792
| 0.997287
| 0
| 0
| 19,559
| 0.855824
| 0
| 0
| 7,279
| 0.3185
|
18d5cbf8a3d63285ac1fed2569f0fc69a3422e0e
| 25,917
|
py
|
Python
|
tbip.py
|
n-longuetmarx/tbip
|
c6f137167aec8075c2ae98183cdf4c5e7dbc700a
|
[
"MIT"
] | null | null | null |
tbip.py
|
n-longuetmarx/tbip
|
c6f137167aec8075c2ae98183cdf4c5e7dbc700a
|
[
"MIT"
] | null | null | null |
tbip.py
|
n-longuetmarx/tbip
|
c6f137167aec8075c2ae98183cdf4c5e7dbc700a
|
[
"MIT"
] | null | null | null |
"""Learn ideal points with the text-based ideal point model (TBIP).
Let y_{dv} denote the counts of word v in document d. Let x_d refer to the
ideal point of the author of document d. Then we model:
theta, beta ~ Gamma(alpha, alpha)
x, eta ~ N(0, 1)
y_{dv} ~ Pois(sum_k theta_dk beta_kv exp(x_d * eta_kv)).
We perform variational inference to provide estimates for the posterior
distribution of each latent variable. We take reparameterization gradients,
using a lognormal variational family for the positive variables (theta, beta)
and a normal variational family for the real variables (x, eta).
The directory `data/{data_name}/clean/` should have the following four files:
1. `counts.npz`: a [num_documents, num_words] sparse matrix containing the
word counts for each document.
2. `author_indices.npy`: a [num_documents] vector where each entry is an
integer in the set {0, 1, ..., num_authors - 1}, indicating the author of
the corresponding document in `counts.npz`.
3. `vocabulary.txt`: a [num_words]-length file where each line is a string
denoting the corresponding word in the vocabulary.
4. `author_map.txt`: a [num_authors]-length file where each line is a string
denoting the name of an author in the corpus.
We provide more details in our paper [1].
#### References
[1]: Keyon Vafa, Suresh Naidu, David Blei. Text-Based Ideal Points. In
_Conference of the Association for Computational Linguistics_, 2020.
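
As an illustration only (not part of the original pipeline), a toy data
directory matching the layout described above could be generated as follows;
the directory name `data/toy/clean`, the sizes, and the author names are all
made up:

  import os
  import numpy as np
  import scipy.sparse as sparse

  data_dir = "data/toy/clean"
  os.makedirs(data_dir, exist_ok=True)
  counts = sparse.csr_matrix(np.random.poisson(1., size=(20, 30)))
  sparse.save_npz(os.path.join(data_dir, "counts.npz"), counts)
  np.save(os.path.join(data_dir, "author_indices.npy"),
          np.random.randint(0, 3, size=20))
  with open(os.path.join(data_dir, "vocabulary.txt"), "w") as f:
    for v in range(30):
      print("word{}".format(v), file=f)
  with open(os.path.join(data_dir, "author_map.txt"), "w") as f:
    for name in ["Author A", "Author B", "Author C"]:
      print(name, file=f)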
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import time
from absl import flags
import numpy as np
import scipy.sparse as sparse
import tensorflow as tf
import tensorflow_probability as tfp
flags.DEFINE_float("learning_rate",
default=0.01,
help="Adam learning rate.")
flags.DEFINE_integer("max_steps",
default=1000000,
help="Number of training steps to run.")
flags.DEFINE_integer("num_topics",
default=50,
help="Number of topics.")
flags.DEFINE_integer("batch_size",
default=1024,
help="Batch size.")
flags.DEFINE_integer("num_samples",
default=1,
help="Number of samples to use for ELBO approximation.")
flags.DEFINE_enum("counts_transformation",
default="nothing",
enum_values=["nothing", "binary", "sqrt", "log"],
help="Transformation used on counts data.")
flags.DEFINE_boolean("pre_initialize_parameters",
default=True,
help="Whether to use pre-initialized document and topic "
"intensities (with Poisson factorization).")
flags.DEFINE_string("data",
default="senate-speeches-114",
help="Data source being used.")
flags.DEFINE_integer("senate_session",
default=113,
help="Senate session (used only when data is "
"'senate-speech-comparisons'.")
flags.DEFINE_integer("print_steps",
default=500,
help="Number of steps to print and save results.")
flags.DEFINE_integer("seed",
default=123,
help="Random seed to be used.")
FLAGS = flags.FLAGS
def build_input_pipeline(data_dir,
batch_size,
random_state,
counts_transformation="nothing"):
"""Load data and build iterator for minibatches.
Args:
data_dir: The directory where the data is located. There must be four
      files inside the directory: `counts.npz`, `author_indices.npy`,
`author_map.txt`, and `vocabulary.txt`.
batch_size: The batch size to use for training.
random_state: A NumPy `RandomState` object, used to shuffle the data.
counts_transformation: A string indicating how to transform the counts.
One of "nothing", "binary", "log", or "sqrt".
"""
counts = sparse.load_npz(os.path.join(data_dir, "counts.npz"))
num_documents, num_words = counts.shape
author_indices = np.load(
os.path.join(data_dir, "author_indices.npy")).astype(np.int32)
num_authors = np.max(author_indices + 1)
author_map = np.loadtxt(os.path.join(data_dir, "author_map.txt"),
dtype=str,
delimiter="\n",
encoding='latin-1')
# Shuffle data.
documents = random_state.permutation(num_documents)
shuffled_author_indices = author_indices[documents]
shuffled_counts = counts[documents]
# Apply counts transformation.
if counts_transformation == "nothing":
count_values = shuffled_counts.data
elif counts_transformation == "binary":
count_values = np.int32(shuffled_counts.data > 0)
elif counts_transformation == "log":
count_values = np.round(np.log(1 + shuffled_counts.data))
elif counts_transformation == "sqrt":
count_values = np.round(np.sqrt(shuffled_counts.data))
else:
raise ValueError("Unrecognized counts transformation.")
# Store counts as sparse tensor so it occupies less memory.
shuffled_counts = tf.SparseTensor(
indices=np.array(shuffled_counts.nonzero()).T,
values=count_values,
dense_shape=shuffled_counts.shape)
dataset = tf.data.Dataset.from_tensor_slices(
(documents, shuffled_counts, shuffled_author_indices))
batches = dataset.repeat().batch(batch_size).prefetch(batch_size)
iterator = batches.make_one_shot_iterator()
vocabulary = np.loadtxt(os.path.join(data_dir, "vocabulary.txt"),
dtype=str,
delimiter="\n",
comments="<!-")
total_counts_per_author = np.bincount(
author_indices,
weights=np.array(np.sum(counts, axis=1)).flatten())
counts_per_document_per_author = (
total_counts_per_author / np.bincount(author_indices))
  # Author weights: each author's average document length relative to the corpus-wide average document length.
author_weights = (counts_per_document_per_author /
np.mean(np.sum(counts, axis=1))).astype(np.float32)
return (iterator, author_weights, vocabulary, author_map,
num_documents, num_words, num_authors)
def build_lognormal_variational_parameters(initial_document_loc,
initial_objective_topic_loc,
num_documents,
num_words,
num_topics):
"""
Build document and objective topic lognormal variational parameters.
Args:
initial_document_loc: A [num_documents, num_topics] NumPy array containing
the initial document intensity means.
initial_objective_topic_loc: A [num_topics, num_words] NumPy array
containing the initial objective topic means.
num_documents: Number of documents in the data set.
num_words: Number of words in the data set.
num_topics: Number of topics.
Returns:
document_loc: A Variable object with shape [num_documents, num_topics].
document_scale: A positive Variable object with shape [num_documents,
num_topics].
objective_topic_loc: A Variable object with shape [num_topics, num_words].
objective_topic_scale: A positive Variable object with shape [num_topics,
num_words].
"""
document_loc = tf.get_variable(
"document_loc",
initializer=tf.constant(np.log(initial_document_loc)))
objective_topic_loc = tf.get_variable(
"objective_topic_loc",
initializer=tf.constant(np.log(initial_objective_topic_loc)))
document_scale_logit = tf.get_variable(
"document_scale_logit",
shape=[num_documents, num_topics],
initializer=tf.initializers.random_normal(mean=0, stddev=1.),
dtype=tf.float32)
objective_topic_scale_logit = tf.get_variable(
"objective_topic_scale_logit",
shape=[num_topics, num_words],
initializer=tf.initializers.random_normal(mean=0, stddev=1.),
dtype=tf.float32)
document_scale = tf.nn.softplus(document_scale_logit)
objective_topic_scale = tf.nn.softplus(objective_topic_scale_logit)
tf.summary.histogram("params/document_loc", document_loc)
tf.summary.histogram("params/objective_topic_loc", objective_topic_loc)
tf.summary.histogram("params/document_scale", document_scale)
tf.summary.histogram("params/objective_topic_scale", objective_topic_scale)
return (document_loc, document_scale,
objective_topic_loc, objective_topic_scale)
def print_topics(neutral_mean, negative_mean, positive_mean, vocabulary):
"""Get neutral and ideological topics to be used for Tensorboard.
Args:
neutral_mean: The mean of the neutral topics, a NumPy matrix with shape
[num_topics, num_words].
negative_mean: The mean of the negative topics, a NumPy matrix with shape
[num_topics, num_words].
positive_mean: The mean of the positive topics, a NumPy matrix with shape
[num_topics, num_words].
vocabulary: A list of the vocabulary with shape [num_words].
Returns:
topic_strings: A list of the negative, neutral, and positive topics.
"""
num_topics, num_words = neutral_mean.shape
words_per_topic = 10
top_neutral_words = np.argsort(-neutral_mean, axis=1)
top_negative_words = np.argsort(-negative_mean, axis=1)
top_positive_words = np.argsort(-positive_mean, axis=1)
topic_strings = []
for topic_idx in range(num_topics):
neutral_start_string = "Neutral {}:".format(topic_idx)
neutral_row = [vocabulary[word] for word in
top_neutral_words[topic_idx, :words_per_topic]]
neutral_row_string = ", ".join(neutral_row)
neutral_string = " ".join([neutral_start_string, neutral_row_string])
positive_start_string = "Positive {}:".format(topic_idx)
positive_row = [vocabulary[word] for word in
top_positive_words[topic_idx, :words_per_topic]]
positive_row_string = ", ".join(positive_row)
positive_string = " ".join([positive_start_string, positive_row_string])
negative_start_string = "Negative {}:".format(topic_idx)
negative_row = [vocabulary[word] for word in
top_negative_words[topic_idx, :words_per_topic]]
negative_row_string = ", ".join(negative_row)
negative_string = " ".join([negative_start_string, negative_row_string])
topic_strings.append(" \n".join(
[negative_string, neutral_string, positive_string]))
return np.array(topic_strings)
def print_ideal_points(ideal_point_loc, author_map):
"""Print ideal point ordering for Tensorboard."""
return ", ".join(author_map[np.argsort(ideal_point_loc)])
def get_log_prior(samples, prior):
"""Return log prior of sampled Gaussians.
Args:
samples: A `Tensor` with shape `[num_samples, :, :]`.
prior: String representing prior distribution.
Returns:
log_prior: A `Tensor` with shape `[num_samples]`, with the log priors
summed across latent dimensions.
"""
if prior == 'normal':
prior_distribution = tfp.distributions.Normal(loc=0., scale=1.)
elif prior == 'gamma':
prior_distribution = tfp.distributions.Gamma(concentration=0.3, rate=0.3)
log_prior = tf.reduce_sum(prior_distribution.log_prob(samples),
axis=[1, 2])
return log_prior
def get_elbo(counts,
document_indices,
author_indices,
author_weights,
document_distribution,
objective_topic_distribution,
ideological_topic_distribution,
ideal_point_distribution,
num_documents,
batch_size,
num_samples=1):
"""Approximate variational Lognormal ELBO using reparameterization.
Args:
counts: A matrix with shape `[batch_size, num_words]`.
document_indices: An int-vector with shape `[batch_size]`.
author_indices: An int-vector with shape `[batch_size]`.
    author_weights: A vector with shape `[num_authors]`, giving each author's
      average document length relative to the corpus-wide average.
document_distribution: A positive `Distribution` object with parameter
shape `[num_documents, num_topics]`.
objective_topic_distribution: A positive `Distribution` object with
parameter shape `[num_topics, num_words]`.
ideological_topic_distribution: A positive `Distribution` object with
parameter shape `[num_topics, num_words]`.
    ideal_point_distribution: A real-valued `Distribution` object with
      parameter shape `[num_authors]`.
num_documents: The number of documents in the total data set (used to
calculate log-likelihood scale).
batch_size: Batch size (used to calculate log-likelihood scale).
num_samples: Number of Monte-Carlo samples.
Returns:
    elbo: A scalar Monte-Carlo estimate of the ELBO, averaged across samples,
      with the likelihood term rescaled from the minibatch to the full data set.
"""
document_samples = document_distribution.sample(num_samples)
objective_topic_samples = objective_topic_distribution.sample(num_samples)
ideological_topic_samples = ideological_topic_distribution.sample(
num_samples)
ideal_point_samples = ideal_point_distribution.sample(num_samples)
_, num_topics, _ = objective_topic_samples.get_shape().as_list()
ideal_point_log_prior = tfp.distributions.Normal(
loc=0.,
scale=1.)
ideal_point_log_prior = tf.reduce_sum(
ideal_point_log_prior.log_prob(ideal_point_samples), axis=[1,2])
document_log_prior = get_log_prior(document_samples, 'gamma')
objective_topic_log_prior = get_log_prior(objective_topic_samples, 'gamma')
ideological_topic_log_prior = get_log_prior(ideological_topic_samples,
'normal')
log_prior = (document_log_prior +
objective_topic_log_prior +
ideological_topic_log_prior +
ideal_point_log_prior)
selected_document_samples = tf.gather(document_samples,
document_indices,
axis=1)
selected_ideal_points = tf.gather(ideal_point_samples,
author_indices,
axis=1)
selected_ideological_topic_samples = tf.exp(
# replace by a column
selected_ideal_points[:, :, :, tf.newaxis] *
ideological_topic_samples[:, tf.newaxis, :, :])
# Normalize by how lengthy the author's opinion is.
selected_author_weights = tf.gather(author_weights, author_indices)
selected_ideological_topic_samples = (
selected_author_weights[tf.newaxis, :, tf.newaxis, tf.newaxis] *
selected_ideological_topic_samples)
document_entropy = -tf.reduce_sum(
document_distribution.log_prob(document_samples),
axis=[1, 2])
objective_topic_entropy = -tf.reduce_sum(
objective_topic_distribution.log_prob(objective_topic_samples),
axis=[1, 2])
ideological_topic_entropy = -tf.reduce_sum(
ideological_topic_distribution.log_prob(ideological_topic_samples),
axis=[1, 2])
ideal_point_entropy = -tf.reduce_sum(
ideal_point_distribution.log_prob(ideal_point_samples),
axis=1)
entropy = (document_entropy +
objective_topic_entropy +
ideological_topic_entropy +
ideal_point_entropy)
rate = tf.reduce_sum(
selected_document_samples[:, :, :, tf.newaxis] *
objective_topic_samples[:, tf.newaxis, :, :] *
selected_ideological_topic_samples[:, :, :, :],
axis=2)
count_distribution = tfp.distributions.Poisson(rate=rate)
# Need to un-sparsify the counts to evaluate log-likelihood.
count_log_likelihood = count_distribution.log_prob(
tf.sparse.to_dense(counts))
count_log_likelihood = tf.reduce_sum(count_log_likelihood, axis=[1, 2])
# Adjust for the fact that we're only using a minibatch.
count_log_likelihood = count_log_likelihood * (num_documents / batch_size)
elbo = log_prior + count_log_likelihood + entropy
elbo = tf.reduce_mean(elbo)
tf.summary.scalar("elbo/elbo", elbo)
tf.summary.scalar("elbo/log_prior", tf.reduce_mean(log_prior))
tf.summary.scalar("elbo/count_log_likelihood",
tf.reduce_mean(count_log_likelihood))
tf.summary.scalar("elbo/entropy", tf.reduce_mean(entropy))
return elbo
def main(argv):
del argv
tf.set_random_seed(FLAGS.seed)
random_state = np.random.RandomState(FLAGS.seed)
project_dir = os.path.abspath(os.path.dirname(__file__))
source_dir = os.path.join(project_dir, "data/{}".format(FLAGS.data))
# For model comparisons, we must also specify a Senate session.
if FLAGS.data == "senate-speech-comparisons":
source_dir = os.path.join(
source_dir, "tbip/{}".format(FLAGS.senate_session))
# As described in the docstring, the data directory must have the following
# files: counts.npz, author_indices.npy, vocabulary.txt, author_map.txt.
data_dir = os.path.join(source_dir, "clean")
save_dir = os.path.join(source_dir, "tbip-fits")
if tf.gfile.Exists(save_dir):
tf.logging.warn("Deleting old log directory at {}".format(save_dir))
tf.gfile.DeleteRecursively(save_dir)
tf.gfile.MakeDirs(save_dir)
(iterator, author_weights, vocabulary, author_map,
num_documents, num_words, num_authors) = build_input_pipeline(
data_dir,
FLAGS.batch_size,
random_state,
FLAGS.counts_transformation)
document_indices, counts, author_indices = iterator.get_next()
if FLAGS.pre_initialize_parameters:
fit_dir = os.path.join(source_dir, "pf-fits")
fitted_document_shape = np.load(
os.path.join(fit_dir, "document_shape.npy")).astype(np.float32)
fitted_document_rate = np.load(
os.path.join(fit_dir, "document_rate.npy")).astype(np.float32)
fitted_topic_shape = np.load(
os.path.join(fit_dir, "topic_shape.npy")).astype(np.float32)
fitted_topic_rate = np.load(
os.path.join(fit_dir, "topic_rate.npy")).astype(np.float32)
initial_document_loc = fitted_document_shape / fitted_document_rate
initial_objective_topic_loc = fitted_topic_shape / fitted_topic_rate
else:
initial_document_loc = np.float32(
np.exp(random_state.randn(num_documents, FLAGS.num_topics)))
initial_objective_topic_loc = np.float32(
np.exp(random_state.randn(FLAGS.num_topics, num_words)))
# Initialize lognormal variational parameters.
(document_loc, document_scale, objective_topic_loc,
objective_topic_scale) = build_lognormal_variational_parameters(
initial_document_loc,
initial_objective_topic_loc,
num_documents,
num_words,
FLAGS.num_topics)
document_distribution = tfp.distributions.LogNormal(
loc=document_loc,
scale=document_scale)
objective_topic_distribution = tfp.distributions.LogNormal(
loc=objective_topic_loc,
scale=objective_topic_scale)
ideological_topic_loc = tf.get_variable(
"ideological_topic_loc",
shape=[FLAGS.num_topics, num_words],
dtype=tf.float32)
ideological_topic_scale_logit = tf.get_variable(
"ideological_topic_scale_logit",
shape=[FLAGS.num_topics, num_words],
dtype=tf.float32)
ideological_topic_scale = tf.nn.softplus(ideological_topic_scale_logit)
tf.summary.histogram("params/ideological_topic_loc", ideological_topic_loc)
tf.summary.histogram("params/ideological_topic_scale",
ideological_topic_scale)
ideological_topic_distribution = tfp.distributions.Normal(
loc=ideological_topic_loc,
scale=ideological_topic_scale)
ideal_point_loc = tf.get_variable(
"ideal_point_loc",
shape=[num_authors],
dtype=tf.float32)
ideal_point_scale_logit = tf.get_variable(
"ideal_point_scale_logit",
initializer=tf.initializers.random_normal(mean=0, stddev=1.),
shape=[num_authors],
dtype=tf.float32)
ideal_point_scale = tf.nn.softplus(ideal_point_scale_logit)
ideal_point_distribution = tfp.distributions.Normal(
loc=ideal_point_loc,
scale=ideal_point_scale)
tf.summary.histogram("params/ideal_point_loc",
tf.reshape(ideal_point_loc, [-1]))
tf.summary.histogram("params/ideal_point_scale",
tf.reshape(ideal_point_scale, [-1]))
elbo = get_elbo(counts,
document_indices,
author_indices,
author_weights,
document_distribution,
objective_topic_distribution,
ideological_topic_distribution,
ideal_point_distribution,
num_documents,
FLAGS.batch_size,
num_samples=FLAGS.num_samples)
loss = -elbo
tf.summary.scalar("loss", loss)
optim = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
train_op = optim.minimize(loss)
"""
For each (k,v), we want to evaluate E[beta_kv], E[beta_kv * exp(eta_kv)],
and E[beta_kv * exp(-eta_kv)], where the expectations are with respect to the
  variational distributions. As in the paper, beta refers to the objective
  topic and eta refers to the ideological topic.
  Dropping the indices and denoting by mu_b the objective topic location and
  sigma_b the objective topic scale, we have E[beta] = exp(mu_b + sigma_b^2 / 2),
  using the mean of a lognormal distribution.
Denoting by mu_e the ideological topic location and sigma_e the ideological
topic scale, we have E[beta * exp(eta)] = E[beta]E[exp(eta)] by the
mean-field assumption. exp(eta) is lognormal distributed, so E[exp(eta)] =
exp(mu_e + sigma_e^2 / 2). Thus, E[beta * exp(eta)] =
exp(mu_b + mu_e + (sigma_b^2 + sigma_e^2) / 2).
Finally, E[beta * exp(-eta)] =
exp(mu_b - mu_e + (sigma_b^2 + sigma_e^2) / 2).
  Because exp is monotonic and we only care about the ordering of words within
  each topic, we can drop the exponents and work with the log of these means.
"""
neutral_mean = objective_topic_loc + objective_topic_scale ** 2 / 2
positive_mean = (objective_topic_loc +
ideological_topic_loc +
(objective_topic_scale ** 2 +
ideological_topic_scale ** 2) / 2)
negative_mean = (objective_topic_loc -
ideological_topic_loc +
(objective_topic_scale ** 2 +
ideological_topic_scale ** 2) / 2)
positive_mean_at_two = (objective_topic_loc +
2*ideological_topic_loc +
(objective_topic_scale ** 2 +
2*ideological_topic_scale ** 2) / 2)
negative_mean_at_two = (objective_topic_loc -
2*ideological_topic_loc +
(objective_topic_scale ** 2 +
2*ideological_topic_scale ** 2) / 2)
topics = tf.py_func(
functools.partial(print_topics, vocabulary=vocabulary),
[neutral_mean, negative_mean, positive_mean],
tf.string,
stateful=False)
ideal_point_list = tf.py_func(
functools.partial(print_ideal_points, author_map=author_map),
[ideal_point_loc],
tf.string, stateful=False)
tf.summary.text("topics", topics)
tf.summary.text("ideal_points", ideal_point_list)
summary = tf.summary.merge_all()
init = tf.global_variables_initializer()
with tf.Session() as sess:
summary_writer = tf.summary.FileWriter(save_dir, sess.graph)
sess.run(init)
start_time = time.time()
for step in range(FLAGS.max_steps):
(_, elbo_val) = sess.run([train_op, elbo])
duration = (time.time() - start_time) / (step + 1)
if step % FLAGS.print_steps == 0:
print("Step: {:>3d} ELBO: {:.3f} ({:.3f} sec)".format(
step, elbo_val, duration))
summary_str = sess.run(summary)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
if step % 1000 == 0 or step == FLAGS.max_steps - 1:
param_save_dir = os.path.join(save_dir, "params/")
if not tf.gfile.Exists(param_save_dir):
tf.gfile.MakeDirs(param_save_dir)
(ideological_topic_loc_val, ideological_topic_scale_val,
ideal_point_loc_val, ideal_point_scale_val) = sess.run([
ideological_topic_loc, ideological_topic_scale,
ideal_point_loc, ideal_point_scale])
(document_loc_val, document_scale_val, objective_topic_loc_val,
objective_topic_scale_val, ideological_topic_loc_val,
ideological_topic_scale_val, ideal_point_loc_val,
ideal_point_scale_val) = sess.run([
document_loc, document_scale, objective_topic_loc,
objective_topic_scale, ideological_topic_loc,
ideological_topic_scale, ideal_point_loc, ideal_point_scale])
np.save(os.path.join(param_save_dir, "document_loc"),
document_loc_val)
np.save(os.path.join(param_save_dir, "document_scale"),
document_scale_val)
np.save(os.path.join(param_save_dir, "objective_topic_loc"),
objective_topic_loc_val)
np.save(os.path.join(param_save_dir, "objective_topic_scale"),
objective_topic_scale_val)
np.save(os.path.join(param_save_dir, "ideological_topic_loc"),
ideological_topic_loc_val)
np.save(os.path.join(param_save_dir, "ideological_topic_scale"),
ideological_topic_scale_val)
np.save(os.path.join(param_save_dir, "ideal_point_loc"),
ideal_point_loc_val)
np.save(os.path.join(param_save_dir, "ideal_point_scale"),
ideal_point_scale_val)
if __name__ == "__main__":
tf.app.run()
| 42.141463
| 80
| 0.680904
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8,396
| 0.323957
|
18d6578d8c4bdcf3e1695a1c9ddbac250283e282
| 6,138
|
py
|
Python
|
calc/gui.py
|
tatarskiy-welder/tax_calc
|
827ec6e174ffc9cfc13e24427307a8a6b85123e0
|
[
"MIT"
] | null | null | null |
calc/gui.py
|
tatarskiy-welder/tax_calc
|
827ec6e174ffc9cfc13e24427307a8a6b85123e0
|
[
"MIT"
] | null | null | null |
calc/gui.py
|
tatarskiy-welder/tax_calc
|
827ec6e174ffc9cfc13e24427307a8a6b85123e0
|
[
"MIT"
] | null | null | null |
from tkinter import *
from tax_profiler import TaxProfile
from tkinter import messagebox as mb
class Example(Frame, TaxProfile):
def __init__(self, parent):
TaxProfile.__init__(self)
Frame.__init__(self, parent, background="lightblue")
parent.minsize(width=500, height=200)
parent.maxsize(width=500, height=200)
self.parent = parent
self.initUI()
def get_those_numbers(self, event):
try:
self.set_revenue_last(int(self.entry1.get()))
self.set_usn_paid(int(self.entry2.get()))
self.set_oms_paid(int(self.entry3.get()))
self.set_pfr_paid(int(self.entry4.get()))
except ValueError:
mb.showerror("Error", "Введите все данные числами")
return
self.top.destroy()
def kvartal_windows(self):
try:
self.kvartal = int(self.entry_kvartal.get())
except ValueError:
mb.showerror("Error", "Введите квартал числом (1-4)")
if self.kvartal < 1 or self.kvartal > 4:
mb.showerror("Error", "Введите квартал числом (1-4)")
return
self.top_start.destroy()
if self.kvartal == 1:
return
self.top = Toplevel(width=650, height=250)
self.top.minsize(200, 400)
self.top.title("Начало работы")
label1 = Message(
self.top, text="Данные за предыдущие кварталы", bg="lightblue", bd=5,
relief="groove", font=("Helvetica", 12))
label1.pack()
label2 = Message(self.top, text="Введите доход:", bg="lightblue", bd=5,
relief="groove", font=("Helvetica", 12))
label2.pack()
self.entry1 = Entry(self.top)
self.entry1.pack()
label3 = Message(self.top, text="Введите УСН:", bg="lightblue", bd=5,
relief="groove", font=("Helvetica", 11))
label3.pack()
self.entry2 = Entry(self.top)
self.entry2.pack()
label4 = Message(self.top, text="Введите ПФР:", bg="lightblue", bd=5,
relief="groove", font=("Helvetica", 11))
label4.pack()
self.entry3 = Entry(self.top)
self.entry3.pack()
label5 = Message(self.top, text="Введите ФФОМС:", bg="lightblue", bd=5,
relief="groove", font=("Helvetica", 11))
label5.pack()
self.entry4 = Entry(self.top)
self.entry4.pack()
button = Button(self.top, text="Далее")
button.pack()
button.bind("<Button-1>", self.get_those_numbers)
def start_window(self):
self.top_start = Toplevel()
self.top_start.title("Начало работы")
self.top_start.minsize(150, 100)
self.top_start.maxsize(150, 100)
msg = Message(self.top_start, text="Введите текущий квартал")
msg.pack()
self.entry_kvartal = Entry(self.top_start)
self.entry_kvartal.pack()
button = Button(
self.top_start, text="Далее",
command=self.kvartal_windows)
button.pack()
def output(self, event):
default = "0"
self.entry_fond["text"] = default
self.entry_pfr["text"] = default
self.entry_usn["text"] = default
try:
self.set_revenue(int(self.entry_dohod.get()))
if int(self.entry_dohod.get()) <= 0:
mb.showerror("Error", "Введите число в графу доход")
else:
self.entry_fond["text"] = self.get_oms()
self.entry_pfr["text"] = self.get_pfr()
self.entry_usn["text"] = self.get_usn()
except ValueError:
mb.showerror("Error", "Введите число в графу доход")
def initUI(self):
self.parent.title("Калькулятор налогов")
self.pack(fill=BOTH, expand=True)
self.columnconfigure(4, weight=2)
dohod = Label(self, text="Доход:", bg="lightblue", bd=5,
relief="groove", font=("Helvetica", 12))
dohod.grid(sticky=W, pady=4, padx=10, column=0, row=1)
nalog = Label(self, text="Налоги:", bg="lightblue", bd=5,
relief="groove", font=("Helvetica", 12))
nalog.grid(sticky=W, pady=10, padx=10, column=2, row=0)
usn = Label(self, text="УСН:", bg="lightblue", bd=5,
relief="groove", font=("Helvetica", 12))
usn.grid(sticky=W, pady=4, padx=10, column=2, row=1)
pfr = Label(self, text="ПФР:", bg="lightblue", bd=5,
relief="groove", font=("Helvetica", 12))
pfr.grid(sticky=W, pady=4, padx=10, column=2, row=2)
fond = Label(self, text="ФФОМС:", bg="lightblue", bd=5,
relief="groove", font=("Helvetica", 12))
fond.grid(sticky=W + N, pady=4, padx=10, column=2, row=3)
self.entry_dohod = Entry(self)
self.entry_dohod.grid(sticky=W, pady=4, padx=5, column=1, row=1)
self.entry_usn = Label(self, text=self.get_usn(), bg="white", width=15)
self.entry_usn.grid(sticky=W + N, pady=4, padx=5, column=3, row=1)
self.entry_pfr = Label(self, text=self.get_pfr(), width=15, bg="white")
self.entry_pfr.grid(sticky=W + N, pady=4, padx=5, column=3, row=2)
self.entry_fond = Label(
self, text=self.get_oms(), width=15, bg="white")
self.entry_fond.grid(sticky=W + N, pady=4, padx=5, column=3, row=3)
ras = Button(self, text="Рассчитать", width=30)
ras.grid(row=3, column=0, columnspan=2, sticky=W + S + E + N, padx=10)
self.start_window()
ras.bind("<Button-1>", self.output)
self.centerWindow()
def centerWindow(self):
w = 650
h = 250
sw = self.parent.winfo_screenwidth()
sh = self.parent.winfo_screenheight()
x = (sw - w) / 2
y = (sh - h) / 2
self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))
def main():
root = Tk()
root.iconbitmap(r'py.ico')
app = Example(root)
root.resizable(width=False, height=False)
root.mainloop()
if __name__ == '__main__':
main()
| 33.540984
| 81
| 0.564679
| 6,133
| 0.954998
| 0
| 0
| 0
| 0
| 0
| 0
| 1,122
| 0.174712
|
18d67d5d9fabdd711ac5fef81a528edb66bc9e9b
| 136
|
py
|
Python
|
lms_python/lms_app/admin.py
|
gabrielmdsantos/LMSBD
|
dff3001a560f8cccb938957bf2d5732d4ae3d163
|
[
"Apache-2.0"
] | null | null | null |
lms_python/lms_app/admin.py
|
gabrielmdsantos/LMSBD
|
dff3001a560f8cccb938957bf2d5732d4ae3d163
|
[
"Apache-2.0"
] | null | null | null |
lms_python/lms_app/admin.py
|
gabrielmdsantos/LMSBD
|
dff3001a560f8cccb938957bf2d5732d4ae3d163
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from lms_app.models import Professor
admin.site.register(Professor)
# Register your models here.
| 22.666667
| 37
| 0.794118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0.213235
|
18d7a6360616dabd7740bc58273af43b8634ecfa
| 5,573
|
py
|
Python
|
pymedextcore/normalize.py
|
equipe22/pymedext_core
|
578e32fdc015c9b75f566d9e58a8fade889879e7
|
[
"Apache-2.0"
] | 1
|
2021-02-04T10:33:00.000Z
|
2021-02-04T10:33:00.000Z
|
pymedextcore/normalize.py
|
equipe22/pymedext_core
|
578e32fdc015c9b75f566d9e58a8fade889879e7
|
[
"Apache-2.0"
] | 4
|
2020-12-17T09:16:24.000Z
|
2021-03-26T10:40:30.000Z
|
pymedextcore/normalize.py
|
equipe22/pymedext_core
|
578e32fdc015c9b75f566d9e58a8fade889879e7
|
[
"Apache-2.0"
] | 1
|
2020-12-17T12:32:50.000Z
|
2020-12-17T12:32:50.000Z
|
#!/usr/bin/env python3
from .document import Document
from intervaltree import Interval,IntervalTree
# from .annotationGraph import AnnotationGraph
import logging
logger = logging.getLogger(__name__)
class normalize:
def __setSentencesAndRawText(Document,rootNode):
"""Build an intervalTree of Annotations from a Document
:param Document: a Document
:param rootNode: type to filter Document
:returns: tree,sentencepose,raw_textpos,annotGraph
:rtype: intervalTree,dict,dict,dict
"""
__raw_textpos=dict()
__sentencepos=dict()
__tree=IntervalTree()
annotsGraph=dict()
for thisAnnotation in Document.annotations:
thisSpan =str(thisAnnotation.span[0])+"_"+str(thisAnnotation.span[1])
if thisAnnotation.type =="raw_text" and "id" not in __raw_textpos.keys():
__raw_textpos={"source_ID":thisAnnotation.source_ID,"id":thisAnnotation.ID,"type":thisAnnotation.type}
logger.debug(__raw_textpos)
if thisAnnotation.type == rootNode:
if thisSpan not in __sentencepos.keys():
thisAnnotation.source_ID=__raw_textpos["id"]
__tree[thisAnnotation.span[0]:thisAnnotation.span[1]]={
"annotation":[{"type":thisAnnotation.type,"value":thisAnnotation}]}
__sentencepos[thisSpan]=thisAnnotation.ID
annotsGraph[thisSpan]=[thisAnnotation]
return(__tree,__sentencepos,__raw_textpos,annotsGraph)
    # filter the functions according to the syntagms
#
def __buildTree(Document,__tree, __sentencepos, __raw_textpos, annotsGraph, otherSegments, rootNode):
"""Build tree from Document
:param Document:
:param __tree:
:param __sentencepos:
:param __raw_textpos:
:param annotsGraph:
:param otherSegments:
:param rootNode:
:returns:
:rtype:
"""
for thisAnnotation in Document.annotations:
start = thisAnnotation.span[0]
end = thisAnnotation.span[1]
thisSpan=str(start)+"_"+str(end)
if thisAnnotation.type in otherSegments:
thisAnnotation.source_ID=__sentencepos[thisSpan]
findSentence=__tree[start+1:end-1]
__tree[start:end]={"annotation":[{"type":thisAnnotation.type,"value":thisAnnotation}]}
if thisAnnotation.type not in otherSegments and thisAnnotation.type not in [rootNode,"raw_text"] :
thisAnnotation.source_ID=__raw_textpos["id"]
__tree[start:end]={"annotation":[{"type":thisAnnotation.type,"value":thisAnnotation}]}
return(Document, __tree, __sentencepos)
    # filterEntities stays here until the entity declaration issue is resolved
def __buildGraph(Document, __tree, __sentencepos, thisGraph,filterEntities):
"""Build Graph from intervaltree and Doc
:param Document:
:param __tree:
:param __sentencepos:
:param thisGraph:
:param filterEntities:
:returns:
:rtype:
"""
lenentities=[]
grousentences=[]
typeliste=[]
if len(__sentencepos.keys()) >0:
for thisAnnotation in __sentencepos.keys():
thisSpan = thisAnnotation.split("_")
start = int(thisSpan[0])
end = int(thisSpan[1])
thisMatch=__tree.overlap(start,end)
entities=[]
for interval in thisMatch:
for annot in interval.data["annotation"]:
# print(annot["value"].to_dict())
annot["value"].set_root(Document.annotations[0])
if annot["value"].span[0] == start and annot["value"].span[1] == end:
# print("add properties")
thisGraph[thisAnnotation][0].add_property(annot["value"])
elif annot["value"].isEntity == True and annot["value"].span[0] > start and annot["value"].span[1] < end:
thisGraph[thisAnnotation][0].add_child(annot["value"])
# lenentities.append(len(entities))
Document.annotations[0].add_child(thisGraph[thisAnnotation][0])
else:
for interval in __tree:
for annot in interval.data["annotation"]:
# print(annot["value"].to_dict())
annot.set_root(Document.annotations[0])
Document.annotations[0].add_child(annot)
return(Document)
@staticmethod
def uri(Document,otherSegments=["drwh_family","hypothesis"],rootNode="drwh_sentences", filterEntities=['drugs_fast', 'cui']):
"""uri Normalization
        :param Document: the Document to normalize
        :param otherSegments: additional segment annotation types (default ["drwh_family", "hypothesis"])
        :param rootNode: annotation type used as the root segment (default "drwh_sentences")
        :param filterEntities: entity annotation types of interest (default ['drugs_fast', 'cui'])
        :returns: the normalized Document, the interval tree and the sentence positions
        :rtype: tuple
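
        Example (illustrative only; assumes `doc` is a pymedext `Document`
        that already carries `raw_text`, `drwh_sentences` and entity
        annotations):
            doc, tree, sentence_pos = normalize.uri(doc)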
"""
# __raw_textpos=dict()
# normalize.__sentencepos=dict()
# normalize.__tree=IntervalTree()
__tree, __sentencepos, __raw_textpos, thisGraph=normalize.__setSentencesAndRawText(Document,rootNode)
Document, __tree, __sentencepos = normalize.__buildTree(Document,__tree, __sentencepos, __raw_textpos,thisGraph, otherSegments, rootNode)
Document = normalize.__buildGraph(Document, __tree, __sentencepos, thisGraph,filterEntities)
return(Document,__tree, __sentencepos)
| 42.869231
| 145
| 0.61134
| 5,369
| 0.963395
| 0
| 0
| 902
| 0.161852
| 0
| 0
| 1,647
| 0.295532
|
18d8e4a9db3824bc1bf6d57f22782a4ffcc36549
| 93
|
py
|
Python
|
phr/dnireniec/apps.py
|
richardqa/django-ex
|
e5b8585f28a97477150ac5daf5e55c74b70d87da
|
[
"CC0-1.0"
] | null | null | null |
phr/dnireniec/apps.py
|
richardqa/django-ex
|
e5b8585f28a97477150ac5daf5e55c74b70d87da
|
[
"CC0-1.0"
] | null | null | null |
phr/dnireniec/apps.py
|
richardqa/django-ex
|
e5b8585f28a97477150ac5daf5e55c74b70d87da
|
[
"CC0-1.0"
] | null | null | null |
from django.apps import AppConfig
class DnireniecConfig(AppConfig):
name = 'dnireniec'
| 15.5
| 33
| 0.763441
| 56
| 0.602151
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.11828
|
18d91850121d98d86b712bda14df3f044488a26e
| 479
|
py
|
Python
|
Exercício feitos pela primeira vez/ex004colorido.py
|
Claayton/pythonExerciciosLinux
|
696cdb16983638418bd0d0d4fe44dc72662b9c97
|
[
"MIT"
] | 1
|
2021-01-23T15:43:34.000Z
|
2021-01-23T15:43:34.000Z
|
Exercício feitos pela primeira vez/ex004colorido.py
|
Claayton/pythonExerciciosLinux
|
696cdb16983638418bd0d0d4fe44dc72662b9c97
|
[
"MIT"
] | null | null | null |
Exercício feitos pela primeira vez/ex004colorido.py
|
Claayton/pythonExerciciosLinux
|
696cdb16983638418bd0d0d4fe44dc72662b9c97
|
[
"MIT"
] | null | null | null |
#Ex004b
algo = (input('\033[34m''Digite algo: ''\033[m'))
print('São letras ou palavras?: \033[33m{}\033[m'.format(algo.isalpha()))
print('Está em maiúsculo?: \033[34m{}\033[m'.format(algo.isupper()))
print('Está em minúsculo?: \033[35m{}\033[m'.format(algo.islower()))
print('Está captalizada?: \033[36m{}\033[m'.format(algo.istitle()))
print('Só tem espaço?: \033[31m{}\033[m'.format(algo.isspace()))
print('É numérico?: \033[32m{}\033[m'.format(algo.isnumeric()))
print('xD')
| 47.9
| 73
| 0.668058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 275
| 0.562372
|
18da93de7ae1c7f1f8c72d039c0ee8611ca41811
| 1,444
|
py
|
Python
|
utilities_common/util_base.py
|
pettershao-ragilenetworks/sonic-utilities
|
553936b61a677b95a45a797c0e3ccdaf015cce94
|
[
"Apache-2.0"
] | null | null | null |
utilities_common/util_base.py
|
pettershao-ragilenetworks/sonic-utilities
|
553936b61a677b95a45a797c0e3ccdaf015cce94
|
[
"Apache-2.0"
] | null | null | null |
utilities_common/util_base.py
|
pettershao-ragilenetworks/sonic-utilities
|
553936b61a677b95a45a797c0e3ccdaf015cce94
|
[
"Apache-2.0"
] | null | null | null |
import os
import sonic_platform
# Constants ====================================================================
PDDF_SUPPORT_FILE = '/usr/share/sonic/platform/pddf_support'
# Helper class
class UtilHelper(object):
def __init__(self):
pass
    # Try to get information from the platform API, returning a default value if NotImplementedError is raised
def try_get(self, callback, default=None):
"""
Handy function to invoke the callback and catch NotImplementedError
:param callback: Callback to be invoked
:param default: Default return value if exception occur
:return: Default return value if exception occur else return value of the callback
"""
try:
ret = callback()
if ret is None:
ret = default
except NotImplementedError:
ret = default
return ret
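    # Illustrative usage of try_get (the `chassis` object and its get_model()
    # call below are assumptions for the example, not part of this module):
    #   helper = UtilHelper()
    #   model = helper.try_get(chassis.get_model, default='N/A')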
# Instantiate platform-specific Chassis class
def load_platform_chassis(self):
chassis = None
# Load 2.0 platform API chassis class
try:
chassis = sonic_platform.platform.Platform().get_chassis()
except Exception as e:
raise Exception("Failed to load chassis due to {}".format(repr(e)))
return chassis
    # Check whether PDDF mode is enabled
def check_pddf_mode(self):
        return os.path.exists(PDDF_SUPPORT_FILE)
| 28.88
| 100
| 0.606648
| 1,248
| 0.864266
| 0
| 0
| 0
| 0
| 0
| 0
| 670
| 0.463989
|
18dbd268ee84904b28a7b1eab62ddc99c40934ff
| 2,900
|
py
|
Python
|
consensus_engine/tests/test_view_create_proposal.py
|
jonsaunders-git/consensus_engine
|
6fc2b3df7b342d4dff919969329c8b586e33a9d3
|
[
"MIT"
] | null | null | null |
consensus_engine/tests/test_view_create_proposal.py
|
jonsaunders-git/consensus_engine
|
6fc2b3df7b342d4dff919969329c8b586e33a9d3
|
[
"MIT"
] | 4
|
2021-06-05T00:03:14.000Z
|
2021-09-22T19:41:03.000Z
|
consensus_engine/tests/test_view_create_proposal.py
|
jonsaunders-git/consensus_engine
|
6fc2b3df7b342d4dff919969329c8b586e33a9d3
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, RequestFactory
from .mixins import TwoUserMixin, ProposalGroupMixin, ViewMixin
from django.utils import timezone
from consensus_engine.views import CreateProposalView
from consensus_engine.forms import ProposalForm
from consensus_engine.models import Proposal
from django.core.exceptions import PermissionDenied
class CreateProposalViewTest(TwoUserMixin, TestCase,
ProposalGroupMixin, ViewMixin):
path = '/proposals/new/'
form = ProposalForm
view = CreateProposalView
def setUp(self):
self.factory = RequestFactory()
TwoUserMixin.setUp(self)
def test_create_proposal(self):
dt = timezone.now()
self.assertTrue(Proposal.objects.filter(
proposal_name='test proposal').count() == 0)
self.getValidView({'proposal_name': 'test proposal',
'proposal_description': 'test description'}, postargs={'options': '0'})
q = Proposal.objects.filter(proposal_name='test proposal')
self.assertTrue(q.count() == 1)
p = q.first()
self.assertTrue(p.proposal_description == 'test description')
self.assertTrue(p.date_proposed <= timezone.now()
and p.date_proposed >= dt)
self.assertTrue(p.owned_by == self.user)
self.assertTrue(p.proposal_group is None)
def test_create_proposal_within_group(self):
pg = self.create_proposal_group()
dt = timezone.now()
self.assertTrue(Proposal.objects.filter(
proposal_name='test proposal').count() == 0)
self.getValidView(data={'proposal_name': 'test proposal',
'proposal_description': 'test description'},
viewkwargs={'proposal_group_id': pg.id}, postargs={'options': '0'})
q = Proposal.objects.filter(proposal_name='test proposal')
self.assertTrue(q.count() == 1)
p = q.first()
self.assertTrue(p.proposal_description == 'test description')
self.assertTrue(p.date_proposed <= timezone.now()
and p.date_proposed >= dt)
self.assertTrue(p.owned_by == self.user)
self.assertTrue(p.proposal_group == pg)
def test_create_proposal_within_group_not_member(self):
pg = self.create_proposal_group(owned_by=self.user2)
self.assertTrue(Proposal.objects.filter(
proposal_name='test proposal').count() == 0)
with self.assertRaises(PermissionDenied,
msg="Adding a Proposal to a group you are not a member of is not allowed"):
self.getValidView(data={'proposal_name': 'test proposal',
'proposal_description': 'test description'},
viewkwargs={'proposal_group_id': pg.id}, postargs={'options': '0'})
| 46.774194
| 106
| 0.632759
| 2,550
| 0.87931
| 0
| 0
| 0
| 0
| 0
| 0
| 481
| 0.165862
|
18dc89f687d6010723363d00fb4079f119453e21
| 290
|
py
|
Python
|
tests/jdi_uitests_webtests/main/page_objects/w3c_site/w3c_site.py
|
jdi-testing/jdi-python
|
7c0607b97d4d44b27ea8f532d47c68b8dd00e6f7
|
[
"MIT"
] | 5
|
2020-02-14T10:32:01.000Z
|
2021-07-22T08:20:28.000Z
|
tests/jdi_uitests_webtests/main/page_objects/w3c_site/w3c_site.py
|
jdi-testing/jdi-python
|
7c0607b97d4d44b27ea8f532d47c68b8dd00e6f7
|
[
"MIT"
] | 54
|
2018-07-27T14:07:33.000Z
|
2021-11-08T09:24:16.000Z
|
tests/jdi_uitests_webtests/main/page_objects/w3c_site/w3c_site.py
|
jdi-testing/jdi-python
|
7c0607b97d4d44b27ea8f532d47c68b8dd00e6f7
|
[
"MIT"
] | 1
|
2021-01-20T14:31:52.000Z
|
2021-01-20T14:31:52.000Z
|
from JDI.web.selenium.elements.composite.web_site import WebSite
from tests.jdi_uitests_webtests.main.page_objects.w3c_site.frame_page import FramePage
class W3cSite(WebSite):
domain = "https://www.w3schools.com"
frame_page = FramePage(url="/tags/tag_button.asp", domain=domain)
| 32.222222
| 86
| 0.793103
| 135
| 0.465517
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 0.168966
|
18dca1ce28f6ce9649a6e926a3f6be554544907d
| 1,382
|
py
|
Python
|
tests/scrapers/test_scraper_composite.py
|
oluiscabral/stockopedia-scraper
|
1050206d7a534f0e57eee84a5187615dc0af6bd9
|
[
"MIT"
] | null | null | null |
tests/scrapers/test_scraper_composite.py
|
oluiscabral/stockopedia-scraper
|
1050206d7a534f0e57eee84a5187615dc0af6bd9
|
[
"MIT"
] | null | null | null |
tests/scrapers/test_scraper_composite.py
|
oluiscabral/stockopedia-scraper
|
1050206d7a534f0e57eee84a5187615dc0af6bd9
|
[
"MIT"
] | null | null | null |
'''
@author: oluiscabral
'''
import unittest
from creationals.scraper_factory import ScraperFactory
from helpers.webdriver_factory import WebdriverFactory
from actioners.login_control import LoginControl
from ui.login_ui import LoginUI
from data_structure.data_ref import DataRef
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.wd = WebdriverFactory.create()
cls.login_control = LoginControl(cls.wd, LoginUI())
cls.login_control.force_login()
@classmethod
def tearDownClass(cls):
cls.wd.close()
cls.wd = None
def test_stockreport(self):
stockreport_scraper = ScraperFactory.create('stockreport', Test.login_control)
result = stockreport_scraper.scrap(Test.wd, DataRef('csl-ASX:CSL'))
self.assertEqual(11, len(result))
def test_compare(self):
compare_scraper = ScraperFactory.create('compare', Test.login_control)
result = compare_scraper.scrap(Test.wd, DataRef('csl-ASX:CSL'))
self.assertEqual(1, len(result))
def test_singletable(self):
balance_scraper = ScraperFactory.create('balance', Test.login_control)
        result = balance_scraper.scrap(Test.wd, DataRef('csl-ASX:CSL'))
self.assertEqual(1, len(result))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| 34.55
| 86
| 0.700434
| 1,004
| 0.726483
| 0
| 0
| 265
| 0.191751
| 0
| 0
| 152
| 0.109986
|
18dcab3c94de533e1fad537525409735b1a45b43
| 22,917
|
py
|
Python
|
deepx/backend/tensorflow.py
|
sharadmv/deepx
|
07470e7a579a63427de1d5ff90b9fd00d3f54b61
|
[
"MIT"
] | 74
|
2015-11-13T02:26:37.000Z
|
2021-07-29T11:00:45.000Z
|
deepx/backend/tensorflow.py
|
sharadmv/deepx
|
07470e7a579a63427de1d5ff90b9fd00d3f54b61
|
[
"MIT"
] | 21
|
2015-12-12T20:33:55.000Z
|
2019-04-03T02:49:42.000Z
|
deepx/backend/tensorflow.py
|
sharadmv/deepx
|
07470e7a579a63427de1d5ff90b9fd00d3f54b61
|
[
"MIT"
] | 19
|
2015-11-23T10:07:01.000Z
|
2021-08-30T17:06:00.000Z
|
import copy
import logging
import numpy as np
import six
import tensorflow as tf
from functools import wraps
from contextlib import contextmanager
from .backend_base import BackendBase, FunctionBase, DeviceDecorator
try:
from tensorflow.contrib.distributions import fill_triangular
except ImportError:
print("Cannot find fill_triangular")
class TensorflowFunction(FunctionBase):
def __init__(self, *args, **kwargs):
super(TensorflowFunction, self).__init__(*args, **kwargs)
with tf.control_dependencies(self.outputs):
self.updates = [tf.assign(k, v) for k, v in self.updates]
def __call__(self, *inputs):
feed_dict = self.feed_dict(*inputs)
result = self.session.get_current_session().run(self.outputs + self.updates, feed_dict=feed_dict)
if len(self.outputs) == 1:
return result[0]
return result[:len(self.outputs)]
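def _tensorflow_function_sketch():
    # Editorial usage sketch, not part of the original deepx source. It
    # assumes the usual deepx pattern (placeholders in, tensors out) and that
    # FunctionBase.feed_dict pairs positional arrays with the placeholder
    # inputs in the same way __call__ above consumes them.
    T = TensorflowBackend()
    x = T.placeholder(T.floatx(), shape=[None], name='x')
    fn = T.function([x], [2.0 * x])
    with T.session():
        return fn(np.array([1.0, 2.0, 3.0], dtype=T.floatx()))  # -> [2. 4. 6.]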
@six.add_metaclass(DeviceDecorator)
class TensorflowBackend(BackendBase):
def __init__(self, **kwargs):
super(TensorflowBackend, self).__init__(**kwargs)
self.core = tf
self._sessions = []
self.set_default_device(self.gpu() if tf.test.is_gpu_available() else self.cpu())
# General purpose methods
@classmethod
def use_device(cls, method):
@wraps(method)
def func(self, *args, **kwargs):
with tf.device(self.get_current_device()):
result = method(self, *args, **kwargs)
return result
return func
def enable_eager(self):
tf.enable_eager_execution()
    def cpu(self, id=0):
        return '/cpu:%u' % id
    def gpu(self, id=0):
        return '/gpu:%u' % id
@property
def int32(self):
return tf.int32
@property
def float32(self):
return tf.float32
def _placeholder(self, dtype=None, shape=None, name=None):
with self._device(self.get_current_device()):
return tf.placeholder(dtype, shape=shape, name=name)
def _variable(self, initial_value=None, trainable=True, name=None):
with self._device(self.get_current_device()):
return tf.Variable(initial_value=initial_value, trainable=trainable, name=name)
def _device(self, name):
return tf.device(name)
def create_session(self, graph=None, **kwargs):
allow_growth = kwargs.pop('allow_growth', False)
config_proto = tf.ConfigProto(**kwargs)
config_proto.gpu_options.allow_growth = allow_growth
sess = tf.Session(graph=graph, config=config_proto)
self._initialize(sess)
return sess
@contextmanager
def session(self, **kwargs):
with self.create_session(**kwargs) as sess:
self._sessions.append(sess)
self._initialize(sess)
yield sess
self._sessions.pop()
def interactive_session(self, graph=None, **kwargs):
config_proto = tf.ConfigProto(**kwargs)
sess = tf.InteractiveSession(config=config_proto, graph=graph)
self._initialize(sess)
return sess
def get_current_session(self):
if len(self._sessions) == 0:
raise Exception('No current session')
return self._sessions[-1]
def _initialize(self, sess):
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
# Unified interface
def cast(self, x, dtype):
return tf.cast(x, dtype)
def dtype(self, x):
return x.dtype
def shape(self, x):
return tf.shape(x)
def rank(self, x):
return tf.rank(x)
def abs(self, x):
return tf.abs(x)
def set_value(self, x, value):
tf.assign(x, np.asarray(value)).op.run(session=self.get_current_session())
def zeros(self, shape, dtype=None, name=None):
dtype = dtype or self.floatx()
return tf.zeros(shape, dtype=dtype, name=name)
def zeros_like(self, x, dtype=None, name=None):
return tf.zeros_like(x, dtype=dtype, name=name)
def ones(self, shape, dtype=None, name=None):
dtype = dtype or self.floatx()
return tf.ones(shape, dtype=dtype, name=name)
def ones_like(self, x, dtype=None, name=None):
return tf.ones_like(x, dtype=dtype, name=name)
def random_normal(self, shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or self.floatx()
return tf.random_normal(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
def random_truncated_normal(self, shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or self.floatx()
return tf.truncated_normal(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
def random_uniform(self, shape, minval=0, maxval=None, dtype=None, seed=None):
dtype = dtype or self.floatx()
return tf.random_uniform(shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
def random_binomial(self, shape, p=0.5, dtype=None):
dtype = dtype or self.floatx()
return tf.where(tf.random_uniform(shape, dtype=dtype) <= p,
tf.ones(shape, dtype=dtype),
tf.zeros(shape, dtype=dtype))
    def random_gamma(self, shape, alpha, beta=None):
        return tf.random_gamma(shape, alpha, beta=beta)
def tanh(self, x, name=None):
return tf.tanh(x, name=name)
def sigmoid(self, x, name=None):
return tf.sigmoid(x, name=name)
def relu(self, x, alpha=0., name=None):
return tf.nn.relu(x, name=name)
def softmax(self, x, T=1.0):
return tf.nn.softmax(x)
def softplus(self, x):
return tf.nn.softplus(x)
def dropout(self, x, p, seed=None):
retain_prob = 1. - p
if seed is None:
seed = np.random.randint(10e6)
return tf.nn.dropout(x * 1., retain_prob, seed=seed)
def conv2d(self, x, kernel, strides=(1, 1), border_mode='same',
image_shape=None, filter_shape=None):
'''
Run on cuDNN if available.
border_mode: string, "same" or "valid".
dim_ordering: whether to use Theano or TensorFlow dimension ordering
in inputs/kernels/ouputs.
'''
if border_mode == 'same':
padding = 'SAME'
elif border_mode == 'valid':
padding = 'VALID'
else:
raise Exception('Invalid border mode: ' + str(border_mode))
# strides = strides# + (1,)
if self.floatx() == 'float64':
x = tf.cast(x, 'float32')
kernel = tf.cast(kernel, 'float32')
x = tf.nn.convolution(input=x, filter=kernel, strides=strides, padding=padding,
data_format='NHWC')
if self.floatx() == 'float64':
x = tf.cast(x, 'float64')
return x
def conv2d_transpose(self, x, kernel, dim_out, strides=(1, 1), border_mode='same'):
if border_mode == 'same':
padding = 'SAME'
elif border_mode == 'valid':
padding = 'VALID'
else:
raise Exception('Invalid border mode: ' + str(border_mode))
output_shape = [self.shape(x)[0]] + list(dim_out)
strides = (1,) + strides + (1,)
if self.floatx() == 'float64':
x = tf.cast(x, 'float32')
kernel = tf.cast(kernel, 'float32')
x = tf.nn.conv2d_transpose(x, kernel, output_shape, strides, padding=padding)
if self.floatx() == 'float64':
x = tf.cast(x, 'float64')
return x
def pool2d(self, x, pool_size, strides=(1, 1),
border_mode='valid', pool_mode='max'):
'''
pool_size: tuple of 2 integers.
strides: tuple of 2 integers.
border_mode: one of "valid", "same".
dim_ordering: one of "th", "tf".
'''
if border_mode == 'same':
padding = 'SAME'
elif border_mode == 'valid':
padding = 'VALID'
else:
raise Exception('Invalid border mode: ' + str(border_mode))
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
if self.floatx() == 'float64':
x = tf.cast(x, 'float32')
if pool_mode == 'max':
x = tf.nn.max_pool(x, pool_size, strides, padding=padding)
elif pool_mode == 'avg':
x = tf.nn.avg_pool(x, pool_size, strides, padding=padding)
else:
raise Exception('Invalid pooling mode: ' + str(pool_mode))
if self.floatx() == 'float64':
x = tf.cast(x, 'float64')
return x
def flatten(self, x, leading=1):
leading_dim = self.shape(x)[:leading]
new_shape = tf.concat([leading_dim, [-1]], 0)
return tf.reshape(x, new_shape)
def split(self, x, num_splits, axis=None):
axis = axis % len(x.get_shape())
return tf.split(x, num_splits, axis=axis)
def reshape(self, x, shape):
return tf.reshape(x, shape)
def sum(self, x, axis=None, keepdims=False):
if x.dtype.base_dtype == tf.bool:
x = tf.cast(x, self.floatx())
return tf.reduce_sum(x, axis=axis, keepdims=keepdims)
def prod(self, x, axis=None, keepdims=False):
return tf.reduce_prod(x, axis=axis, keepdims=keepdims)
def mean(self, x, axis=None, keepdims=False):
if axis is not None and axis < 0:
axis = axis % len(x.get_shape())
if x.dtype.base_dtype == tf.bool:
x = tf.cast(x, self.floatx())
return tf.reduce_mean(x, axis=axis, keepdims=keepdims)
def batch_norm(self, x, beta, gamma):
mean, variance = tf.nn.moments(x, [0])
normed = tf.nn.batch_normalization(tf.identity(x), mean, variance, beta, gamma, self.epsilon())
return normed
def log(self, x):
return tf.log(x)
def log1p(self, x):
return tf.log1p(x)
def exp(self, x):
return tf.exp(x)
def pow(self, x, a):
return tf.pow(x, a)
def mul(self, x, y):
return tf.multiply(x, y)
def sqrt(self, x):
x = tf.clip_by_value(x,
tf.cast(0., dtype=self.floatx()),
tf.cast(np.inf, dtype=self.floatx()))
return tf.sqrt(x)
def categorical_crossentropy(self, output, target, from_logits=False, axis=-1):
if not from_logits:
# scale preds so that the class probas of each sample sum to 1
output = output / tf.reduce_sum(output, axis, True)
# manual computation of crossentropy
output = tf.clip_by_value(output, self.epsilon(), 1. - self.epsilon())
return -tf.reduce_sum(target * tf.log(output), axis)
else:
return tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=target)
def binary_crossentropy(self, output, target, from_logits=False):
if from_logits:
return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
else:
raise NotImplementedError
def concatenate(self, tensors, axis=-1):
return tf.concat(tensors, axis=axis)
def sort(self, tensor):
values, indices = tf.nn.top_k(-tensor, k=tf.shape(tensor)[0])
return -values, indices
def argmin(self, tensor, axis=0):
return tf.argmin(tensor, axis=axis)
def map(self, function, input):
return tf.map_fn(function, input)
def rnn(self, step_function, input, initial_states, **kwargs):
num_dims = self.rank(input)
perm = self.concat([[1, 0], self.range(2, num_dims)])
input = self.transpose(input, perm)
def step(state, input_):
output, state = step_function(input_, state, **kwargs)
return state
result = tf.scan(step, input, initial_states)[0]
return self.transpose(result, perm)
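    # Editorial note (not part of the original deepx source): `rnn` above
    # swaps the batch and time axes, threads the state through tf.scan (the
    # step only returns the new state), keeps the first element of the state
    # structure via `[0]`, and transposes the stacked per-step results back.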
def while_loop(self, condition, body, loop_vars, **kwargs):
return tf.while_loop(condition, body, loop_vars)
def scan(self, fn, elems, initializer=None):
return tf.scan(fn, elems, initializer=initializer, back_prop=True)
def logdet(self, A, **kwargs):
A = (A + self.matrix_transpose(A)) / 2.
term = tf.log(tf.matrix_diag_part(self.cholesky(A, **kwargs)))
return 2 * tf.reduce_sum(term, -1)
def einsum(self, subscripts, *operands):
return tf.einsum(subscripts, *operands)
def cholesky(self, A, lower=True, warn=True, correct=False):
assert lower is True
# Gradient through py_func adapted from https://gist.github.com/harpone/3453185b41d8d985356cbe5e57d67342
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
tf.RegisterGradient(rnd_name)(grad)
g = tf.get_default_graph()
with g.gradient_override_map({'PyFunc': rnd_name, 'PyFuncStateless': rnd_name}):
return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
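        # Editorial note (not part of the original deepx source): the helper
        # above registers `grad` under a one-off name and remaps the
        # 'PyFunc'/'PyFuncStateless' ops to it, which is the standard trick
        # for giving a numpy-side tf.py_func a custom TensorFlow gradient.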
def correction(A):
A_new, del_ = A.copy(), 1e-4
while True:
try:
np.linalg.cholesky(A_new)
break
except np.linalg.linalg.LinAlgError:
if warn:
logging.warn('[Cholesky] singular matrix, adding diagonal {}'.format(del_))
A_new = A + del_ * np.eye(A.shape[-1]).astype(self.floatx())
del_ *= 2
return A_new
def _correction_grad(op, grad):
A = op.inputs[0]
return grad
if correct:
shape = A.get_shape()
A = py_func(correction, [A], A.dtype, grad=_correction_grad)
A.set_shape(shape)
return tf.cholesky(A)
# Tensorflow interface
def placeholder(self, dtype, shape=None, name=None):
return self._placeholder(dtype=dtype, shape=shape, name=name)
def variable(self, initial_value=None, trainable=True, name=None):
return self._variable(initial_value=initial_value, trainable=trainable, name=name)
def assign(self, a, b):
return tf.assign(a, b)
def to_float(self, x):
return tf.cast(x, self.floatx())
def constant(self, value, dtype=None, shape=None):
return tf.constant(value, dtype=dtype, shape=shape)
def get_shape(self, x):
return [a.value for a in tf.convert_to_tensor(x).get_shape()]
def get_value(self, variable):
return self.get_current_session().run(variable)
def concat(self, values, axis=-1):
return tf.concat(values, axis=axis)
def gather(self, params, indices):
return tf.gather(params, indices)
def gather_nd(self, params, indices):
return tf.gather_nd(params, indices)
def equal(self, x, y):
return tf.equal(x, y)
def logical_and(self, x, y):
return tf.logical_and(x, y)
def matmul(self, a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None):
return tf.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, name=name)
def trace(self, a):
return tf.trace(a)
def transpose(self, a, perm=None):
return tf.transpose(a, perm=perm)
def matrix_transpose(self, a):
return tf.matrix_transpose(a)
def matrix_diag(self, a):
return tf.matrix_diag(a)
def matrix_diag_part(self, a):
return tf.matrix_diag_part(a)
def set_diag(self, input, diagonal):
return tf.linalg.set_diag(input, diagonal)
def band_part(self, input, num_lower, num_upper):
return tf.linalg.band_part(input, num_lower, num_upper)
def vec(self, A):
A = self.matrix_transpose(A)
leading_dim = self.shape(A)[:-2]
return self.reshape(A, self.concat([
leading_dim,
[-1]
], 0))
def unvec(self, v, m, n):
leading_dim = self.shape(v)[:-1]
return self.matrix_transpose(self.reshape(v, self.concat([
leading_dim,
[n, m]
], 0)))
def kronecker(self, A, B):
C = (A[..., None, None] * B[..., None, None, :, :])
blocks = [
tf.unstack(a, axis=-3 % len(a.shape)) for a in
tf.unstack(C, axis=-4 % len(C.shape))
]
return tf.concat([
tf.concat(a, -1) for a in blocks
], -2)
def block_sum(self, X, m, n):
leading_dim = self.shape(X)[:-2]
block_sum = self.zeros(self.concat([leading_dim, [m, m]], 0))
for i in range(n):
block_sum += X[..., i*m:(i+1)*m, i*m:(i+1)*m]
return block_sum
def block_trace(self, X, m, n):
blocks = []
for i in range(n):
blocks.append([])
for j in range(n):
block = self.trace(X[..., i*m:(i+1)*m, j*m:(j+1)*m])
blocks[-1].append(block)
return self.pack([
self.pack([
b for b in block
])
for block in blocks
])
def kronecker_vec(self, X, m, n):
leading_dim = tf.shape(X)[:-2]
blocks = []
for i in range(n):
blocks.append([])
for j in range(m):
idx = i * m + j
block = tf.matrix_transpose(tf.reshape(X[..., idx, :], tf.concat([leading_dim, [n, m]], 0)))
blocks[-1].append(block)
return tf.concat([tf.concat(b, -2) for b in blocks], -1)
def lower_triangular(self, a):
return fill_triangular(a)
def matrix_inverse(self, a):
return tf.matrix_inverse(a)
def expand_dims(self, x, dim=-1):
return tf.expand_dims(x, dim)
def tile(self, input, multiples):
return tf.tile(input, multiples)
def gradients(self, loss, variables):
return tf.gradients(loss, variables)
def square(self, x):
return tf.square(x)
def clip_by_value(self, x, low, high):
return tf.clip_by_value(x, low, high)
def stack(self, values, axis=0, name='stack'):
return tf.stack(values, axis=axis, name=name)
def unstack(self, values, num=None, axis=0, name='unstack'):
return tf.unstack(values, num=num, axis=axis, name=name)
def pack(self, *args, **kwargs):
return self.stack(*args, **kwargs)
def unpack(self, *args, **kwargs):
return self.unstack(*args, **kwargs)
def reduce_max(self, x, axis=None, keepdims=False):
return tf.reduce_max(x, axis=axis, keepdims=keepdims)
def reduce_logsumexp(self, x, axis=None, keepdims=False):
return tf.reduce_logsumexp(x, axis=axis, keepdims=keepdims)
def matrix_solve(self, matrix, rhs, adjoint=None):
return tf.matrix_solve(matrix, rhs, adjoint=adjoint)
# Theano interface
def dim(self, x):
return len(x.get_shape())
def scalar(self, name=None, dtype=None, shape=[]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def vector(self, name=None, dtype=None, shape=[None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def matrix(self, name=None, dtype=None, shape=[None, None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def tensor3(self, name=None, dtype=None, shape=[None, None, None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def tensor4(self, name=None, dtype=None, shape=[None, None, None, None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def shared(self, value, name=None):
return self._variable(initial_value=value, name=name)
def arange(self, start, stop=None, step=None):
return self.range(start, stop=stop, step=step)
def sparse_dot(self, x, y):
return tf.sparse_tensor_dense_matmul(x, y)
def dot(self, x, y):
if len(x.get_shape()) != len(y.get_shape()):
len_y = len(y.get_shape())
new_y_shape = tf.concat([tf.shape(x)[:-len_y], tf.shape(y)], 0)
y = tf.broadcast_to(y, new_y_shape)
return tf.matmul(x, y)
def outer(self, x, y):
if len(x.get_shape()) == 0:
return x * y
return x[...,:,None] * y[...,None,:]
def eye(self, d, batch_shape=None):
return tf.eye(d, batch_shape=batch_shape)
def function(self, inputs, outputs, updates=[]):
return TensorflowFunction(self, inputs, outputs, updates)
def grad(self, loss, variables):
return tf.gradients(loss, variables)
def sqr(self, x):
return tf.square(x)
def argmax(self, x, axis=None):
return tf.argmax(x, axis=axis)
def max(self, x, axis=None, keepdims=False):
return tf.reduce_max(x, axis=axis, keepdims=keepdims)
def logsumexp(self, x, axis=None, keepdims=False):
return tf.reduce_logsumexp(x, axis=axis, keepdims=keepdims)
def switch(self, condition, then_expression, else_expression):
'''Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
'''
return tf.where(condition, then_expression, else_expression)
def alloc(self, value, shape, unbroadcast=None, dtype=None):
dtype = dtype or self.floatx()
vals = tf.fill(tf.stack(shape), np.array(value).astype(dtype))
new_shape = []
for s in shape:
if isinstance(s, tf.Tensor):
new_shape.append(None)
else:
new_shape.append(s)
vals.set_shape(new_shape)
return vals
def range(self, start, limit=None, delta=1):
if limit is None:
return tf.range(start, delta=delta)
return tf.range(start, limit, delta=delta)
def solve(self, a, b):
return tf.matrix_solve(a, b)
def one_hot(self, indices, depth):
return tf.one_hot(indices, depth)
# Science methods
def gammaln(self, x):
return tf.lgamma(x)
def multigammaln(self, a, p):
p = self.to_float(p)
p_ = self.cast(p, 'int32')
a = a[..., None]
i = self.to_float(self.range(1, p_ + 1))
term1 = p * (p - 1) / 4. * self.log(np.pi)
term2 = self.gammaln(a - (i - 1) / 2.)
return term1 + self.sum(term2, axis=-1)
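    # Editorial note (not part of the original deepx source): `multigammaln`
    # above evaluates the log multivariate gamma function,
    #   ln Gamma_p(a) = p(p-1)/4 * ln(pi) + sum_{i=1..p} ln Gamma(a - (i-1)/2),
    # which should agree elementwise with scipy.special.multigammaln(a, p).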
def digamma(self, a):
return tf.digamma(a)
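def _linalg_identity_sketch():
    # Editorial sanity-check sketch, not part of the original deepx source.
    # It uses only numpy to illustrate the identities the backend methods
    # above rely on: logdet via the Cholesky factor, and the Kronecker
    # product built from broadcasting plus block concatenation.
    rng = np.random.RandomState(0)
    X = rng.randn(4, 4)
    A = X.dot(X.T) + 4 * np.eye(4)           # symmetric positive definite
    chol_logdet = 2 * np.log(np.diag(np.linalg.cholesky(A))).sum()
    _, np_logdet = np.linalg.slogdet(A)      # sign is +1 for SPD matrices
    assert np.allclose(chol_logdet, np_logdet)
    B = rng.randn(2, 3)
    C = rng.randn(4, 5)
    assert np.kron(B, C).shape == (8, 15)    # the shape `kronecker` reproduces in TF
    return chol_logdet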
| 33.455474
| 116
| 0.595322
| 22,541
| 0.983593
| 211
| 0.009207
| 22,018
| 0.960771
| 0
| 0
| 1,613
| 0.070384
|
18dcc7a079d7a14db43a4e9f8cd6c7a80e6794d0
| 90,257
|
py
|
Python
|
netharn/util/mplutil.py
|
JoshuaBeard/netharn
|
90773542c47363e663ee58f20fd151eb89bc313b
|
[
"Apache-2.0"
] | null | null | null |
netharn/util/mplutil.py
|
JoshuaBeard/netharn
|
90773542c47363e663ee58f20fd151eb89bc313b
|
[
"Apache-2.0"
] | null | null | null |
netharn/util/mplutil.py
|
JoshuaBeard/netharn
|
90773542c47363e663ee58f20fd151eb89bc313b
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import cv2
import pandas as pd
import numpy as np
import six
import ubelt as ub
from six.moves import zip_longest
from os.path import join, dirname
import warnings
def multi_plot(xdata=None, ydata=[], **kwargs):
r"""
plots multiple lines, bars, etc...
This is the big function that implements almost all of the heavy lifting in
this file. Any function not using this should probably find a way to use
it. It is pretty general and relatively clean.
Args:
xdata (ndarray): can also be a list of arrays
ydata (list or dict of ndarrays): can also be a single array
**kwargs:
Misc:
fnum, pnum, use_legend, legend_loc
Labels:
xlabel, ylabel, title, figtitle
ticksize, titlesize, legendsize, labelsize
Grid:
gridlinewidth, gridlinestyle
Ticks:
num_xticks, num_yticks, tickwidth, ticklength, ticksize
Data:
xmin, xmax, ymin, ymax, spread_list
# can append _list to any of these
# these can be dictionaries if ydata was also a dict
plot_kw_keys = ['label', 'color', 'marker', 'markersize',
'markeredgewidth', 'linewidth', 'linestyle']
any plot_kw key can be a scalar (corresponding to all ydatas),
a list if ydata was specified as a list, or a dict if ydata was
specified as a dict.
kind = ['bar', 'plot', ...]
if kind='plot':
spread
if kind='bar':
stacked, width
References:
matplotlib.org/examples/api/barchart_demo.html
CommandLine:
python -m netharn.util.mplutil multi_plot:0 --show
python -m netharn.util.mplutil multi_plot:1 --show
Example:
>>> autompl()
>>> xdata = [1, 2, 3, 4, 5]
>>> ydata_list = [[1, 2, 3, 4, 5], [3, 3, 3, 3, 3], [5, 4, np.nan, 2, 1], [4, 3, np.nan, 1, 0]]
>>> kwargs = {'label': ['spamΣ', 'eggs', 'jamµ', 'pram'], 'linestyle': '-'}
>>> #fig = multi_plot(xdata, ydata_list, title='$\phi_1(\\vec{x})$', xlabel='\nfds', **kwargs)
>>> fig = multi_plot(xdata, ydata_list, title='ΣΣΣµµµ', xlabel='\nfdsΣΣΣµµµ', **kwargs)
>>> show_if_requested()
Example:
>>> autompl()
>>> fig1 = multi_plot([1, 2, 3], [4, 5, 6])
>>> fig2 = multi_plot([1, 2, 3], [4, 5, 6], fnum=4)
>>> show_if_requested()
"""
import matplotlib as mpl
from matplotlib import pyplot as plt
ydata_list = ydata
if isinstance(ydata_list, dict):
# Special case where ydata is a dictionary
if isinstance(xdata, six.string_types):
# Special-er case where xdata is specified in ydata
xkey = xdata
ykeys = set(ydata_list.keys()) - {xkey}
xdata = ydata_list[xkey]
else:
ykeys = list(ydata_list.keys())
# Normalize input
ydata_list = list(ub.take(ydata_list, ykeys))
kwargs['label_list'] = kwargs.get('label_list', ykeys)
else:
ykeys = None
def is_listlike(data):
flag = isinstance(data, (list, np.ndarray, tuple, pd.Series))
flag &= hasattr(data, '__getitem__') and hasattr(data, '__len__')
return flag
def is_list_of_scalars(data):
if is_listlike(data):
if len(data) > 0 and not is_listlike(data[0]):
return True
return False
def is_list_of_lists(data):
if is_listlike(data):
if len(data) > 0 and is_listlike(data[0]):
return True
return False
# allow ydata_list to be passed without a container
if is_list_of_scalars(ydata_list):
ydata_list = [np.array(ydata_list)]
if xdata is None:
xdata = list(range(len(ydata_list[0])))
num_lines = len(ydata_list)
# Transform xdata into xdata_list
if is_list_of_lists(xdata):
xdata_list = [np.array(xd, copy=True) for xd in xdata]
else:
xdata_list = [np.array(xdata, copy=True)] * num_lines
fnum = ensure_fnum(kwargs.get('fnum', None))
pnum = kwargs.get('pnum', None)
kind = kwargs.get('kind', 'plot')
transpose = kwargs.get('transpose', False)
def parsekw_list(key, kwargs, num_lines=num_lines, ykeys=ykeys):
""" copies relevant plot commands into plot_list_kw """
if key in kwargs:
val_list = kwargs[key]
elif key + '_list' in kwargs:
            warnings.warn('*_list is deprecated, just use kwarg {}'.format(key))
val_list = kwargs[key + '_list']
elif key + 's' in kwargs:
# hack, multiple ways to do something
            warnings.warn('*s is deprecated, just use kwarg {}'.format(key))
val_list = kwargs[key + 's']
else:
val_list = None
if val_list is not None:
if isinstance(val_list, dict):
if ykeys is None:
raise ValueError('ydata is not a dict, but a property was.')
else:
val_list = [val_list[key] for key in ykeys]
if not isinstance(val_list, list):
val_list = [val_list] * num_lines
return val_list
# Parse out arguments to ax.plot
plot_kw_keys = ['label', 'color', 'marker', 'markersize',
'markeredgewidth', 'linewidth', 'linestyle', 'alpha']
    # hackish / extra args that don't go to plot, but help
extra_plot_kw_keys = ['spread_alpha', 'autolabel', 'edgecolor', 'fill']
plot_kw_keys += extra_plot_kw_keys
plot_ks_vals = [parsekw_list(key, kwargs) for key in plot_kw_keys]
plot_list_kw = dict([
(key, vals)
for key, vals in zip(plot_kw_keys, plot_ks_vals) if vals is not None
])
if 'color' not in plot_list_kw:
plot_list_kw['color'] = distinct_colors(num_lines)
if kind == 'plot':
if 'marker' not in plot_list_kw:
plot_list_kw['marker'] = distinct_markers(num_lines)
if 'spread_alpha' not in plot_list_kw:
plot_list_kw['spread_alpha'] = [.2] * num_lines
if kind == 'bar':
# Remove non-bar kwargs
for key in ['markeredgewidth', 'linewidth', 'marker', 'markersize', 'linestyle']:
plot_list_kw.pop(key, None)
stacked = kwargs.get('stacked', False)
width_key = 'height' if transpose else 'width'
if 'width_list' in kwargs:
plot_list_kw[width_key] = kwargs['width_list']
else:
width = kwargs.get('width', .9)
# if width is None:
# # HACK: need variable width
# # width = np.mean(np.diff(xdata_list[0]))
# width = .9
if not stacked:
width /= num_lines
#plot_list_kw['orientation'] = ['horizontal'] * num_lines
plot_list_kw[width_key] = [width] * num_lines
spread_list = kwargs.get('spread_list', None)
if spread_list is None:
pass
# nest into a list of dicts for each line in the multiplot
valid_keys = list(set(plot_list_kw.keys()) - set(extra_plot_kw_keys))
valid_vals = list(ub.dict_take(plot_list_kw, valid_keys))
plot_kw_list = [dict(zip(valid_keys, vals)) for vals in zip(*valid_vals)]
extra_kw_keys = [key for key in extra_plot_kw_keys if key in plot_list_kw]
extra_kw_vals = list(ub.dict_take(plot_list_kw, extra_kw_keys))
extra_kw_list = [dict(zip(extra_kw_keys, vals)) for vals in zip(*extra_kw_vals)]
# Get passed in axes or setup a new figure
ax = kwargs.get('ax', None)
if ax is None:
doclf = kwargs.get('doclf', False)
fig = figure(fnum=fnum, pnum=pnum, docla=False, doclf=doclf)
ax = plt.gca()
else:
plt.sca(ax)
fig = ax.figure
# +---------------
# Draw plot lines
ydata_list = np.array(ydata_list)
if transpose:
if kind == 'bar':
plot_func = ax.barh
elif kind == 'plot':
def plot_func(_x, _y, **kw):
return ax.plot(_y, _x, **kw)
else:
plot_func = getattr(ax, kind) # usually ax.plot
assert len(ydata_list) > 0, 'no ydata'
#assert len(extra_kw_list) == len(plot_kw_list), 'bad length'
#assert len(extra_kw_list) == len(ydata_list), 'bad length'
_iter = enumerate(zip_longest(xdata_list, ydata_list, plot_kw_list, extra_kw_list))
for count, (_xdata, _ydata, plot_kw, extra_kw) in _iter:
ymask = np.isfinite(_ydata)
ydata_ = _ydata.compress(ymask)
xdata_ = _xdata.compress(ymask)
if kind == 'bar':
if stacked:
# Plot bars on top of each other
xdata_ = xdata_
else:
# Plot bars side by side
baseoffset = (width * num_lines) / 2
lineoffset = (width * count)
                offset = baseoffset - lineoffset  # FIXME for more histogram bars
xdata_ = xdata_ - offset
# width_key = 'height' if transpose else 'width'
# plot_kw[width_key] = np.diff(xdata)
objs = plot_func(xdata_, ydata_, **plot_kw)
if kind == 'bar':
if extra_kw is not None and 'edgecolor' in extra_kw:
for rect in objs:
rect.set_edgecolor(extra_kw['edgecolor'])
if extra_kw is not None and extra_kw.get('autolabel', False):
                # FIXME: probably a more canonical way to include bar
                # autolabeling with transpose support, but this is a hack that
                # works for now
for rect in objs:
if transpose:
numlbl = width = rect.get_width()
xpos = width + ((_xdata.max() - _xdata.min()) * .005)
ypos = rect.get_y() + rect.get_height() / 2.
ha, va = 'left', 'center'
else:
numlbl = height = rect.get_height()
xpos = rect.get_x() + rect.get_width() / 2.
ypos = 1.05 * height
ha, va = 'center', 'bottom'
barlbl = '%.3f' % (numlbl,)
ax.text(xpos, ypos, barlbl, ha=ha, va=va)
# print('extra_kw = %r' % (extra_kw,))
if kind == 'plot' and extra_kw.get('fill', False):
ax.fill_between(_xdata, ydata_, alpha=plot_kw.get('alpha', 1.0),
color=plot_kw.get('color', None)) # , zorder=0)
if spread_list is not None:
# Plots a spread around plot lines usually indicating standard
# deviation
_xdata = np.array(_xdata)
spread = spread_list[count]
ydata_ave = np.array(ydata_)
y_data_dev = np.array(spread)
y_data_max = ydata_ave + y_data_dev
y_data_min = ydata_ave - y_data_dev
ax = plt.gca()
spread_alpha = extra_kw['spread_alpha']
ax.fill_between(_xdata, y_data_min, y_data_max, alpha=spread_alpha,
color=plot_kw.get('color', None)) # , zorder=0)
# L________________
#max_y = max(np.max(y_data), max_y)
#min_y = np.min(y_data) if min_y is None else min(np.min(y_data), min_y)
ydata = _ydata # HACK
xdata = _xdata # HACK
if transpose:
#xdata_list = ydata_list
ydata = xdata
# Hack / Fix any transpose issues
def transpose_key(key):
if key.startswith('x'):
return 'y' + key[1:]
elif key.startswith('y'):
return 'x' + key[1:]
elif key.startswith('num_x'):
# hackier, fixme to use regex or something
return 'num_y' + key[5:]
elif key.startswith('num_y'):
# hackier, fixme to use regex or something
return 'num_x' + key[5:]
else:
return key
kwargs = {transpose_key(key): val for key, val in kwargs.items()}
# Setup axes labeling
title = kwargs.get('title', None)
xlabel = kwargs.get('xlabel', '')
ylabel = kwargs.get('ylabel', '')
def none_or_unicode(text):
return None if text is None else ub.ensure_unicode(text)
xlabel = none_or_unicode(xlabel)
ylabel = none_or_unicode(ylabel)
title = none_or_unicode(title)
# Initial integration with mpl rcParams standards
mplrc = mpl.rcParams.copy()
mplrc.update({
# 'legend.fontsize': custom_figure.LEGEND_SIZE,
# 'axes.titlesize': custom_figure.TITLE_SIZE,
# 'axes.labelsize': custom_figure.LABEL_SIZE,
# 'legend.facecolor': 'w',
# 'font.family': 'sans-serif',
# 'xtick.labelsize': custom_figure.TICK_SIZE,
# 'ytick.labelsize': custom_figure.TICK_SIZE,
})
mplrc.update(kwargs.get('rcParams', {}))
titlesize = kwargs.get('titlesize', mplrc['axes.titlesize'])
labelsize = kwargs.get('labelsize', mplrc['axes.labelsize'])
legendsize = kwargs.get('legendsize', mplrc['legend.fontsize'])
xticksize = kwargs.get('ticksize', mplrc['xtick.labelsize'])
yticksize = kwargs.get('ticksize', mplrc['ytick.labelsize'])
family = kwargs.get('fontfamily', mplrc['font.family'])
tickformat = kwargs.get('tickformat', None)
ytickformat = kwargs.get('ytickformat', tickformat)
xtickformat = kwargs.get('xtickformat', tickformat)
# 'DejaVu Sans','Verdana', 'Arial'
weight = kwargs.get('fontweight', None)
if weight is None:
weight = 'normal'
labelkw = {
'fontproperties': mpl.font_manager.FontProperties(
weight=weight,
family=family, size=labelsize)
}
ax.set_xlabel(xlabel, **labelkw)
ax.set_ylabel(ylabel, **labelkw)
tick_fontprop = mpl.font_manager.FontProperties(family=family,
weight=weight)
if tick_fontprop is not None:
for ticklabel in ax.get_xticklabels():
ticklabel.set_fontproperties(tick_fontprop)
for ticklabel in ax.get_yticklabels():
ticklabel.set_fontproperties(tick_fontprop)
if xticksize is not None:
for ticklabel in ax.get_xticklabels():
ticklabel.set_fontsize(xticksize)
if yticksize is not None:
for ticklabel in ax.get_yticklabels():
ticklabel.set_fontsize(yticksize)
if xtickformat is not None:
# mpl.ticker.StrMethodFormatter # newstyle
# mpl.ticker.FormatStrFormatter # oldstyle
ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(xtickformat))
if ytickformat is not None:
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(ytickformat))
xtick_kw = ytick_kw = {
'width': kwargs.get('tickwidth', None),
'length': kwargs.get('ticklength', None),
}
xtick_kw = {k: v for k, v in xtick_kw.items() if v is not None}
ytick_kw = {k: v for k, v in ytick_kw.items() if v is not None}
ax.xaxis.set_tick_params(**xtick_kw)
ax.yaxis.set_tick_params(**ytick_kw)
#ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))
# Setup axes limits
if 'xlim' in kwargs:
xlim = kwargs['xlim']
if xlim is not None:
if 'xmin' not in kwargs and 'xmax' not in kwargs:
kwargs['xmin'] = xlim[0]
kwargs['xmax'] = xlim[1]
else:
raise ValueError('use xmax, xmin instead of xlim')
if 'ylim' in kwargs:
ylim = kwargs['ylim']
if ylim is not None:
if 'ymin' not in kwargs and 'ymax' not in kwargs:
kwargs['ymin'] = ylim[0]
kwargs['ymax'] = ylim[1]
else:
raise ValueError('use ymax, ymin instead of ylim')
xmin = kwargs.get('xmin', ax.get_xlim()[0])
xmax = kwargs.get('xmax', ax.get_xlim()[1])
ymin = kwargs.get('ymin', ax.get_ylim()[0])
ymax = kwargs.get('ymax', ax.get_ylim()[1])
text_type = six.text_type
if text_type(xmax) == 'data':
xmax = max([xd.max() for xd in xdata_list])
if text_type(xmin) == 'data':
xmin = min([xd.min() for xd in xdata_list])
# Setup axes ticks
num_xticks = kwargs.get('num_xticks', None)
num_yticks = kwargs.get('num_yticks', None)
if num_xticks is not None:
# TODO check if xdata is integral
if xdata.dtype.kind == 'i':
xticks = np.linspace(np.ceil(xmin), np.floor(xmax),
num_xticks).astype(np.int32)
else:
xticks = np.linspace((xmin), (xmax), num_xticks)
ax.set_xticks(xticks)
if num_yticks is not None:
if ydata.dtype.kind == 'i':
yticks = np.linspace(np.ceil(ymin), np.floor(ymax),
num_yticks).astype(np.int32)
else:
yticks = np.linspace((ymin), (ymax), num_yticks)
ax.set_yticks(yticks)
force_xticks = kwargs.get('force_xticks', None)
if force_xticks is not None:
xticks = np.array(sorted(ax.get_xticks().tolist() + force_xticks))
ax.set_xticks(xticks)
yticklabels = kwargs.get('yticklabels', None)
if yticklabels is not None:
# Hack ONLY WORKS WHEN TRANSPOSE = True
# Overrides num_yticks
ax.set_yticks(ydata)
ax.set_yticklabels(yticklabels)
xticklabels = kwargs.get('xticklabels', None)
if xticklabels is not None:
# Overrides num_xticks
ax.set_xticks(xdata)
ax.set_xticklabels(xticklabels)
xtick_rotation = kwargs.get('xtick_rotation', None)
if xtick_rotation is not None:
[lbl.set_rotation(xtick_rotation)
for lbl in ax.get_xticklabels()]
ytick_rotation = kwargs.get('ytick_rotation', None)
if ytick_rotation is not None:
[lbl.set_rotation(ytick_rotation)
for lbl in ax.get_yticklabels()]
# Axis padding
xpad = kwargs.get('xpad', None)
ypad = kwargs.get('ypad', None)
xpad_factor = kwargs.get('xpad_factor', None)
ypad_factor = kwargs.get('ypad_factor', None)
if xpad is None and xpad_factor is not None:
xpad = (xmax - xmin) * xpad_factor
if ypad is None and ypad_factor is not None:
ypad = (ymax - ymin) * ypad_factor
xpad = 0 if xpad is None else xpad
ypad = 0 if ypad is None else ypad
ypad_high = kwargs.get('ypad_high', ypad)
ypad_low = kwargs.get('ypad_low', ypad)
xpad_high = kwargs.get('xpad_high', xpad)
xpad_low = kwargs.get('xpad_low', xpad)
xmin, xmax = (xmin - xpad_low), (xmax + xpad_high)
ymin, ymax = (ymin - ypad_low), (ymax + ypad_high)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
xscale = kwargs.get('xscale', None)
yscale = kwargs.get('yscale', None)
if yscale is not None:
ax.set_yscale(yscale)
if xscale is not None:
ax.set_xscale(xscale)
gridlinestyle = kwargs.get('gridlinestyle', None)
gridlinewidth = kwargs.get('gridlinewidth', None)
gridlines = ax.get_xgridlines() + ax.get_ygridlines()
if gridlinestyle:
for line in gridlines:
line.set_linestyle(gridlinestyle)
if gridlinewidth:
for line in gridlines:
line.set_linewidth(gridlinewidth)
# Setup title
if title is not None:
titlekw = {
'fontproperties': mpl.font_manager.FontProperties(
family=family,
weight=weight,
size=titlesize)
}
ax.set_title(title, **titlekw)
use_legend = kwargs.get('use_legend', 'label' in valid_keys)
legend_loc = kwargs.get('legend_loc', 'best')
legend_alpha = kwargs.get('legend_alpha', 1.0)
if use_legend:
legendkw = {
'alpha': legend_alpha,
'fontproperties': mpl.font_manager.FontProperties(
family=family,
weight=weight,
size=legendsize)
}
legend(loc=legend_loc, ax=ax, **legendkw)
figtitle = kwargs.get('figtitle', None)
if figtitle is not None:
set_figtitle(figtitle, fontfamily=family, fontweight=weight,
size=kwargs.get('figtitlesize'))
use_darkbackground = kwargs.get('use_darkbackground', None)
lightbg = kwargs.get('lightbg', None)
if lightbg is None:
lightbg = True
if use_darkbackground is None:
use_darkbackground = not lightbg
if use_darkbackground:
_dark_background(force=use_darkbackground is True)
# TODO: return better info
return fig
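def _multi_plot_dict_sketch():
    # Editorial usage sketch, not part of the original netharn source: the
    # dict form of `ydata`, where the keys become the line labels (a string
    # `xdata` naming one of the keys is also accepted, per the branch above).
    data = {
        'train_loss': [1.0, 0.7, 0.5, 0.4],
        'vali_loss': [1.1, 0.9, 0.8, 0.8],
    }
    return multi_plot(xdata=[1, 2, 3, 4], ydata=data, xlabel='epoch',
                      ylabel='loss', fnum=1)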
def figure(fnum=None, pnum=(1, 1, 1), title=None, figtitle=None, doclf=False,
docla=False, projection=None, **kwargs):
"""
http://matplotlib.org/users/gridspec.html
Args:
fnum (int): fignum = figure number
pnum (int, str, or tuple(int, int, int)): plotnum = plot tuple
title (str): (default = None)
figtitle (None): (default = None)
docla (bool): (default = False)
doclf (bool): (default = False)
Returns:
mpl.Figure: fig
CommandLine:
python -m netharn.util.mplutil figure:0 --show
Example:
>>> autompl()
>>> import matplotlib.pyplot as plt
>>> fnum = 1
>>> fig = figure(fnum, (2, 2, 1))
>>> plt.gca().text(0.5, 0.5, "ax1", va="center", ha="center")
>>> fig = figure(fnum, (2, 2, 2))
>>> plt.gca().text(0.5, 0.5, "ax2", va="center", ha="center")
>>> show_if_requested()
Example:
>>> autompl()
>>> import matplotlib.pyplot as plt
>>> fnum = 1
>>> fig = figure(fnum, (2, 2, 1))
>>> plt.gca().text(0.5, 0.5, "ax1", va="center", ha="center")
>>> fig = figure(fnum, (2, 2, 2))
>>> plt.gca().text(0.5, 0.5, "ax2", va="center", ha="center")
>>> fig = figure(fnum, (2, 4, (1, slice(1, None))))
>>> plt.gca().text(0.5, 0.5, "ax3", va="center", ha="center")
>>> show_if_requested()
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def ensure_fig(fnum=None):
if fnum is None:
try:
fig = plt.gcf()
except Exception as ex:
fig = plt.figure()
else:
try:
fig = plt.figure(fnum)
except Exception as ex:
fig = plt.gcf()
return fig
def _convert_pnum_int_to_tup(int_pnum):
# Convert pnum to tuple format if in integer format
nr = int_pnum // 100
nc = int_pnum // 10 - (nr * 10)
px = int_pnum - (nr * 100) - (nc * 10)
pnum = (nr, nc, px)
return pnum
def _pnum_to_subspec(pnum):
if isinstance(pnum, six.string_types):
pnum = list(pnum)
nrow, ncols, plotnum = pnum
# if kwargs.get('use_gridspec', True):
# Convert old pnums to gridspec
gs = gridspec.GridSpec(nrow, ncols)
if isinstance(plotnum, (tuple, slice, list)):
subspec = gs[plotnum]
else:
subspec = gs[plotnum - 1]
return (subspec,)
def _setup_subfigure(pnum):
if isinstance(pnum, int):
pnum = _convert_pnum_int_to_tup(pnum)
axes_list = fig.get_axes()
if docla or len(axes_list) == 0:
if pnum is not None:
assert pnum[0] > 0, 'nRows must be > 0: pnum=%r' % (pnum,)
assert pnum[1] > 0, 'nCols must be > 0: pnum=%r' % (pnum,)
subspec = _pnum_to_subspec(pnum)
ax = fig.add_subplot(*subspec, projection=projection)
if len(axes_list) > 0:
ax.cla()
else:
ax = plt.gca()
else:
if pnum is not None:
subspec = _pnum_to_subspec(pnum)
ax = plt.subplot(*subspec)
else:
ax = plt.gca()
fig = ensure_fig(fnum)
if doclf:
fig.clf()
if pnum is not None:
_setup_subfigure(pnum)
# Set the title / figtitle
if title is not None:
ax = plt.gca()
ax.set_title(title)
if figtitle is not None:
fig.suptitle(figtitle)
return fig
def pandas_plot_matrix(df, rot=90, ax=None, grid=True, label=None,
zerodiag=False,
cmap='viridis', showvals=False, logscale=True):
import matplotlib as mpl
import copy
from matplotlib import pyplot as plt
if ax is None:
fig = figure(fnum=1, pnum=(1, 1, 1))
fig.clear()
ax = plt.gca()
ax = plt.gca()
values = df.values
if zerodiag:
values = values.copy()
values = values - np.diag(np.diag(values))
# aximg = ax.imshow(values, interpolation='none', cmap='viridis')
if logscale:
from matplotlib.colors import LogNorm
vmin = df[df > 0].min().min()
norm = LogNorm(vmin=vmin, vmax=values.max())
else:
norm = None
cmap = copy.copy(mpl.cm.get_cmap(cmap)) # copy the default cmap
cmap.set_bad((0, 0, 0))
aximg = ax.matshow(values, interpolation='none', cmap=cmap, norm=norm)
# aximg = ax.imshow(values, interpolation='none', cmap='viridis', norm=norm)
# ax.imshow(values, interpolation='none', cmap='viridis')
ax.grid(False)
cax = plt.colorbar(aximg, ax=ax)
if label is not None:
cax.set_label(label)
ax.set_xticks(list(range(len(df.index))))
ax.set_xticklabels([lbl[0:100] for lbl in df.index])
for lbl in ax.get_xticklabels():
lbl.set_rotation(rot)
for lbl in ax.get_xticklabels():
lbl.set_horizontalalignment('center')
ax.set_yticks(list(range(len(df.columns))))
ax.set_yticklabels([lbl[0:100] for lbl in df.columns])
for lbl in ax.get_yticklabels():
lbl.set_horizontalalignment('right')
for lbl in ax.get_yticklabels():
lbl.set_verticalalignment('center')
# Grid lines around the pixels
if grid:
offset = -.5
xlim = [-.5, len(df.columns)]
ylim = [-.5, len(df.index)]
segments = []
for x in range(ylim[1]):
xdata = [x + offset, x + offset]
ydata = ylim
segment = list(zip(xdata, ydata))
segments.append(segment)
for y in range(xlim[1]):
xdata = xlim
ydata = [y + offset, y + offset]
segment = list(zip(xdata, ydata))
segments.append(segment)
bingrid = mpl.collections.LineCollection(segments, color='w', linewidths=1)
ax.add_collection(bingrid)
if showvals:
x_basis = np.arange(len(df.columns))
y_basis = np.arange(len(df.index))
x, y = np.meshgrid(x_basis, y_basis)
for c, r in zip(x.flatten(), y.flatten()):
val = df.iloc[r, c]
ax.text(c, r, val, va='center', ha='center', color='white')
return ax
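def _pandas_plot_matrix_sketch():
    # Editorial usage sketch, not part of the original netharn source: render
    # a small confusion-matrix-like DataFrame. `logscale=False` sidesteps the
    # LogNorm branch, which needs at least one strictly positive entry.
    df = pd.DataFrame([[5, 1, 0], [2, 7, 1], [0, 3, 9]],
                      index=['cat', 'dog', 'bird'],
                      columns=['cat', 'dog', 'bird'])
    return pandas_plot_matrix(df, rot=45, showvals=True, logscale=False)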
def axes_extent(axs, pad=0.0):
"""
Get the full extent of a group of axes, including axes labels, tick labels,
and titles.
"""
import itertools as it
import matplotlib as mpl
def axes_parts(ax):
yield ax
for label in ax.get_xticklabels():
if label.get_text():
yield label
for label in ax.get_yticklabels():
if label.get_text():
yield label
xlabel = ax.get_xaxis().get_label()
ylabel = ax.get_yaxis().get_label()
for label in (xlabel, ylabel, ax.title):
if label.get_text():
yield label
items = it.chain.from_iterable(axes_parts(ax) for ax in axs)
extents = [item.get_window_extent() for item in items]
#mpl.transforms.Affine2D().scale(1.1)
extent = mpl.transforms.Bbox.union(extents)
extent = extent.expanded(1.0 + pad, 1.0 + pad)
return extent
def extract_axes_extents(fig, combine=False, pad=0.0):
# Make sure we draw the axes first so we can
# extract positions from the text objects
import matplotlib as mpl
fig.canvas.draw()
# Group axes that belong together
atomic_axes = []
seen_ = set([])
for ax in fig.axes:
if ax not in seen_:
atomic_axes.append([ax])
seen_.add(ax)
dpi_scale_trans_inv = fig.dpi_scale_trans.inverted()
axes_bboxes_ = [axes_extent(axs, pad) for axs in atomic_axes]
axes_extents_ = [extent.transformed(dpi_scale_trans_inv) for extent in axes_bboxes_]
# axes_extents_ = axes_bboxes_
if combine:
# Grab include extents of figure text as well
# FIXME: This might break on OSX
# http://stackoverflow.com/questions/22667224/bbox-backend
renderer = fig.canvas.get_renderer()
for mpl_text in fig.texts:
bbox = mpl_text.get_window_extent(renderer=renderer)
extent_ = bbox.expanded(1.0 + pad, 1.0 + pad)
extent = extent_.transformed(dpi_scale_trans_inv)
# extent = extent_
axes_extents_.append(extent)
axes_extents = mpl.transforms.Bbox.union(axes_extents_)
else:
axes_extents = axes_extents_
# if True:
# axes_extents.x0 = 0
# # axes_extents.y1 = 0
return axes_extents
def adjust_subplots(left=None, right=None, bottom=None, top=None, wspace=None,
hspace=None, fig=None):
"""
Kwargs:
left (float): left side of the subplots of the figure
right (float): right side of the subplots of the figure
bottom (float): bottom of the subplots of the figure
top (float): top of the subplots of the figure
wspace (float): width reserved for blank space between subplots
hspace (float): height reserved for blank space between subplots
"""
from matplotlib import pyplot as plt
kwargs = dict(left=left, right=right, bottom=bottom, top=top,
wspace=wspace, hspace=hspace)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
if fig is None:
fig = plt.gcf()
subplotpars = fig.subplotpars
adjust_dict = subplotpars.__dict__.copy()
del adjust_dict['validate']
adjust_dict.update(kwargs)
fig.subplots_adjust(**adjust_dict)
def render_figure_to_image(fig, **savekw):
import io
import cv2
import matplotlib as mpl
axes_extents = extract_axes_extents(fig)
extent = mpl.transforms.Bbox.union(axes_extents)
with io.BytesIO() as stream:
# This call takes 23% - 15% of the time depending on settings
fig.savefig(stream, bbox_inches=extent, **savekw)
# fig.savefig(stream, **savekw)
stream.seek(0)
        data = np.frombuffer(stream.getvalue(), dtype=np.uint8)
im_bgra = cv2.imdecode(data, cv2.IMREAD_UNCHANGED)
return im_bgra
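def _render_figure_sketch():
    # Editorial usage sketch, not part of the original netharn source: draw a
    # trivial plot and grab it as a BGRA uint8 array (assumes a non-interactive
    # backend such as Agg and that cv2 can decode the saved PNG bytes).
    from matplotlib import pyplot as plt
    fig = figure(fnum=1, doclf=True)
    plt.plot([0, 1, 2], [0, 1, 4])
    im_bgra = render_figure_to_image(fig, dpi=100, transparent=True)
    return im_bgra.shape  # (height, width, 4)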
def savefig2(fig, fpath, **kwargs):
"""
Does a tight layout and saves the figure with transparency
"""
import matplotlib as mpl
if 'transparent' not in kwargs:
kwargs['transparent'] = True
if 'extent' not in kwargs:
axes_extents = extract_axes_extents(fig)
extent = mpl.transforms.Bbox.union(axes_extents)
kwargs['extent'] = extent
fig.savefig(fpath, **kwargs)
def copy_figure_to_clipboard(fig):
"""
References:
https://stackoverflow.com/questions/17676373/python-matplotlib-pyqt-copy-image-to-clipboard
"""
print('Copying figure %d to the clipboard' % fig.number)
import matplotlib as mpl
app = mpl.backends.backend_qt5.qApp
QtGui = mpl.backends.backend_qt5.QtGui
im_bgra = render_figure_to_image(fig, transparent=True)
im_rgba = cv2.cvtColor(im_bgra, cv2.COLOR_BGRA2RGBA)
im = im_rgba
QImage = QtGui.QImage
qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_RGBA8888)
clipboard = app.clipboard()
clipboard.setImage(qim)
# size = fig.canvas.size()
# width, height = size.width(), size.height()
# qim = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
# QtWidgets = mpl.backends.backend_qt5.QtWidgets
# pixmap = QtWidgets.QWidget.grab(fig.canvas)
# clipboard.setPixmap(pixmap)
def dict_intersection(dict1, dict2):
r"""
Args:
dict1 (dict):
dict2 (dict):
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --exec-dict_intersection
Example:
>>> # ENABLE_DOCTEST
>>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}
>>> mergedict_ = dict_intersection(dict1, dict2)
>>> print(ub.repr2(mergedict_, nl=0))
{'b': 2, 'c': 3}
"""
isect_keys = set(dict1.keys()).intersection(set(dict2.keys()))
# maintain order if possible
if isinstance(dict1, ub.odict):
isect_keys_ = [k for k in dict1.keys() if k in isect_keys]
_dict_cls = ub.odict
else:
isect_keys_ = isect_keys
_dict_cls = dict
dict_isect = _dict_cls(
(k, dict1[k]) for k in isect_keys_ if dict1[k] == dict2[k]
)
return dict_isect
def _dark_background(ax=None, doubleit=False, force=False):
r"""
Args:
ax (None): (default = None)
doubleit (bool): (default = False)
CommandLine:
python -m .draw_func2 --exec-_dark_background --show
Example:
>>> # ENABLE_DOCTEST
>>> autompl()
>>> fig = figure()
>>> _dark_background()
>>> show_if_requested()
"""
import matplotlib as mpl
from matplotlib import pyplot as plt
def is_using_style(style):
style_dict = mpl.style.library[style]
return len(dict_intersection(style_dict, mpl.rcParams)) == len(style_dict)
if force:
from mpl_toolkits.mplot3d import Axes3D
BLACK = np.array(( 0, 0, 0, 255)) / 255.0
# Should use mpl style dark background instead
bgcolor = BLACK * .9
if ax is None:
ax = plt.gca()
if isinstance(ax, Axes3D):
ax.set_axis_bgcolor(bgcolor)
ax.tick_params(colors='white')
return
xy, width, height = _get_axis_xy_width_height(ax)
if doubleit:
halfw = (doubleit) * (width / 2)
halfh = (doubleit) * (height / 2)
xy = (xy[0] - halfw, xy[1] - halfh)
width *= (doubleit + 1)
height *= (doubleit + 1)
rect = mpl.patches.Rectangle(xy, width, height, lw=0, zorder=0)
rect.set_clip_on(True)
rect.set_fill(True)
rect.set_color(bgcolor)
rect.set_zorder(-99999999999)
rect = ax.add_patch(rect)
def _get_axis_xy_width_height(ax=None, xaug=0, yaug=0, waug=0, haug=0):
""" gets geometry of a subplot """
from matplotlib import pyplot as plt
if ax is None:
ax = plt.gca()
autoAxis = ax.axis()
xy = (autoAxis[0] + xaug, autoAxis[2] + yaug)
width = (autoAxis[1] - autoAxis[0]) + waug
height = (autoAxis[3] - autoAxis[2]) + haug
return xy, width, height
_LEGEND_LOCATION = {
'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
def set_figtitle(figtitle, subtitle='', forcefignum=True, incanvas=True,
size=None, fontfamily=None, fontweight=None,
fig=None):
r"""
Args:
figtitle (?):
subtitle (str): (default = '')
forcefignum (bool): (default = True)
incanvas (bool): (default = True)
fontfamily (None): (default = None)
fontweight (None): (default = None)
size (None): (default = None)
fig (None): (default = None)
CommandLine:
python -m .custom_figure set_figtitle --show
Example:
>>> # DISABLE_DOCTEST
>>> autompl()
>>> fig = figure(fnum=1, doclf=True)
>>> result = set_figtitle(figtitle='figtitle', fig=fig)
>>> # xdoc: +REQUIRES(--show)
>>> show_if_requested()
"""
from matplotlib import pyplot as plt
if figtitle is None:
figtitle = ''
if fig is None:
fig = plt.gcf()
figtitle = ub.ensure_unicode(figtitle)
subtitle = ub.ensure_unicode(subtitle)
if incanvas:
if subtitle != '':
subtitle = '\n' + subtitle
prop = {
'family': fontfamily,
'weight': fontweight,
'size': size,
}
prop = {k: v for k, v in prop.items() if v is not None}
sup = fig.suptitle(figtitle + subtitle)
if prop:
fontproperties = sup.get_fontproperties().copy()
for key, val in prop.items():
getattr(fontproperties, 'set_' + key)(val)
sup.set_fontproperties(fontproperties)
# fontproperties = mpl.font_manager.FontProperties(**prop)
else:
fig.suptitle('')
# Set title in the window
window_figtitle = ('fig(%d) ' % fig.number) + figtitle
window_figtitle = window_figtitle.replace('\n', ' ')
fig.canvas.set_window_title(window_figtitle)
def legend(loc='best', fontproperties=None, size=None, fc='w', alpha=1,
ax=None, handles=None):
r"""
Args:
loc (str): (default = 'best')
fontproperties (None): (default = None)
size (None): (default = None)
Ignore:
>>> # ENABLE_DOCTEST
>>> autompl()
>>> loc = 'best'
>>> xdata = np.linspace(-6, 6)
>>> ydata = np.sin(xdata)
>>> plt.plot(xdata, ydata, label='sin')
>>> fontproperties = None
>>> size = None
>>> result = legend(loc, fontproperties, size)
>>> print(result)
>>> show_if_requested()
"""
from matplotlib import pyplot as plt
assert loc in _LEGEND_LOCATION or loc == 'best', (
'invalid loc. try one of %r' % (_LEGEND_LOCATION,))
if ax is None:
ax = plt.gca()
if fontproperties is None:
prop = {}
if size is not None:
prop['size'] = size
# prop['weight'] = 'normal'
# prop['family'] = 'sans-serif'
else:
prop = fontproperties
legendkw = dict(loc=loc)
if prop:
legendkw['prop'] = prop
if handles is not None:
legendkw['handles'] = handles
legend = ax.legend(**legendkw)
if legend:
legend.get_frame().set_fc(fc)
legend.get_frame().set_alpha(alpha)
def distinct_colors(N, brightness=.878, randomize=True, hue_range=(0.0, 1.0), cmap_seed=None):
r"""
Args:
N (int):
brightness (float):
Returns:
list: RGB_tuples
CommandLine:
python -m color_funcs --test-distinct_colors --N 2 --show --hue-range=0.05,.95
python -m color_funcs --test-distinct_colors --N 3 --show --hue-range=0.05,.95
python -m color_funcs --test-distinct_colors --N 4 --show --hue-range=0.05,.95
python -m .color_funcs --test-distinct_colors --N 3 --show --no-randomize
python -m .color_funcs --test-distinct_colors --N 4 --show --no-randomize
python -m .color_funcs --test-distinct_colors --N 6 --show --no-randomize
python -m .color_funcs --test-distinct_colors --N 20 --show
References:
http://blog.jianhuashao.com/2011/09/generate-n-distinct-colors.html
CommandLine:
python -m .color_funcs --exec-distinct_colors --show
python -m .color_funcs --exec-distinct_colors --show --no-randomize --N 50
python -m .color_funcs --exec-distinct_colors --show --cmap_seed=foobar
Ignore:
>>> # build test data
>>> autompl()
>>> N = ub.smartcast(ub.get_argval('--N', default=2), int) # FIXME
>>> randomize = not ub.argflag('--no-randomize')
>>> brightness = 0.878
>>> # execute function
>>> cmap_seed = ub.get_argval('--cmap_seed', default=None)
>>> hue_range = ub.smartcast(ub.get_argval('--hue-range', default=(0.00, 1.0)), list) #FIXME
>>> RGB_tuples = distinct_colors(N, brightness, randomize, hue_range, cmap_seed=cmap_seed)
>>> # verify results
>>> assert len(RGB_tuples) == N
>>> result = str(RGB_tuples)
>>> print(result)
>>> # xdoctest: +REQUIRES(--show)
>>> color_list = RGB_tuples
>>> testshow_colors(color_list)
>>> show_if_requested()
"""
# TODO: Add sin wave modulation to the sat and value
# HACK for white figures
from matplotlib import pyplot as plt
import colorsys
remove_yellow = True
use_jet = False
if use_jet:
cmap = plt.cm.jet
RGB_tuples = list(map(tuple, cmap(np.linspace(0, 1, N))))
elif cmap_seed is not None:
# Randomized map based on a seed
#cmap_ = 'Set1'
#cmap_ = 'Dark2'
choices = [
#'Set1', 'Dark2',
'jet',
#'gist_rainbow',
#'rainbow',
#'gnuplot',
#'Accent'
]
cmap_hack = ub.argval('--cmap-hack', default=None)
ncolor_hack = ub.argval('--ncolor-hack', default=None)
if cmap_hack is not None:
choices = [cmap_hack]
if ncolor_hack is not None:
N = int(ncolor_hack)
N_ = N
seed = sum(list(map(ord, ub.hash_data(cmap_seed))))
rng = np.random.RandomState(seed + 48930)
cmap_str = rng.choice(choices, 1)[0]
#print('cmap_str = %r' % (cmap_str,))
cmap = plt.cm.get_cmap(cmap_str)
#.hashstr27(cmap_seed)
#cmap_seed = 0
#pass
jitter = (rng.randn(N) / (rng.randn(100).max() / 2)).clip(-1, 1) * ((1 / (N ** 2)))
range_ = np.linspace(0, 1, N, endpoint=False)
#print('range_ = %r' % (range_,))
range_ = range_ + jitter
#print('range_ = %r' % (range_,))
while not (np.all(range_ >= 0) and np.all(range_ <= 1)):
range_[range_ < 0] = np.abs(range_[range_ < 0] )
range_[range_ > 1] = 2 - range_[range_ > 1]
#print('range_ = %r' % (range_,))
shift = rng.rand()
range_ = (range_ + shift) % 1
#print('jitter = %r' % (jitter,))
#print('shift = %r' % (shift,))
#print('range_ = %r' % (range_,))
if ncolor_hack is not None:
range_ = range_[0:N_]
RGB_tuples = list(map(tuple, cmap(range_)))
else:
sat = brightness
val = brightness
hmin, hmax = hue_range
if remove_yellow:
hue_skips = [(.13, .24)]
else:
hue_skips = []
hue_skip_ranges = [_[1] - _[0] for _ in hue_skips]
total_skip = sum(hue_skip_ranges)
hmax_ = hmax - total_skip
hue_list = np.linspace(hmin, hmax_, N, endpoint=False, dtype=np.float)
# Remove colors (like hard to see yellows) in specified ranges
for skip, range_ in zip(hue_skips, hue_skip_ranges):
hue_list = [hue if hue <= skip[0] else hue + range_ for hue in hue_list]
HSV_tuples = [(hue, sat, val) for hue in hue_list]
RGB_tuples = [colorsys.hsv_to_rgb(*x) for x in HSV_tuples]
if randomize:
deterministic_shuffle(RGB_tuples)
return RGB_tuples
def distinct_markers(num, style='astrisk', total=None, offset=0):
r"""
Args:
num (?):
CommandLine:
python -m .draw_func2 --exec-distinct_markers --show
python -m .draw_func2 --exec-distinct_markers --style=star --show
python -m .draw_func2 --exec-distinct_markers --style=polygon --show
Ignore:
>>> autompl()
>>> style = ub.get_argval('--style', type_=str, default='astrisk')
>>> marker_list = distinct_markers(10, style)
>>> x_data = np.arange(0, 3)
>>> for count, (marker) in enumerate(marker_list):
>>> plt.plot(x_data, [count] * len(x_data), marker=marker, markersize=10, linestyle='', label=str(marker))
>>> legend()
>>> show_if_requested()
"""
num_sides = 3
style_num = {
'astrisk': 2,
'star': 1,
'polygon': 0,
'circle': 3
}[style]
if total is None:
total = num
total_degrees = 360 / num_sides
marker_list = [
(num_sides, style_num, total_degrees * (count + offset) / total)
for count in range(num)
]
return marker_list
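def _distinct_markers_sketch():
    # Editorial usage sketch, not part of the original netharn source: the
    # returned markers are matplotlib (numsides, style, angle) tuples, where
    # style 0=polygon, 1=star, 2=asterisk and 3=circle.
    markers = distinct_markers(4, style='polygon')
    # -> [(3, 0, 0.0), (3, 0, 30.0), (3, 0, 60.0), (3, 0, 90.0)]
    return markers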
def deterministic_shuffle(list_, rng=0):
r"""
Args:
list_ (list):
seed (int):
Returns:
list: list_
Example:
>>> list_ = [1, 2, 3, 4, 5, 6]
>>> seed = 1
>>> list_ = deterministic_shuffle(list_, seed)
>>> result = str(list_)
>>> print(result)
[3, 2, 5, 1, 4, 6]
"""
from netharn import util
rng = util.ensure_rng(rng)
rng.shuffle(list_)
return list_
_BASE_FNUM = 9001
def next_fnum(new_base=None):
global _BASE_FNUM
if new_base is not None:
_BASE_FNUM = new_base
_BASE_FNUM += 1
return _BASE_FNUM
def ensure_fnum(fnum):
if fnum is None:
return next_fnum()
return fnum
def _save_requested(fpath_, save_parts):
    raise NotImplementedError('haven\'t done this yet')
# dpi = ub.argval('--dpi', type_=int, default=200)
from os.path import expanduser
from matplotlib import pyplot as plt
dpi = 200
fpath_ = expanduser(fpath_)
print('Figure save was requested')
# arg_dict = ut.get_arg_dict(prefix_list=['--', '-'],
# type_hints={'t': list, 'a': list})
arg_dict = {}
# HACK
arg_dict = {
key: (val[0] if len(val) == 1 else '[' + ']['.join(val) + ']')
if isinstance(val, list) else val
for key, val in arg_dict.items()
}
fpath_ = fpath_.format(**arg_dict)
fpath_ = fpath_.replace(' ', '').replace('\'', '').replace('"', '')
dpath = ub.argval('--dpath', type_=str, default=None)
if dpath is None:
gotdpath = False
dpath = '.'
else:
gotdpath = True
fpath = join(dpath, fpath_)
if not gotdpath:
dpath = dirname(fpath_)
print('dpath = %r' % (dpath,))
fig = plt.gcf()
fig.dpi = dpi
fpath_strict = ub.truepath(fpath)
CLIP_WHITE = ub.argflag('--clipwhite')
from netharn import util
if save_parts:
# TODO: call save_parts instead, but we still need to do the
# special grouping.
# Group axes that belong together
atomic_axes = []
seen_ = set([])
for ax in fig.axes:
div = _get_plotdat(ax, _DF2_DIVIDER_KEY, None)
if div is not None:
df2_div_axes = _get_plotdat_dict(ax).get('df2_div_axes', [])
seen_.add(ax)
seen_.update(set(df2_div_axes))
atomic_axes.append([ax] + df2_div_axes)
# TODO: pad these a bit
else:
if ax not in seen_:
atomic_axes.append([ax])
seen_.add(ax)
hack_axes_group_row = ub.argflag('--grouprows')
if hack_axes_group_row:
groupid_list = []
for axs in atomic_axes:
for ax in axs:
groupid = ax.colNum
groupid_list.append(groupid)
groups = ub.group_items(atomic_axes, groupid_list)
new_groups = list(map(ub.flatten, groups.values()))
atomic_axes = new_groups
#[[(ax.rowNum, ax.colNum) for ax in axs] for axs in atomic_axes]
# save all rows of each column
subpath_list = save_parts(fig=fig, fpath=fpath_strict,
grouped_axes=atomic_axes, dpi=dpi)
absfpath_ = subpath_list[-1]
if CLIP_WHITE:
for subpath in subpath_list:
# remove white borders
util.clipwhite_ondisk(subpath, subpath)
else:
savekw = {}
# savekw['transparent'] = fpath.endswith('.png') and not noalpha
savekw['transparent'] = ub.argflag('--alpha')
savekw['dpi'] = dpi
savekw['edgecolor'] = 'none'
savekw['bbox_inches'] = extract_axes_extents(fig, combine=True) # replaces need for clipwhite
absfpath_ = ub.truepath(fpath)
fig.savefig(absfpath_, **savekw)
if CLIP_WHITE:
# remove white borders
fpath_in = fpath_out = absfpath_
util.clipwhite_ondisk(fpath_in, fpath_out)
if ub.argflag(('--diskshow', '--ds')):
# show what we wrote
ub.startfile(absfpath_)
def show_if_requested(N=1):
"""
Used at the end of tests. Handles command line arguments for saving figures
    References:
http://stackoverflow.com/questions/4325733/save-a-subplot-in-matplotlib
"""
import matplotlib.pyplot as plt
# Process figures adjustments from command line before a show or a save
# udpate_adjust_subplots()
# if use_argv:
# # hack to take args from commandline
# adjust_dict = ut.parse_dict_from_argv(adjust_dict)
# adjust_subplots(use_argv=True)
# def update_figsize():
# """ updates figsize based on command line """
# figsize = ub.argval('--figsize', type_=list, default=None)
# if figsize is not None:
# # Enforce inches and DPI
# fig = plt.gcf()
# figsize = [eval(term) if isinstance(term, str) else term
# for term in figsize]
# figw, figh = figsize[0], figsize[1]
# print('get_size_inches = %r' % (fig.get_size_inches(),))
# print('fig w,h (inches) = %r, %r' % (figw, figh))
# fig.set_size_inches(figw, figh)
# #print('get_size_inches = %r' % (fig.get_size_inches(),))
# update_figsize()
save_parts = ub.argflag('--saveparts')
fpath_ = ub.argval('--save', default=None)
if fpath_ is None:
fpath_ = ub.argval('--saveparts', default=None)
save_parts = True
if fpath_ is not None:
_save_requested(fpath_, save_parts)
# elif ub.argflag('--cmd'):
# pass
if ub.argflag('--show'):
# if ub.argflag('--tile'):
# if ut.get_computer_name().lower() in ['hyrule']:
# fig_presenter.all_figures_tile(percent_w=.5, monitor_num=0)
# else:
# fig_presenter.all_figures_tile()
# if ub.argflag('--present'):
# fig_presenter.present()
# for fig in fig_presenter.get_all_figures():
# fig.set_dpi(80)
plt.show()
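# Hedged sketch of the command-line contract that show_if_requested() honors, based on
# the ub.argflag/ub.argval calls above (the script name is illustrative only):
#   python my_script.py --show                -> plt.show()
#   python my_script.py --save out.png        -> _save_requested('out.png', save_parts=False)
#   python my_script.py --saveparts out.png   -> one saved image per group of axes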
def save_parts(fig, fpath, grouped_axes=None, dpi=None):
"""
FIXME: this works in mpl 2.0.0, but not 2.0.2
Args:
fig (?):
fpath (str): file path string
dpi (None): (default = None)
Returns:
list: subpaths
CommandLine:
python -m draw_func2 save_parts
Ignore:
>>> # DISABLE_DOCTEST
>>> autompl()
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
>>> def testimg(fname):
>>> return plt.imread(mpl.cbook.get_sample_data(fname))
>>> fnames = ['grace_hopper.png', 'ada.png'] * 4
>>> fig = plt.figure(1)
>>> for c, fname in enumerate(fnames, start=1):
>>> ax = fig.add_subplot(3, 4, c)
>>> ax.imshow(testimg(fname))
>>> ax.set_title(fname[0:3] + str(c))
>>> ax.set_xticks([])
>>> ax.set_yticks([])
>>> ax = fig.add_subplot(3, 1, 3)
>>> ax.plot(np.sin(np.linspace(0, np.pi * 2)))
>>> ax.set_xlabel('xlabel')
>>> ax.set_ylabel('ylabel')
>>> ax.set_title('title')
>>> fpath = 'test_save_parts.png'
>>> adjust_subplots(fig=fig, wspace=.3, hspace=.3, top=.9)
>>> subpaths = save_parts(fig, fpath, dpi=300)
>>> fig.savefig(fpath)
>>> ub.startfile(subpaths[0])
>>> ub.startfile(fpath)
"""
if dpi:
# Need to set figure dpi before we draw
fig.dpi = dpi
# We need to draw the figure before calling get_window_extent
# (or we can figure out how to set the renderer object)
# if getattr(fig.canvas, 'renderer', None) is None:
fig.canvas.draw()
# Group axes that belong together
if grouped_axes is None:
grouped_axes = []
for ax in fig.axes:
grouped_axes.append([ax])
subpaths = []
_iter = enumerate(grouped_axes, start=0)
_iter = ub.ProgIter(list(_iter), label='save subfig')
for count, axs in _iter:
subpath = ub.augpath(fpath, suffix=chr(count + 65))
extent = axes_extent(axs).transformed(fig.dpi_scale_trans.inverted())
savekw = {}
savekw['transparent'] = ub.argflag('--alpha')
if dpi is not None:
savekw['dpi'] = dpi
savekw['edgecolor'] = 'none'
fig.savefig(subpath, bbox_inches=extent, **savekw)
subpaths.append(subpath)
return subpaths
_qtensured = False
def _current_ipython_session():
"""
Returns a reference to the current IPython session, if one is running
"""
try:
__IPYTHON__
except NameError:
return None
else:
import IPython
ipython = IPython.get_ipython()
# if ipython is None we must have exited ipython at some point
return ipython
def qtensure():
"""
If you are in an IPython session, ensures that your backend is Qt.
"""
global _qtensured
if not _qtensured:
ipython = _current_ipython_session()
if ipython:
import sys
if 'PyQt4' in sys.modules:
ipython.magic('pylab qt4 --no-import-all')
_qtensured = True
else:
ipython.magic('pylab qt5 --no-import-all')
_qtensured = True
def aggensure():
"""
Ensures that you are in agg mode as long as IPython is not running
This might help prevent errors in tmux like:
qt.qpa.screen: QXcbConnection: Could not connect to display localhost:10.0
Could not connect to any X display.
"""
import matplotlib as mpl
current_backend = mpl.get_backend()
if current_backend != 'agg':
ipython = _current_ipython_session()
if not ipython:
set_mpl_backend('agg')
def set_mpl_backend(backend):
"""
Args:
backend (str): name of backend to use (e.g. Agg, PyQt)
"""
import sys
import matplotlib as mpl
if backend.lower().startswith('qt'):
# handle interactive qt case
qtensure()
if backend != mpl.get_backend():
# If we have already imported pyplot, then we need to use experimental
# behavior. Otherwise, we can just set the backend.
if 'matplotlib.pyplot' in sys.modules:
from matplotlib import pyplot as plt
plt.switch_backend(backend)
else:
mpl.use(backend)
def autompl():
"""
Uses platform heuristics to automatically set the mpl backend.
If no display is available it will be set to agg, otherwise we will try to
use the cross-platform Qt5Agg backend.
"""
import os
import sys
if sys.platform.startswith('win32'):
# TODO: something reasonable
pass
else:
DISPLAY = os.environ.get('DISPLAY', '')
if not DISPLAY:
set_mpl_backend('agg')
else:
set_mpl_backend('Qt5Agg')
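# Minimal usage sketch for the backend helpers above; the function name is invented for
# illustration, and calling it with a display present assumes a Qt binding is installed.
def _demo_autompl():
    """Pick a backend automatically, then report what matplotlib ended up with."""
    import matplotlib as mpl
    autompl()  # 'agg' when $DISPLAY is empty, otherwise 'Qt5Agg' (non-win32)
    return mpl.get_backend()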
def imshow(img, fnum=None, title=None, figtitle=None, pnum=None,
interpolation='nearest', cmap=None, heatmap=False,
data_colorbar=False, xlabel=None, redraw_image=True,
colorspace='bgr', ax=None, alpha=None, norm=None, **kwargs):
r"""
Args:
img (ndarray): image data
fnum (int): figure number
colorspace (str): if the data is 3-4 channels, this indicates the colorspace
1 channel data is assumed grayscale. 4 channels assumes alpha.
title (str):
figtitle (None):
pnum (tuple): plot number
interpolation (str): other interpolations = nearest, bicubic, bilinear
cmap (None):
heatmap (bool):
data_colorbar (bool):
darken (None):
        redraw_image (bool): used when calling imshow over and over. if False,
            doesn't redraw the image part.
    Kwargs:
        docla, doclf, projection
    Returns:
        tuple: (fig, ax)
Ignore:
>>> autompl()
>>> img_fpath = ut.grab_test_imgpath('carl.jpg')
>>> img = util.imread(img_fpath)
>>> (fig, ax) = imshow(img)
>>> result = ('(fig, ax) = %s' % (str((fig, ax)),))
>>> print(result)
>>> ut.show_if_requested()
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
if ax is not None:
fig = ax.figure
nospecial = True
else:
fig = figure(fnum=fnum, pnum=pnum, title=title, figtitle=figtitle, **kwargs)
ax = plt.gca()
nospecial = False
#ax.set_xticks([])
#ax.set_yticks([])
#return fig, ax
if not redraw_image:
return fig, ax
if isinstance(img, six.string_types):
# Allow for path to image to be specified
from netharn import util
img_fpath = img
img = util.imread(img_fpath)
plt_imshow_kwargs = {
'interpolation': interpolation,
#'cmap': plt.get_cmap('gray'),
}
if alpha is not None:
plt_imshow_kwargs['alpha'] = alpha
if norm is not None:
if norm is True:
norm = mpl.colors.Normalize()
plt_imshow_kwargs['norm'] = norm
else:
if cmap is None and not heatmap and not nospecial:
plt_imshow_kwargs['vmin'] = 0
plt_imshow_kwargs['vmax'] = 255
if heatmap:
cmap = 'hot'
# Handle tensor chw format in most cases
if img.ndim == 3:
if img.shape[0] == 3 or img.shape[0] == 1:
if img.shape[2] > 4:
# probably in chw format
img = img.transpose(1, 2, 0)
try:
if len(img.shape) == 3 and (img.shape[2] == 3 or img.shape[2] == 4):
# img is in a color format
from netharn import util
dst_space = 'rgb'
if img.shape[2] == 4:
colorspace += 'a'
dst_space += 'a'
imgRGB = util.convert_colorspace(img, dst_space=dst_space,
src_space=colorspace)
if imgRGB.dtype.kind == 'f':
maxval = imgRGB.max()
if maxval > 1.01 and maxval < 256:
imgRGB = np.array(imgRGB, dtype=np.uint8)
ax.imshow(imgRGB, **plt_imshow_kwargs)
elif len(img.shape) == 2 or (len(img.shape) == 3 and img.shape[2] == 1):
# img is in grayscale
if len(img.shape) == 3:
imgGRAY = img.reshape(img.shape[0:2])
else:
imgGRAY = img
if cmap is None:
cmap = plt.get_cmap('gray')
if isinstance(cmap, six.string_types):
cmap = plt.get_cmap(cmap)
# for some reason gray floats aren't working right
if imgGRAY.max() <= 1.01 and imgGRAY.min() >= -1E-9:
imgGRAY = (imgGRAY * 255).astype(np.uint8)
ax.imshow(imgGRAY, cmap=cmap, **plt_imshow_kwargs)
else:
raise AssertionError(
'unknown image format. img.dtype=%r, img.shape=%r' %
(img.dtype, img.shape))
except TypeError as te:
print('[df2] imshow ERROR %r' % (te,))
raise
except Exception as ex:
print('!!!!!!!!!!!!!!WARNING!!!!!!!!!!!')
print('[df2] type(img) = %r' % type(img))
if not isinstance(img, np.ndarray):
            print('!!!!!!!!!!!!!!ERROR!!!!!!!!!!!')
pass
#print('img = %r' % (img,))
print('[df2] img.dtype = %r' % (img.dtype,))
print('[df2] type(img) = %r' % (type(img),))
print('[df2] img.shape = %r' % (img.shape,))
print('[df2] imshow ERROR %r' % ex)
raise
#plt.set_cmap('gray')
ax.set_xticks([])
ax.set_yticks([])
if data_colorbar is True:
scores = np.unique(img.flatten())
if cmap is None:
cmap = 'hot'
colors = scores_to_color(scores, cmap)
colorbar(scores, colors)
if xlabel is not None:
ax.set_xlabel(xlabel)
if figtitle is not None:
set_figtitle(figtitle)
return fig, ax
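# A small, hedged usage sketch for imshow(): display a random uint8 BGR image on a fresh
# figure. The helper name and the noise image are illustrative only.
def _demo_imshow():
    img = (np.random.rand(32, 32, 3) * 255).astype(np.uint8)  # fake 3-channel BGR image
    fig, ax = imshow(img, title='random noise', colorspace='bgr')
    return fig, ax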
def colorbar(scalars, colors, custom=False, lbl=None, ticklabels=None,
float_format='%.2f', **kwargs):
"""
adds a color bar next to the axes based on specific scalars
Args:
scalars (ndarray):
colors (ndarray):
custom (bool): use custom ticks
Kwargs:
See plt.colorbar
Returns:
cb : matplotlib colorbar object
Ignore:
>>> autompl()
>>> scalars = np.array([-1, -2, 1, 1, 2, 7, 10])
>>> cmap_ = 'plasma'
>>> logscale = False
>>> custom = True
>>> reverse_cmap = True
>>> val2_customcolor = {
... -1: UNKNOWN_PURP,
... -2: LIGHT_BLUE,
... }
>>> colors = scores_to_color(scalars, cmap_=cmap_, logscale=logscale, reverse_cmap=reverse_cmap, val2_customcolor=val2_customcolor)
>>> colorbar(scalars, colors, custom=custom)
>>> df2.present()
>>> show_if_requested()
Ignore:
>>> # ENABLE_DOCTEST
>>> scalars = np.linspace(0, 1, 100)
>>> cmap_ = 'plasma'
>>> logscale = False
>>> custom = False
>>> reverse_cmap = False
>>> colors = scores_to_color(scalars, cmap_=cmap_, logscale=logscale,
>>> reverse_cmap=reverse_cmap)
>>> colors = [lighten_rgb(c, .3) for c in colors]
>>> colorbar(scalars, colors, custom=custom)
>>> df2.present()
>>> show_if_requested()
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
assert len(scalars) == len(colors), 'scalars and colors must be corresponding'
if len(scalars) == 0:
return None
# Parameters
ax = plt.gca()
divider = _ensure_divider(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
xy, width, height = _get_axis_xy_width_height(ax)
#orientation = ['vertical', 'horizontal'][0]
TICK_FONTSIZE = 8
#
# Create scalar mappable with cmap
if custom:
# FIXME: clean this code up and change the name custom
# to be meaningful. It is more like: display unique colors
unique_scalars, unique_idx = np.unique(scalars, return_index=True)
unique_colors = np.array(colors)[unique_idx]
#max_, min_ = unique_scalars.max(), unique_scalars.min()
#extent_ = max_ - min_
#bounds = np.linspace(min_, max_ + 1, extent_ + 2)
listed_cmap = mpl.colors.ListedColormap(unique_colors)
#norm = mpl.colors.BoundaryNorm(bounds, listed_cmap.N)
#sm = mpl.cm.ScalarMappable(cmap=listed_cmap, norm=norm)
sm = mpl.cm.ScalarMappable(cmap=listed_cmap)
sm.set_array(np.linspace(0, 1, len(unique_scalars) + 1))
else:
sorted_scalars = sorted(scalars)
listed_cmap = scores_to_cmap(scalars, colors)
sm = plt.cm.ScalarMappable(cmap=listed_cmap)
sm.set_array(sorted_scalars)
# Use mapable object to create the colorbar
#COLORBAR_SHRINK = .42 # 1
#COLORBAR_PAD = .01 # 1
#COLORBAR_ASPECT = np.abs(20 * height / (width)) # 1
cb = plt.colorbar(sm, cax=cax, **kwargs)
## Add the colorbar to the correct label
#axis = cb.ax.yaxis # if orientation == 'horizontal' else cb.ax.yaxis
#position = 'bottom' if orientation == 'horizontal' else 'right'
#axis.set_ticks_position(position)
# This line alone removes data
# axis.set_ticks([0, .5, 1])
if custom:
ticks = np.linspace(0, 1, len(unique_scalars) + 1)
if len(ticks) < 2:
ticks += .5
else:
# SO HACKY
ticks += (ticks[1] - ticks[0]) / 2
if isinstance(unique_scalars, np.ndarray) and unique_scalars.dtype.kind == 'f':
ticklabels = [float_format % scalar for scalar in unique_scalars]
else:
ticklabels = unique_scalars
cb.set_ticks(ticks) # tick locations
cb.set_ticklabels(ticklabels) # tick labels
elif ticklabels is not None:
ticks_ = cb.ax.get_yticks()
mx = ticks_.max()
mn = ticks_.min()
ticks = np.linspace(mn, mx, len(ticklabels))
cb.set_ticks(ticks) # tick locations
cb.set_ticklabels(ticklabels)
#cb.ax.get_yticks()
#cb.set_ticks(ticks) # tick locations
#cb.set_ticklabels(ticklabels) # tick labels
# _set_plotdat(cb.ax, 'viztype', 'colorbar-%s' % (lbl,))
# _set_plotdat(cb.ax, 'sm', sm)
# FIXME: Figure out how to make a maximum number of ticks
# and to enforce them to be inside the data bounds
cb.ax.tick_params(labelsize=TICK_FONTSIZE)
# Sets current axis
plt.sca(ax)
if lbl is not None:
cb.set_label(lbl)
return cb
_DF2_DIVIDER_KEY = '_df2_divider'
def _get_plotdat(ax, key, default=None):
""" returns internal property from a matplotlib axis """
_plotdat = _get_plotdat_dict(ax)
val = _plotdat.get(key, default)
return val
def _set_plotdat(ax, key, val):
""" sets internal property to a matplotlib axis """
_plotdat = _get_plotdat_dict(ax)
_plotdat[key] = val
def _del_plotdat(ax, key):
""" sets internal property to a matplotlib axis """
_plotdat = _get_plotdat_dict(ax)
if key in _plotdat:
del _plotdat[key]
def _get_plotdat_dict(ax):
""" sets internal property to a matplotlib axis """
if '_plotdat' not in ax.__dict__:
ax.__dict__['_plotdat'] = {}
plotdat_dict = ax.__dict__['_plotdat']
return plotdat_dict
def _ensure_divider(ax):
""" Returns previously constructed divider or creates one """
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = _get_plotdat(ax, _DF2_DIVIDER_KEY, None)
if divider is None:
divider = make_axes_locatable(ax)
_set_plotdat(ax, _DF2_DIVIDER_KEY, divider)
orig_append_axes = divider.append_axes
def df2_append_axes(divider, position, size, pad=None, add_to_figure=True, **kwargs):
""" override divider add axes to register the divided axes """
div_axes = _get_plotdat(ax, 'df2_div_axes', [])
new_ax = orig_append_axes(position, size, pad=pad, add_to_figure=add_to_figure, **kwargs)
div_axes.append(new_ax)
_set_plotdat(ax, 'df2_div_axes', div_axes)
return new_ax
new_method = df2_append_axes.__get__(divider, divider.__class__)
setattr(divider, 'append_axes', new_method)
# ut.inject_func_as_method(divider, df2_append_axes, 'append_axes', allow_override=True)
return divider
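# Sketch of how the divider registration above is consumed: axes created through the
# patched append_axes are remembered on the parent axis, which lets the save-time code
# group a plot with its colorbar. The helper name is illustrative; append_axes behavior
# follows mpl_toolkits and may differ on very new matplotlib versions.
def _demo_divider_grouping(ax):
    divider = _ensure_divider(ax)
    cax = divider.append_axes('right', size='5%', pad=0.05)  # registered as a div axis
    return _get_plotdat(ax, 'df2_div_axes', [])  # now contains cax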
def scores_to_cmap(scores, colors=None, cmap_='hot'):
import matplotlib as mpl
if colors is None:
colors = scores_to_color(scores, cmap_=cmap_)
scores = np.array(scores)
colors = np.array(colors)
sortx = scores.argsort()
sorted_colors = colors[sortx]
# Make a listed colormap and mappable object
listed_cmap = mpl.colors.ListedColormap(sorted_colors)
return listed_cmap
def scores_to_color(score_list, cmap_='hot', logscale=False, reverse_cmap=False,
custom=False, val2_customcolor=None, score_range=None,
cmap_range=(.1, .9)):
"""
Other good colormaps are 'spectral', 'gist_rainbow', 'gist_ncar', 'Set1',
'Set2', 'Accent'
# TODO: plasma
Args:
score_list (list):
cmap_ (str): defaults to hot
logscale (bool):
cmap_range (tuple): restricts to only a portion of the cmap to avoid extremes
Returns:
        list: a list of RGBA color tuples (one per score)
Ignore:
>>> ut.exec_funckw(scores_to_color, globals())
>>> score_list = np.array([-1, -2, 1, 1, 2, 10])
>>> # score_list = np.array([0, .1, .11, .12, .13, .8])
>>> # score_list = np.linspace(0, 1, 100)
>>> cmap_ = 'plasma'
>>> colors = scores_to_color(score_list, cmap_)
>>> imgRGB = util.atleast_nd(np.array(colors)[:, 0:3], 3, tofront=True)
>>> imgRGB = imgRGB.astype(np.float32)
>>> imgBGR = util.convert_colorspace(imgRGB, 'BGR', 'RGB')
>>> imshow(imgBGR)
>>> show_if_requested()
Ignore:
>>> score_list = np.array([-1, -2, 1, 1, 2, 10])
>>> cmap_ = 'hot'
>>> logscale = False
>>> reverse_cmap = True
>>> custom = True
>>> val2_customcolor = {
... -1: UNKNOWN_PURP,
... -2: LIGHT_BLUE,
... }
"""
import matplotlib.pyplot as plt
    score_list = np.asarray(score_list)
    assert len(score_list.shape) == 1, 'score must be 1d'
if len(score_list) == 0:
return []
def apply_logscale(scores):
scores = np.array(scores)
above_zero = scores >= 0
scores_ = scores.copy()
scores_[above_zero] = scores_[above_zero] + 1
scores_[~above_zero] = scores_[~above_zero] - 1
scores_ = np.log2(scores_)
return scores_
if logscale:
# Hack
score_list = apply_logscale(score_list)
#if loglogscale
#score_list = np.log2(np.log2(score_list + 2) + 1)
#if isinstance(cmap_, six.string_types):
cmap = plt.get_cmap(cmap_)
#else:
# cmap = cmap_
if reverse_cmap:
cmap = reverse_colormap(cmap)
#if custom:
# base_colormap = cmap
# data = score_list
# cmap = customize_colormap(score_list, base_colormap)
if score_range is None:
min_ = score_list.min()
max_ = score_list.max()
else:
min_ = score_range[0]
max_ = score_range[1]
if logscale:
min_, max_ = apply_logscale([min_, max_])
if cmap_range is None:
cmap_scale_min, cmap_scale_max = 0., 1.
else:
cmap_scale_min, cmap_scale_max = cmap_range
extent_ = max_ - min_
if extent_ == 0:
colors = [cmap(.5) for fx in range(len(score_list))]
else:
if False and logscale:
# hack
def score2_01(score):
return np.log2(
1 + cmap_scale_min + cmap_scale_max *
(float(score) - min_) / (extent_))
score_list = np.array(score_list)
#rank_multiplier = score_list.argsort() / len(score_list)
#normscore = np.array(list(map(score2_01, score_list))) * rank_multiplier
normscore = np.array(list(map(score2_01, score_list)))
colors = list(map(cmap, normscore))
else:
def score2_01(score):
return cmap_scale_min + cmap_scale_max * (float(score) - min_) / (extent_)
colors = [cmap(score2_01(score)) for score in score_list]
if val2_customcolor is not None:
colors = [
np.array(val2_customcolor.get(score, color))
for color, score in zip(colors, score_list)]
return colors
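# Hedged example of scores_to_color(): map a handful of scores onto the 'hot' colormap.
# The helper name and the score values are illustrative only.
def _demo_scores_to_color():
    scores = np.array([0.1, 0.5, 0.9, 2.0])
    colors = scores_to_color(scores, cmap_='hot')  # one RGBA tuple per score
    return list(zip(scores.tolist(), colors))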
def reverse_colormap(cmap):
"""
References:
http://nbviewer.ipython.org/github/kwinkunks/notebooks/blob/master/Matteo_colourmaps.ipynb
"""
import matplotlib as mpl
if isinstance(cmap, mpl.colors.ListedColormap):
return mpl.colors.ListedColormap(cmap.colors[::-1])
else:
reverse = []
k = []
for key, channel in six.iteritems(cmap._segmentdata):
data = []
for t in channel:
data.append((1 - t[0], t[1], t[2]))
k.append(key)
reverse.append(sorted(data))
cmap_reversed = mpl.colors.LinearSegmentedColormap(
cmap.name + '_reversed', dict(zip(k, reverse)))
return cmap_reversed
class PlotNums(object):
"""
    Convenience class for dealing with plot numberings (pnums)
Example:
>>> pnum_ = PlotNums(nRows=2, nCols=2)
>>> # Indexable
>>> print(pnum_[0])
(2, 2, 1)
>>> # Iterable
>>> print(ub.repr2(list(pnum_), nl=0, nobr=True))
(2, 2, 1), (2, 2, 2), (2, 2, 3), (2, 2, 4)
>>> # Callable (iterates through a default iterator)
>>> print(pnum_())
(2, 2, 1)
>>> print(pnum_())
(2, 2, 2)
"""
def __init__(self, nRows=None, nCols=None, nSubplots=None, start=0):
nRows, nCols = self._get_num_rc(nSubplots, nRows, nCols)
self.nRows = nRows
self.nCols = nCols
base = 0
self.offset = 0 if base == 1 else 1
self.start = start
self._iter = None
def __getitem__(self, px):
return (self.nRows, self.nCols, px + self.offset)
def __call__(self):
"""
replacement for make_pnum_nextgen
Example:
>>> import itertools as it
>>> pnum_ = PlotNums(nSubplots=9)
>>> pnum_list = list( (pnum_() for _ in it.count()) )
>>> result = ('pnum_list = %s' % (ub.repr2(pnum_list),))
>>> print(result)
Example:
>>> import itertools as it
>>> for nRows, nCols, nSubplots in it.product([None, 3], [None, 3], [None, 9]):
>>> start = 0
>>> pnum_ = PlotNums(nRows, nCols, nSubplots, start)
>>> pnum_list = list( (pnum_() for _ in it.count()) )
>>> print((nRows, nCols, nSubplots))
>>> result = ('pnum_list = %s' % (ub.repr2(pnum_list),))
>>> print(result)
"""
if self._iter is None:
self._iter = iter(self)
return six.next(self._iter)
def __iter__(self):
r"""
Yields:
tuple : pnum
Example:
>>> pnum_ = iter(PlotNums(nRows=3, nCols=2))
>>> result = ub.repr2(list(pnum_), nl=1, nobr=True)
>>> print(result)
(3, 2, 1),
(3, 2, 2),
(3, 2, 3),
(3, 2, 4),
(3, 2, 5),
(3, 2, 6),
Example:
>>> nRows = 3
>>> nCols = 2
>>> pnum_ = iter(PlotNums(nRows, nCols, start=3))
>>> result = ub.repr2(list(pnum_), nl=1, nobr=True)
>>> print(result)
(3, 2, 4),
(3, 2, 5),
(3, 2, 6),
"""
for px in range(self.start, len(self)):
yield self[px]
def __len__(self):
total_plots = self.nRows * self.nCols
return total_plots
@classmethod
def _get_num_rc(PlotNums, nSubplots=None, nRows=None, nCols=None):
r"""
Gets a constrained row column plot grid
Args:
nSubplots (None): (default = None)
nRows (None): (default = None)
nCols (None): (default = None)
Returns:
tuple: (nRows, nCols)
Example:
>>> cases = [
>>> dict(nRows=None, nCols=None, nSubplots=None),
>>> dict(nRows=2, nCols=None, nSubplots=5),
>>> dict(nRows=None, nCols=2, nSubplots=5),
>>> dict(nRows=None, nCols=None, nSubplots=5),
>>> ]
>>> for kw in cases:
>>> print('----')
>>> size = PlotNums._get_num_rc(**kw)
>>> if kw['nSubplots'] is not None:
>>> assert size[0] * size[1] >= kw['nSubplots']
>>> print('**kw = %s' % (ub.repr2(kw),))
>>> print('size = %r' % (size,))
"""
if nSubplots is None:
if nRows is None:
nRows = 1
if nCols is None:
nCols = 1
else:
if nRows is None and nCols is None:
nRows, nCols = PlotNums._get_square_row_cols(nSubplots)
elif nRows is not None:
nCols = int(np.ceil(nSubplots / nRows))
elif nCols is not None:
nRows = int(np.ceil(nSubplots / nCols))
return nRows, nCols
    @staticmethod
    def _get_square_row_cols(nSubplots, max_cols=None, fix=False, inclusive=True):
r"""
Args:
nSubplots (int):
max_cols (int):
Returns:
tuple: (int, int)
Example:
>>> nSubplots = 9
>>> nSubplots_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
>>> max_cols = None
>>> rc_list = [PlotNums._get_square_row_cols(nSubplots, fix=True) for nSubplots in nSubplots_list]
>>> print(repr(np.array(rc_list).T))
array([[1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3],
[1, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4]])
"""
if nSubplots == 0:
return 0, 0
if inclusive:
rounder = np.ceil
else:
rounder = np.floor
if fix:
# This function is very broken, but it might have dependencies
# this is the correct version
nCols = int(rounder(np.sqrt(nSubplots)))
nRows = int(rounder(nSubplots / nCols))
return nRows, nCols
else:
# This is the clamped num cols version
# probably used in ibeis.viz
if max_cols is None:
max_cols = 5
if nSubplots in [4]:
max_cols = 2
if nSubplots in [5, 6, 7]:
max_cols = 3
if nSubplots in [8]:
max_cols = 4
nCols = int(min(nSubplots, max_cols))
#nCols = int(min(rounder(np.sqrt(nrids)), 5))
nRows = int(rounder(nSubplots / nCols))
return nRows, nCols
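# Minimal sketch of PlotNums in use: lay out four subplots on a 2x2 grid. The demo
# function name is invented for illustration.
def _demo_plotnums():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    pnum_ = PlotNums(nRows=2, nCols=2)
    axes = [fig.add_subplot(*pnum_()) for _ in range(4)]  # (2, 2, 1) ... (2, 2, 4)
    return axes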
def draw_border(ax, color, lw=2, offset=None, adjust=True):
'draws rectangle border around a subplot'
if adjust:
xy, width, height = _get_axis_xy_width_height(ax, -.7, -.2, 1, .4)
else:
xy, width, height = _get_axis_xy_width_height(ax)
if offset is not None:
xoff, yoff = offset
xy = [xoff, yoff]
height = - height - yoff
width = width - xoff
import matplotlib as mpl
rect = mpl.patches.Rectangle(xy, width, height, lw=lw)
rect = ax.add_patch(rect)
rect.set_clip_on(False)
rect.set_fill(False)
rect.set_edgecolor(color)
return rect
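# Hedged sketch for draw_border(): outline the current axes with a red rectangle. The
# helper name is illustrative only.
def _demo_draw_border():
    import matplotlib.pyplot as plt
    ax = plt.gca()
    return draw_border(ax, color='red', lw=3)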
def draw_boxes(boxes, box_format='xywh', color='blue', labels=None,
textkw=None, ax=None):
"""
Args:
        boxes (list): list of coordinates in xywh, tlbr, or cxywh format
        box_format (str): specify how boxes are formatted
xywh is the top left x and y pixel width and height
cxywh is the center xy pixel width and height
tlbr is the top left xy and the bottom right xy
color (str): edge color of the boxes
labels (list): if specified, plots a text annotation on each box
Example:
>>> from netharn.util.mplutil import *
>>> autompl()
>>> bboxes = [[.1, .1, .6, .3], [.3, .5, .5, .6]]
>>> col = draw_boxes(bboxes)
"""
import matplotlib as mpl
from matplotlib import pyplot as plt
if ax is None:
ax = plt.gca()
from netharn import util
if isinstance(boxes, util.Boxes):
box_format = boxes.format
boxes = boxes.data
if not len(boxes):
return
boxes = np.asarray(boxes)
if box_format == 'xywh':
xywh = boxes
elif box_format == 'cxywh':
cx, cy, w, h = boxes.T[0:4]
x1 = cx - (w / 2)
y1 = cy - (h / 2)
xywh = np.vstack([x1, y1, w, h]).T
elif box_format == 'tlbr':
x1, y1 = boxes.T[0:2]
w, h = boxes.T[2:4] - boxes.T[0:2]
xywh = np.vstack([x1, y1, w, h]).T
else:
raise KeyError(box_format)
edgecolor = Color(color).as01('rgba')
facecolor = Color((0, 0, 0, 0)).as01('rgba')
rectkw = dict(ec=edgecolor, fc=facecolor, lw=2, linestyle='solid')
patches = [mpl.patches.Rectangle((x, y), w, h, **rectkw)
for x, y, w, h in xywh]
col = mpl.collections.PatchCollection(patches, match_original=True)
ax.add_collection(col)
if labels:
texts = []
default_textkw = {
'horizontalalignment': 'left',
'verticalalignment': 'top',
'backgroundcolor': (0, 0, 0, .3),
'color': 'white',
'fontproperties': mpl.font_manager.FontProperties(
size=6, family='monospace'),
}
tkw = default_textkw.copy()
if textkw is not None:
tkw.update(textkw)
for (x1, y1, w, h), label in zip(xywh, labels):
texts.append((x1, y1, label, tkw))
for (x1, y1, catname, tkw) in texts:
ax.text(x1, y1, catname, **tkw)
return col
def draw_line_segments(pts1, pts2, ax=None, **kwargs):
"""
draws `N` line segments between `N` pairs of points
Args:
pts1 (ndarray): Nx2
pts2 (ndarray): Nx2
ax (None): (default = None)
**kwargs: lw, alpha, colors
CommandLine:
python -m netharn.util.mplutil draw_line_segments --show
Example:
>>> pts1 = np.array([(.1, .8), (.6, .8)])
>>> pts2 = np.array([(.6, .7), (.4, .1)])
>>> figure(fnum=None)
>>> draw_line_segments(pts1, pts2)
>>> # xdoc: +REQUIRES(--show)
>>> import matplotlib.pyplot as plt
>>> ax = plt.gca()
>>> ax.set_xlim(0, 1)
>>> ax.set_ylim(0, 1)
>>> show_if_requested()
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
if ax is None:
ax = plt.gca()
assert len(pts1) == len(pts2), 'unaligned'
segments = [(xy1, xy2) for xy1, xy2 in zip(pts1, pts2)]
linewidth = kwargs.pop('lw', kwargs.pop('linewidth', 1.0))
alpha = kwargs.pop('alpha', 1.0)
if 'color' in kwargs:
kwargs['colors'] = kwargs['color']
# mpl.colors.ColorConverter().to_rgb(kwargs['color'])
line_group = mpl.collections.LineCollection(segments, linewidths=linewidth,
alpha=alpha, **kwargs)
ax.add_collection(line_group)
def make_heatmask(probs, cmap='plasma', with_alpha=True):
"""
Colorizes a single-channel intensity mask (with an alpha channel)
"""
import matplotlib as mpl
from netharn.util import imutil
assert len(probs.shape) == 2
cmap_ = mpl.cm.get_cmap(cmap)
probs = imutil.ensure_float01(probs)
heatmask = cmap_(probs)
if with_alpha:
heatmask[:, :, 0:3] = heatmask[:, :, 0:3][:, :, ::-1]
heatmask[:, :, 3] = probs
return heatmask
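# Hedged sketch: turn a probability map into an RGBA heat overlay via make_heatmask.
# The helper name and the 0..1 ramp input are illustrative only.
def _demo_make_heatmask():
    probs = np.tile(np.linspace(0, 1, 64), (64, 1))  # horizontal 0..1 ramp, shape (64, 64)
    heatmask = make_heatmask(probs, cmap='plasma', with_alpha=True)
    return heatmask.shape  # (64, 64, 4): BGR channels plus an alpha channel driven by probs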
def colorbar_image(domain, cmap='plasma', dpi=96, shape=(200, 20), transparent=False):
"""
Notes:
shape is approximate
Ignore:
domain = np.linspace(-30, 200)
cmap='plasma'
dpi = 80
dsize = (20, 200)
util.imwrite('foo.png', util.colorbar_image(np.arange(0, 1)), shape=(400, 80))
import plottool as pt
pt.qtensure()
import matplotlib as mpl
mpl.style.use('ggplot')
util.imwrite('foo.png', util.colorbar_image(np.linspace(0, 1, 100), dpi=200, shape=(1000, 40), transparent=1))
ub.startfile('foo.png')
"""
import matplotlib as mpl
mpl.use('agg', force=False, warn=False)
from matplotlib import pyplot as plt
fig = plt.figure(dpi=dpi)
w, h = shape[1] / dpi, shape[0] / dpi
# w, h = 1, 10
fig.set_size_inches(w, h)
    ax = fig.add_subplot(1, 1, 1)
sm = plt.cm.ScalarMappable(cmap=plt.get_cmap(cmap))
sm.set_array(domain)
plt.colorbar(sm, cax=ax)
cb_img = render_figure_to_image(fig, dpi=dpi, transparent=transparent)
plt.close(fig)
return cb_img
class Color(ub.NiceRepr):
"""
move to colorutil?
Example:
>>> from netharn.util.mplutil import *
>>> print(Color('g'))
>>> print(Color('orangered'))
>>> print(Color('#AAAAAA').as255())
>>> print(Color([0, 255, 0]))
>>> print(Color([1, 1, 1.]))
>>> print(Color([1, 1, 1]))
>>> print(Color(Color([1, 1, 1])).as255())
>>> print(Color(Color([1., 0, 1, 0])).ashex())
>>> print(Color([1, 1, 1], alpha=255))
>>> print(Color([1, 1, 1], alpha=255, space='lab'))
"""
def __init__(self, color, alpha=None, space=None):
if isinstance(color, Color):
assert alpha is None
assert space is None
space = color.space
color = color.color01
else:
color = self._ensure_color01(color)
if alpha is not None:
alpha = self._ensure_color01([alpha])[0]
if space is None:
space = 'rgb'
# always normalize the color down to 01
color01 = list(color)
if alpha is not None:
if len(color01) not in [1, 3]:
raise ValueError('alpha already in color')
color01 = color01 + [alpha]
# correct space if alpha is given
if len(color01) in [2, 4]:
if not space.endswith('a'):
space += 'a'
self.color01 = color01
self.space = space
def __nice__(self):
colorpart = ', '.join(['{:.2f}'.format(c) for c in self.color01])
return self.space + ': ' + colorpart
def ashex(self, space=None):
c255 = self.as255(space)
return '#' + ''.join(['{:02x}'.format(c) for c in c255])
def as255(self, space=None):
color = (np.array(self.as01(space)) * 255).astype(np.uint8)
return tuple(map(int, color))
def as01(self, space=None):
"""
self = mplutil.Color('red')
mplutil.Color('green').as01('rgba')
"""
color = tuple(self.color01)
if space is not None:
if space == self.space:
pass
elif space == 'rgba' and self.space == 'rgb':
color = color + (1,)
elif space == 'bgr' and self.space == 'rgb':
color = color[::-1]
elif space == 'rgb' and self.space == 'bgr':
color = color[::-1]
else:
assert False
return tuple(map(float, color))
@classmethod
    def _is_base01(Color, channels):
""" check if a color is in base 01 """
def _test_base01(channels):
tests01 = {
'is_float': all([isinstance(c, (float, np.float64)) for c in channels]),
'is_01': all([c >= 0.0 and c <= 1.0 for c in channels]),
}
return tests01
if isinstance(channels, six.string_types):
return False
return all(_test_base01(channels).values())
@classmethod
def _is_base255(Color, channels):
""" there is a one corner case where all pixels are 1 or less """
if (all(c > 0.0 and c <= 255.0 for c in channels) and any(c > 1.0 for c in channels)):
# Definately in 255 space
return True
else:
# might be in 01 or 255
return all(isinstance(c, int) for c in channels)
@classmethod
def _hex_to_01(Color, hex_color):
"""
hex_color = '#6A5AFFAF'
"""
assert hex_color.startswith('#'), 'not a hex string %r' % (hex_color,)
parts = hex_color[1:].strip()
color255 = tuple(int(parts[i: i + 2], 16) for i in range(0, len(parts), 2))
assert len(color255) in [3, 4], 'must be length 3 or 4'
return Color._255_to_01(color255)
    @classmethod
    def _ensure_color01(Color, color):
""" Infer what type color is and normalize to 01 """
if isinstance(color, six.string_types):
color = Color._string_to_01(color)
elif Color._is_base255(color):
color = Color._255_to_01(color)
return color
@classmethod
def _255_to_01(Color, color255):
""" converts base 255 color to base 01 color """
return [channel / 255.0 for channel in color255]
@classmethod
def _string_to_01(Color, color):
"""
mplutil.Color._string_to_01('green')
mplutil.Color._string_to_01('red')
"""
from matplotlib import colors as mcolors
if color in mcolors.BASE_COLORS:
color01 = mcolors.BASE_COLORS[color]
elif color in mcolors.CSS4_COLORS:
color_hex = mcolors.CSS4_COLORS[color]
color01 = Color._hex_to_01(color_hex)
elif color.startswith('#'):
color01 = Color._hex_to_01(color)
else:
raise ValueError('unknown color=%r' % (color,))
return color01
@classmethod
    def named_colors(cls):
from matplotlib import colors as mcolors
names = sorted(list(mcolors.BASE_COLORS.keys()) + list(mcolors.CSS4_COLORS.keys()))
return names
@classmethod
def distinct(Color, num, space='rgb'):
"""
Make multiple distinct colors
"""
import matplotlib as mpl
import matplotlib._cm as _cm
cm = mpl.colors.LinearSegmentedColormap.from_list(
'gist_rainbow', _cm.datad['gist_rainbow'],
mpl.rcParams['image.lut'])
distinct_colors = [
np.array(cm(i / num)).tolist()[0:3]
for i in range(num)
]
if space == 'rgb':
return distinct_colors
else:
return [Color(c, space='rgb').as01(space=space) for c in distinct_colors]
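# Small sketch of Color.distinct: request three well-separated colors. Purely
# illustrative; the call uses only the class defined above.
def _demo_color_distinct():
    return Color.distinct(3, space='rgb')  # three [r, g, b] lists in the 0..1 range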
if __name__ == '__main__':
r"""
CommandLine:
python -m netharn.util.mplutil
"""
import xdoctest
xdoctest.doctest_module(__file__)
| 33.943964
| 139
| 0.566649
| 11,590
| 0.128391
| 1,691
| 0.018732
| 4,393
| 0.048665
| 0
| 0
| 34,573
| 0.382991
|
18dcca6339890714a53a527f99f816d155ae5c43
| 4,876
|
py
|
Python
|
mmdeploy/codebase/mmdet/models/roi_heads/test_mixins.py
|
zhiqwang/mmdeploy
|
997d111a6f4ca9624ab3b36717748e6ce002037d
|
[
"Apache-2.0"
] | 746
|
2021-12-27T10:50:28.000Z
|
2022-03-31T13:34:14.000Z
|
mmdeploy/codebase/mmdet/models/roi_heads/test_mixins.py
|
zhiqwang/mmdeploy
|
997d111a6f4ca9624ab3b36717748e6ce002037d
|
[
"Apache-2.0"
] | 253
|
2021-12-28T05:59:13.000Z
|
2022-03-31T18:22:25.000Z
|
mmdeploy/codebase/mmdet/models/roi_heads/test_mixins.py
|
zhiqwang/mmdeploy
|
997d111a6f4ca9624ab3b36717748e6ce002037d
|
[
"Apache-2.0"
] | 147
|
2021-12-27T10:50:33.000Z
|
2022-03-30T10:44:20.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
'mmdet.models.roi_heads.test_mixins.BBoxTestMixin.simple_test_bboxes')
def bbox_test_mixin__simple_test_bboxes(ctx,
self,
x,
img_metas,
proposals,
rcnn_test_cfg,
rescale=False):
"""Rewrite `simple_test_bboxes` of `BBoxTestMixin` for default backend.
1. This function eliminates the batch dimension to get forward bbox
results, and recover batch dimension to calculate final result
for deployment.
2. This function returns detection result as Tensor instead of numpy
array.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the original class.
x (tuple[Tensor]): Features from upstream network. Each
has shape (batch_size, c, h, w).
img_metas (list[dict]): Meta information of images.
proposals (list(Tensor)): Proposals from rpn head.
Each has shape (num_proposals, 5), last dimension
5 represent (x1, y1, x2, y2, score).
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
rescale (bool): If True, return boxes in original image space.
Default: False.
Returns:
tuple[Tensor, Tensor]: (det_bboxes, det_labels), `det_bboxes` of
shape [N, num_det, 5] and `det_labels` of shape [N, num_det].
"""
rois = proposals
batch_index = torch.arange(
rois.shape[0], device=rois.device).float().view(-1, 1, 1).expand(
rois.size(0), rois.size(1), 1)
rois = torch.cat([batch_index, rois[..., :4]], dim=-1)
batch_size = rois.shape[0]
num_proposals_per_img = rois.shape[1]
# Eliminate the batch dimension
rois = rois.view(-1, 5)
bbox_results = self._bbox_forward(x, rois)
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
# Recover the batch dimension
rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))
cls_score = cls_score.reshape(batch_size, num_proposals_per_img,
cls_score.size(-1))
bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img,
bbox_pred.size(-1))
det_bboxes, det_labels = self.bbox_head.get_bboxes(
rois,
cls_score,
bbox_pred,
img_metas[0]['img_shape'],
None,
rescale=rescale,
cfg=rcnn_test_cfg)
return det_bboxes, det_labels
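# Standalone sketch of the batch-index trick used above: prepend each proposal's batch id
# so per-image rois can be flattened to (B*N, 5) and reshaped back later. The shapes and
# the helper name are illustrative only.
def _demo_batch_index_rois():
    proposals = torch.rand(2, 4, 5)  # (batch, num_proposals, 5) dummy proposals
    batch_index = torch.arange(
        proposals.shape[0], device=proposals.device).float().view(-1, 1, 1).expand(
            proposals.size(0), proposals.size(1), 1)
    rois = torch.cat([batch_index, proposals[..., :4]], dim=-1)  # (2, 4, 5)
    return rois.view(-1, 5).shape  # torch.Size([8, 5])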
@FUNCTION_REWRITER.register_rewriter(
'mmdet.models.roi_heads.test_mixins.MaskTestMixin.simple_test_mask')
def mask_test_mixin__simple_test_mask(ctx, self, x, img_metas, det_bboxes,
det_labels, **kwargs):
"""Rewrite `simple_test_mask` of `BBoxTestMixin` for default backend.
This function returns detection result as Tensor instead of numpy
array.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the original class.
x (tuple[Tensor]): Features from upstream network. Each
has shape (batch_size, c, h, w).
img_metas (list[dict]): Meta information of images.
det_bboxes (tuple[Tensor]): Detection bounding-boxes from features.
Each has shape of (batch_size, num_det, 5).
det_labels (tuple[Tensor]): Detection labels from features. Each
has shape of (batch_size, num_det).
Returns:
tuple[Tensor]: (segm_results), `segm_results` of shape
[N, num_det, roi_H, roi_W].
"""
batch_size = det_bboxes.size(0)
det_bboxes = det_bboxes[..., :4]
batch_index = torch.arange(
det_bboxes.size(0),
device=det_bboxes.device).float().view(-1, 1, 1).expand(
det_bboxes.size(0), det_bboxes.size(1), 1)
mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)
mask_rois = mask_rois.view(-1, 5)
mask_results = self._mask_forward(x, mask_rois)
mask_pred = mask_results['mask_pred']
max_shape = img_metas[0]['img_shape']
num_det = det_bboxes.shape[1]
det_bboxes = det_bboxes.reshape(-1, 4)
det_labels = det_labels.reshape(-1)
segm_results = self.mask_head.get_seg_masks(mask_pred, det_bboxes,
det_labels, self.test_cfg,
max_shape)
segm_results = segm_results.reshape(batch_size, num_det,
segm_results.shape[-2],
segm_results.shape[-1])
return segm_results
| 41.322034
| 75
| 0.611567
| 0
| 0
| 0
| 0
| 4,764
| 0.97703
| 0
| 0
| 2,227
| 0.456727
|
18dd011d855404f1d1af53f818b57ec996f325ba
| 1,060
|
py
|
Python
|
examples/props.py
|
SandNerd/notional
|
ccab44bc4c5d19d4546156f0d72b22b93e28e2ed
|
[
"MIT"
] | 23
|
2021-08-03T08:13:14.000Z
|
2022-03-27T13:13:54.000Z
|
examples/props.py
|
SandNerd/notional
|
ccab44bc4c5d19d4546156f0d72b22b93e28e2ed
|
[
"MIT"
] | 15
|
2021-08-03T04:04:23.000Z
|
2022-03-31T14:27:26.000Z
|
examples/props.py
|
SandNerd/notional
|
ccab44bc4c5d19d4546156f0d72b22b93e28e2ed
|
[
"MIT"
] | 3
|
2021-08-08T04:47:48.000Z
|
2022-03-06T23:13:52.000Z
|
#!/usr/bin/env python3
"""This script demonstrates setting properties on a page manually.
The script accepts a single command line option, which is a page ID. It will then
display information about the properties and update a few of them.
Note that this script assumes the database has already been created with required
fields.
The caller must set `NOTION_AUTH_TOKEN` to a valid integration token.
"""
import logging
import os
import sys
logging.basicConfig(level=logging.INFO)
import notional
from notional import types
page_id = sys.argv[1]
auth_token = os.getenv("NOTION_AUTH_TOKEN")
notion = notional.connect(auth=auth_token)
# get an existing page...
page = notion.pages.retrieve(page_id)
print(f"{page.Title} => {page.url}")
# print all current properties on the page...
for name, prop in page.properties.items():
print(f"{name} => {prop}")
# update a property on the page...
page["Complete"] = types.Checkbox.from_value(True)
# FIXME this feature is broken - https://github.com/jheddings/notional/issues/9
# notion.pages.update(page)
| 25.853659
| 82
| 0.756604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 692
| 0.65283
|
18dd1d1444e3f06d7820ae1bbcacd5a56dc12c2e
| 1,116
|
py
|
Python
|
retroroot.py
|
retroroot-linux/retroroo
|
07ae0a93f6ea781fa6330a8defdabac9bda82adc
|
[
"MIT"
] | null | null | null |
retroroot.py
|
retroroot-linux/retroroo
|
07ae0a93f6ea781fa6330a8defdabac9bda82adc
|
[
"MIT"
] | null | null | null |
retroroot.py
|
retroroot-linux/retroroo
|
07ae0a93f6ea781fa6330a8defdabac9bda82adc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Use this file to setup a build environment."""
import os
import argparse
from support.linux.log import Log
from support.docker_wrapper.retroroot import RetrorootDocker
CWD = os.getcwd()
def parse_args(args):
"""Parse arguments.
:return: The argument object.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--build",
default=False,
action="store_true",
help="Build")
parser.add_argument("-s", "--setup",
default=False,
action="store_true",
help="setup")
parser.add_argument("--verbose",
default=False,
action="store_true",
help="Prepare verbosely")
return parser.parse_args(args)
def main(args=None):
# logger = Log("retroroot", args.verbose)
args = parse_args(args)
if args.build:
retroroot_docker = RetrorootDocker(args)
retroroot_docker.build()
if __name__ == '__main__':
main()
| 24.8
| 60
| 0.556452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 290
| 0.259857
|
18dd3cd341f57a8da1bfa888190207388f947eb8
| 1,796
|
py
|
Python
|
grr/test_bench.py
|
kecho/grr
|
b6554f20bc8a279bc946a2a0da54d028160d880d
|
[
"MIT"
] | 8
|
2021-11-08T16:12:25.000Z
|
2021-12-16T06:41:01.000Z
|
grr/test_bench.py
|
kecho/grr
|
b6554f20bc8a279bc946a2a0da54d028160d880d
|
[
"MIT"
] | null | null | null |
grr/test_bench.py
|
kecho/grr
|
b6554f20bc8a279bc946a2a0da54d028160d880d
|
[
"MIT"
] | null | null | null |
import coalpy.gpu as g
import numpy as np
import math
import functools
from . import prefix_sum as gpu_prefix_sum
def prefix_sum(input_data, is_exclusive = False):
accum = 0
output = []
for i in range(0, len(input_data), 1):
if is_exclusive:
output.append(accum)
accum += input_data[i]
else:
accum += input_data[i]
output.append(accum)
return output
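# Tiny worked example of the reference prefix_sum above (values are illustrative):
# inclusive: [1, 2, 3] -> [1, 3, 6]; exclusive: [1, 2, 3] -> [0, 1, 3].
def demo_prefix_sum():
    return prefix_sum([1, 2, 3]), prefix_sum([1, 2, 3], is_exclusive=True)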
def test_cluster_gen(is_exclusive = False):
buffersz = 8529
input_data = np.array([x for x in range(0, buffersz, 1)], dtype='i')
test_input_buffer = g.Buffer(format = g.Format.R32_UINT, element_count = buffersz)
reduction_buffers = gpu_prefix_sum.allocate_args(buffersz)
cmd_list = g.CommandList()
cmd_list.upload_resource(source = input_data, destination = test_input_buffer)
output = gpu_prefix_sum.run(cmd_list, test_input_buffer, reduction_buffers, is_exclusive)
g.schedule(cmd_list)
dr = g.ResourceDownloadRequest(resource = output)
dr.resolve()
result = np.frombuffer(dr.data_as_bytearray(), dtype='i')
result = np.resize(result, buffersz)
expected = prefix_sum(input_data, is_exclusive)
correct_count = functools.reduce(lambda x, y: x + y, [1 if x == y else 0 for (x, y) in zip(result, expected)])
    return correct_count == len(input_data)
def run_test(nm, fn):
result = fn()
print(nm + " : " + ("PASS" if result else "FAIL"))
def test_cluster_gen_inclusive():
return test_cluster_gen(is_exclusive = False)
def test_cluster_gen_exclusive():
return test_cluster_gen(is_exclusive = True)
if __name__ == "__main__":
run_test("test prefix sum inclusive", test_cluster_gen_inclusive)
run_test("test prefix sum exclusive", test_cluster_gen_exclusive)
| 32.654545
| 114
| 0.698218
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 87
| 0.048441
|
18dd6ac52fd7ae55fdafeac9d413e2a786dc94b3
| 3,633
|
py
|
Python
|
code/train.py
|
ty-on-h12/srgan-pytorch
|
de0972782200a052a615754b14466f0c495f8b80
|
[
"MIT"
] | null | null | null |
code/train.py
|
ty-on-h12/srgan-pytorch
|
de0972782200a052a615754b14466f0c495f8b80
|
[
"MIT"
] | null | null | null |
code/train.py
|
ty-on-h12/srgan-pytorch
|
de0972782200a052a615754b14466f0c495f8b80
|
[
"MIT"
] | null | null | null |
from torchvision.transforms import transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import torch as T
import torch.optim as optim
from model import Generator, Discriminator
from loss_fn import GeneratorLoss, TVLoss
from utils import show_progress, save
import datetime
import gc
import os
class ConcatDataset(T.utils.data.Dataset):
def __init__(self, *datasets):
self.datasets = datasets
def __getitem__(self, i):
return tuple(d[i] for d in self.datasets)
def __len__(self):
return min(len(d) for d in self.datasets)
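# Hedged sketch of ConcatDataset: zip two equally sized collections so each item becomes
# a (low_res, high_res) pair. The dummy tensors and sizes are illustrative only.
def _demo_concat_dataset():
    lr_items = [T.zeros(3, 64, 64) for _ in range(4)]
    hr_items = [T.zeros(3, 256, 256) for _ in range(4)]
    pairs = ConcatDataset(lr_items, hr_items)
    return len(pairs), tuple(x.shape for x in pairs[0])  # 4 pairs; shapes (3, 64, 64) and (3, 256, 256)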
device = 'cuda' if T.cuda.is_available() else 'cpu'
BATCH_SIZE = 16
SIZE_HR = 256
SIZE_LR = 64
num_workers = 2
rootpath = '../data'
transform_hr = transforms.Compose([
transforms.Resize((SIZE_HR, SIZE_HR)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
data_hr = ImageFolder(rootpath, transform=transform_hr)
transform_lr = transforms.Compose([
transforms.Resize((SIZE_LR, SIZE_LR)),
transforms.ToTensor(),
transforms.GaussianBlur(kernel_size=25),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
data_lr = ImageFolder(rootpath, transform=transform_lr)
full_data = ConcatDataset(data_lr, data_hr)
loader = DataLoader(full_data, BATCH_SIZE, num_workers=num_workers)
generator = Generator(3, 64).to(device)
discriminator = Discriminator(3, 64).to(device)
lr = 1e-4  # 1e-1000 underflows to 0.0, which would freeze both optimizers
gen_optimizer = optim.Adam(generator.parameters(), lr=lr)
disc_optimizer = optim.Adam(discriminator.parameters(), lr=lr)
generator_criterion = GeneratorLoss().to(device)
g_losses = []
d_losses = []
EPOCHS = 1000
if 'models' not in os.listdir():
os.mkdir('models')
save_path = '../models/'
# <----- TRAINING LOOP ----->
for epoch in range(1, EPOCHS):
generator.train()
discriminator.train()
print(f'EPOCH [{epoch}/{EPOCHS}]')
sum_d_loss = 0
sum_g_loss = 0
gc.collect()
T.cuda.empty_cache()
start = datetime.datetime.now()
for idx, (item, target) in enumerate(loader):
item = item[0].to(device)
target = target[0].to(device)
fake_image = generator(item)
discriminator.zero_grad()
real_out = discriminator(target).mean()
fake_out = discriminator(fake_image).mean()
d_loss = 1 - real_out + fake_out
        d_loss.backward(retain_graph=True)
        disc_optimizer.step()  # update discriminator weights
generator.zero_grad()
g_loss = generator_criterion(fake_out, fake_image, target)
g_loss.backward()
fake_img = generator(item)
fake_out = discriminator(fake_img).mean()
if idx % 100 == 0:
print(
f'Batch {idx}/{loader.__len__()} \nLoss (Generator) {g_loss.detach().cpu()}\nLoss (Discriminator) {d_loss.detach().cpu()}'
)
pred = fake_img[0].detach().cpu()
save(generator, discriminator, save_path)
show_progress([item.detach().cpu()[0], pred, target.detach().cpu()[0]], save=True, show=False)
gen_optimizer.step()
sum_d_loss += d_loss.detach().cpu()
sum_g_loss += g_loss.detach().cpu()
    print(f'Time per epoch = {datetime.datetime.now() - start}')
g_losses.append(sum_g_loss / loader.__len__())
d_losses.append(sum_d_loss / loader.__len__())
print(f'D_loss {sum_d_loss}')
print(f'G_loss {sum_g_loss}')
| 31.318966
| 138
| 0.619048
| 265
| 0.072942
| 0
| 0
| 0
| 0
| 0
| 0
| 323
| 0.088907
|
18dd7f23d5115fd8f4284ee064ed94347d9523f8
| 497
|
py
|
Python
|
utils/Formatting.py
|
levindoneto/lmGen
|
ffe2150ebff577135efa3d65a845dd3b806a94ed
|
[
"MIT"
] | 5
|
2018-11-17T17:16:24.000Z
|
2019-10-17T15:16:37.000Z
|
utils/Formatting.py
|
levindoneto/lanGen
|
ffe2150ebff577135efa3d65a845dd3b806a94ed
|
[
"MIT"
] | 6
|
2018-02-06T23:05:29.000Z
|
2019-10-14T02:23:38.000Z
|
utils/Formatting.py
|
levindoneto/lmGen
|
ffe2150ebff577135efa3d65a845dd3b806a94ed
|
[
"MIT"
] | 4
|
2018-10-29T06:37:58.000Z
|
2019-10-06T13:51:18.000Z
|
import re
''' Function for Formatting n-grams.
@Parameters: Tuple: n-gram to be formatted.
@Return: String: formatted gram.
'''
def formatGram(ngram):
return re.sub("[(',)]", '', str(ngram))
''' Function for Formatting sentences.
@Parameters: Sentence: unformatted sentence.
@Return: String: formatted sentence.
'''
def formatSentence(sentence):
sentence = list(sentence)
sentence[0] = sentence[0].upper()
sentence = "".join(sentence)
return sentence + '.\n'
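''' Illustrative usage sketch (example values only).
@Return: Tuple: formatted gram and formatted sentence.
'''
def demoFormatting():
    return formatGram(('hello', 'world')), formatSentence('this is a test')
    # -> ('hello world', 'This is a test.\n')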
| 26.157895
| 48
| 0.661972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 274
| 0.551308
|
18de55269df5672d53cc5989addf4883d366d066
| 1,735
|
py
|
Python
|
mkt/users/tasks.py
|
ngokevin/zamboni
|
a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/users/tasks.py
|
ngokevin/zamboni
|
a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/users/tasks.py
|
ngokevin/zamboni
|
a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import timedelta
import commonware.log
from celeryutils import task
from django.utils.encoding import force_text
from tower import ugettext_lazy as _
from mkt.account.utils import fxa_preverify_url
from mkt.site.mail import send_html_mail_jinja
from mkt.users.models import UserProfile
fxa_email_subjects = {
'customers-before': _('Firefox Accounts is coming'),
'customers-during': _('Activate your Firefox Account'),
'customers-after': _('Activate your Firefox Account'),
'developers-before': _('Firefox Accounts is coming'),
'developers-during': _('Activate your Firefox Account'),
'developers-after': _('Activate your Firefox Account')
}
fxa_email_types = fxa_email_subjects.keys()
log = commonware.log.getLogger('z.users')
@task
def send_mail(user_ids, subject, html_template, text_template, link):
for user in UserProfile.objects.filter(pk__in=user_ids):
if not user.email:
log.info('Skipping: {0}, no email'.format(user.pk))
continue
context = {'title': subject}
if link:
context['link'] = fxa_preverify_url(user, timedelta(days=7))
with user.activate_lang():
log.info('Sending FxA transition email to: {0} (id={1})'
.format(user.email, user.pk))
send_html_mail_jinja(
force_text(subject),
html_template, text_template,
context, recipient_list=[user.email])
@task
def send_fxa_mail(user_ids, mail_type, send_link):
return send_mail(
user_ids,
fxa_email_subjects[mail_type],
'users/emails/{0}.html'.format(mail_type),
'users/emails/{0}.ltxt'.format(mail_type),
send_link)
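# Hedged usage sketch: queue the transition email for a couple of users. The ids and the
# mail type are placeholders, and dispatching through .delay() assumes the usual celery
# task interface exposed by the @task decorator.
# send_fxa_mail.delay([1, 2], 'customers-during', send_link=True)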
| 32.12963
| 72
| 0.673199
| 0
| 0
| 0
| 0
| 959
| 0.552738
| 0
| 0
| 429
| 0.247262
|
18df2c4ff7c83fc2ff4c4df2ad5efb199366fdfd
| 82
|
wsgi
|
Python
|
jpmorgan.wsgi
|
mrukhlov/jpmorgan
|
ef8f49054772c3f07161f4eaf7c119019ce600e2
|
[
"Apache-2.0"
] | null | null | null |
jpmorgan.wsgi
|
mrukhlov/jpmorgan
|
ef8f49054772c3f07161f4eaf7c119019ce600e2
|
[
"Apache-2.0"
] | null | null | null |
jpmorgan.wsgi
|
mrukhlov/jpmorgan
|
ef8f49054772c3f07161f4eaf7c119019ce600e2
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.path.insert(0, '/srv/jpmorgan')
from app import app as application
| 20.5
| 35
| 0.768293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15
| 0.182927
|
18e485c0872cf9f87d1144effd64d6706192e11d
| 449
|
py
|
Python
|
examples/plot_voronoi.py
|
smsaladi/msmexplorer
|
7880545c239c8f33ababdd111f58fd553b8bbdde
|
[
"MIT"
] | 6
|
2018-03-02T21:02:32.000Z
|
2020-05-26T08:23:24.000Z
|
examples/plot_voronoi.py
|
smsaladi/msmexplorer
|
7880545c239c8f33ababdd111f58fd553b8bbdde
|
[
"MIT"
] | 9
|
2018-03-02T21:19:26.000Z
|
2021-07-26T13:54:30.000Z
|
examples/plot_voronoi.py
|
smsaladi/msmexplorer
|
7880545c239c8f33ababdd111f58fd553b8bbdde
|
[
"MIT"
] | 5
|
2018-02-07T18:42:23.000Z
|
2021-04-29T07:01:50.000Z
|
"""
Voronoi Plot
============
"""
import numpy as np
from sklearn.cluster import KMeans
import msmexplorer as msme
# Create a random dataset across several variables
rs = np.random.RandomState(42)
n, p = 1000, 2
d = rs.normal(0, 2, (n, p))
d += np.log(np.arange(1, p + 1)) * -5 + 10
# Cluster data using KMeans
kmeans = KMeans(random_state=rs)
kmeans.fit(d)
# Plot Voronoi Diagram
msme.plot_voronoi(kmeans, color_palette=msme.palettes.msme_rgb)
| 20.409091
| 63
| 0.701559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 132
| 0.293987
|
18e6697372af7e5090bad7d69e9278ea7660cfcd
| 40,586
|
py
|
Python
|
algo_sherbend.py
|
ymoisan/GeoSim
|
84f1482c885d7d3b1e07b92dee9580e4bcacf9cb
|
[
"MIT"
] | null | null | null |
algo_sherbend.py
|
ymoisan/GeoSim
|
84f1482c885d7d3b1e07b92dee9580e4bcacf9cb
|
[
"MIT"
] | null | null | null |
algo_sherbend.py
|
ymoisan/GeoSim
|
84f1482c885d7d3b1e07b92dee9580e4bcacf9cb
|
[
"MIT"
] | null | null | null |
"""This algorithm implements the Wang Generalization algotithm with constraint checking
This algorithm simplifies lines. It detects for each line the bends. It analyze the bend and
remove the bends that are below a certain diameter. The point and lines that do not need
to be simplified are still used to enforce topology integrity between those feature that need to be simplified
Limits and constraints
Always works better when the line to process meet the OGC simple line.
"""
import math, sys
from shapely.geometry import Point, LineString, LinearRing, Polygon
from shapely.prepared import prep
from shapely import affinity
from lib_geosim import GenUtil, PointSc, LineStringSc, SpatialContainer, GeoSimException
# Internal constant ===> Should be modify with care...
_AREA_CMP_INDEX = .75 # Compactness index factor applied to the adjusted area
#Internal key word constants
_BURNED = "Burned"
_DIAMETER = "diameter"
_SIMPLIFIED = 'Simplified'
_NOT_SIMPLIFIED = 'NotSimplified'
_UNSIMPLIFIABLE = 'Unsimplifiable'
class LineStringSb(LineStringSc):
"""A class to represent a LineString used by the SherBend algorithm
Attributes
----------
coords : List
A list of coordinates (x,y)
original_type: str
The original type of the feature
min_adj_are : float
The minimal adjusted area below which the vends are deleted
properties : dict
The dictionary of the properties (attributes of the features)
fast_access : Boolean
        A flag to indicate if we keep a copy of the coordinates in order to accelerate access, because
        access through the C function is slow
"""
def __init__(self, coords, original_type, min_adj_area, layer_name, properties, fast_access=True):
super().__init__(coords)
self.sb_original_type = original_type
self.sb_layer_name = layer_name
self.sb_properties = properties
self.sb_min_adj_area = min_adj_area
self._sb_fast_access = fast_access
if self._sb_fast_access:
self.__lst_coords = list(super().coords)
# Declaration of the instance variable
self.sb_geom_type = self.geom_type # variable defined to avoid slower C calls with geom_type
self.sb_is_simplest = False # The line is not at its simplest form
self.sb_bends = [] # Holder for the bend of the line
# Is the line string closed
@property
def sb_is_closed(self):
"""This method tests if a line is closed (first/last coordinates are the same)
Parameters
----------
None
Returns
-------
bool
True: the line is closed or False the line is open
"""
try:
return self._sb_is_closed
except AttributeError:
# A closed line need at least 4 vertex to be valid
if len(self.coords) >= 4 and GenUtil.distance(self.coords[0], self.coords[-1]) <= GenUtil.ZERO:
self._sb_is_closed = True
else:
self._sb_is_closed = False
return self._sb_is_closed
@property
def coords(self):
"""This method keeps a copy of the coordinate in a list.
This methods allows a faster acces than to always access the coordinates from the C call
of shapely. the drawback more memory space
Parameters
----------
None
Returns
-------
list
Coordinate of the LineString
"""
if self._sb_fast_access:
return self.__lst_coords
else:
return super().coords
@coords.setter
def coords(self, coords):
"""Set the coordinate of a LineString
Parameters
----------
coords : list
List of x,y coordinates
Returns
-------
None
"""
# Access the coord attribute in the parent class
super(LineStringSb, self.__class__).coords.fset(self, coords) # Odd writing but it's needed...
if self._sb_fast_access:
self.__lst_coords = list(super().coords)
# Delete variable that are now outdated. so they will be computed next time it will be accessed
try:
del self._vertex_orientation
except AttributeError:
pass
@property
def vertex_orientation(self):
"""This method calculates the orientation of the vertex
List containing the orientation at each vertex of the line.
-1: anti clockwise, +1 Clockwise; 0 Straight line
For closed line the first and last vertice bear the same value
For open line the first and last value are None
Parameters
----------
None
Returns
-------
None
"""
try:
return self._vertex_orientation
except AttributeError:
self._vertex_orientation = []
for i in range(1, len(self.coords) - 1): # '1' and 'cnt-1' to 'forget' first and last vertice
orient = GenUtil.orientation(self.coords[i-1], self.coords[i], self.coords[i+1])
self._vertex_orientation.append(orient)
if self.is_closed:
# Case of a closed line or polygon; we do not copy the first and lat even if they are the same
orient = GenUtil.orientation(self.coords[-2], self.coords[0], self.coords[1])
self._vertex_orientation = [orient] + self._vertex_orientation
else:
# Case of an open line; the first and last are None
orient = None
self._vertex_orientation = [orient] + self._vertex_orientation + [orient]
return self._vertex_orientation
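# Illustration of the convention documented above (assuming GenUtil.orientation
# follows the -1 / 0 / +1 values listed in the docstring): for the open line
# (0,0), (1,0), (1,1) the turn at (1,0) is to the left, i.e. anti-clockwise,
# so vertex_orientation would be [None, -1, None]; for the straight line
# (0,0), (1,0), (2,0) it would be [None, 0, None].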
def _remove_colinear_vertex(self):
"""This method remove the co linear vertex in the line string. Also handles closed line
Parameters
----------
None
Returns
-------
None
"""
if len(self.coords) <= 2:
# Nothing to do with a line with 2 points
pass
else:
# Detect the position of the collinear vertices
vertex_to_del = [i for i, orient in (enumerate(self.vertex_orientation)) if orient == 0]
if len(vertex_to_del) >= 1:
# Delete the collinear vertices
lst_coords = list(self.coords)
for i in reversed(vertex_to_del):
del(lst_coords[i])
if vertex_to_del[0] == 0:
# When the first vertex is deleted we need to recopy the "new first" to the last vertex
lst_coords = lst_coords + [lst_coords[0]]
self.coords = lst_coords
def _rotate_start_bend(self):
"""Rotate a closed line string so the start of the line is also the start of a clockwise bend
To be done on closed line only
Parameters
----------
None
Returns
-------
None
"""
rotate = None
max_v = len(self.vertex_orientation)
for i in range(max_v):
j = (i+1) % max_v
if self.vertex_orientation[i] == GenUtil.CLOCKWISE and \
self.vertex_orientation[j] == GenUtil.ANTI_CLOCKWISE:
rotate = i
break
# Rotate the first/last vertex to the start of a clockwise bend
if rotate is None:
# All the bends are clockwise. Nothing to do
pass
elif rotate == 0:
# The line string does not need to be rotated
pass
else:
lst_coord = self.coords[rotate:] + self.coords[1:rotate+1]
self.coords = lst_coord # Update the LineString coordinate
def _extract_coords(self, i,j):
"""Extract the coordinate between index [i,j]
If j is lower than i, act like a circular array and avoid duplication of the first/last vertex
Parameters
----------
i,j : int
Index used to extract a sub list
Returns
-------
List
list of (x,y) coordinates
"""
if i <= j:
lst_coords = self.coords[i:j+1]
else:
lst_coords = self.coords[i:] + self.coords[0:j+1]
return lst_coords
def _change_inflexion(self, i):
"""Flag if there is an inflexion between at the specified vertices.
There is inflexion when a change of orientation occurs from clock wise to anti clocwise or vice cersa
Parameters
----------
i : int
Index of for vertex orientation
Returns
-------
bool
Flag indicating if an inflexion occurs or not
"""
max_v = len(self.vertex_orientation)
if (self.vertex_orientation[i] == GenUtil.ANTI_CLOCKWISE and
self.vertex_orientation[(i+1) % max_v] == GenUtil.CLOCKWISE) or \
(self.vertex_orientation[i] == GenUtil.CLOCKWISE and
self.vertex_orientation[(i+1) % max_v] == GenUtil.ANTI_CLOCKWISE):
inflexion = True
else:
inflexion = False
return inflexion
def _add_bends(self, inflexions):
"""Add Bend to the line from the inflexion list
Parameters
----------
inflexions : List
List of the inflexions in the list
Returns
-------
None
"""
for k in range(len(inflexions) - 1):
i = inflexions[k][0]
j = inflexions[k + 1][1]
self.sb_bends.append(Bend(i, j, self._extract_coords(i, j)))
def _create_bends(self):
"""Create the bends in the line
Parameters
----------
None
Returns
-------
None
"""
# Delete any actual bend information
self.sb_bends = []
# Remove the collinear vertices in order to facilitate bend detection (moreover collinear vertices are useless)
self._remove_colinear_vertex()
inflexions = []
max = len(self.vertex_orientation)
if self.is_closed:
# Rotate the line to position at the start of a bend
self._rotate_start_bend()
# The vertex_orientation list is considered a circular list
for i in range(max):
j = (i + 1) % max
if self._change_inflexion(i):
inflexions.append((i, j))
# Create the bend from the inflexion point
if inflexions:
if len(inflexions) >= 3:
# If there are 3 or more inflexions we add another circular inflexion
i = inflexions[-1][0]
j = inflexions[0][1]
inflexions.append((i, j))
# Transform the inflexion into bends
self._add_bends(inflexions)
else:
# The vertex_orientation list is not considered a circular list
if max == 3:
# Special case there is only one bend to simplify
j = len(self.coords)-1
self.sb_bends.append(Bend(0, j, self._extract_coords(0, j)))
elif max >= 4:
for i in range(1, max-2):
if self._change_inflexion(i):
inflexions.append((i, i+1))
# Add inflexion to add the first and last bend
inflexions = [(0, None)] + inflexions + [(None, max-1)]
# Transform inflexion into bends
self._add_bends(inflexions)
return
def _sort_bends(self):
"""Sort the bends by order of ascending min_adj_are
Parameters
----------
None
Returns
-------
None
"""
lst_bends = []
for i, bend in enumerate(self.sb_bends):
if bend.adj_area <= self.sb_min_adj_area:
# Only select the bend below the minimum adjusted area
lst_bends.append((i, bend.adj_area))
# Sort based of the adj_area from smallest to biggest
lst_bends.sort(key=lambda tup: tup[1]) # sorts in place
return lst_bends
def _offset_bend_ij(self, i, j):
""""Offset the value of the different bend i,j because one or more vertice of the line were removed
Handle circular list when j < i
Parameters
----------
i,j : int
Index in the line where the vertice were removed
Returns
-------
None
"""
if i < j:
offset = j-i-1
else:
offset = j
for bend in self.sb_bends:
if bend.status == _NOT_SIMPLIFIED:
if bend.i < bend.j:
if bend.i >= j:
bend.i -= offset
bend.j -= offset
else:
if bend.i >= j:
bend.i -= offset
def _make_line_ccw(self):
"""Make sure the line is counter clockwise.
Only apply to closed line
Parameters
----------
None
Returns
-------
None
"""
if self.sb_is_closed:
tmp_ring = LinearRing(self.coords)
if not tmp_ring.is_ccw:
# The linear ring is clockwise. Reverse the coordinates to make it ccw
self.coords = list(reversed(self.coords))
def simplify(self, diameter, s_constraints=None):
"""Simplify the line by reducing each bend
Parameters
----------
None
Returns
-------
None
"""
nbr_bend_simplified = 0
# Make sure the line is counter clockwise
#
self._make_line_ccw()
# Create the bend in the line
self._create_bends()
max_bends = len(self.sb_bends)
sorted_bends = self._sort_bends()
if len(sorted_bends) == 0:
# No more bend to simplify. Line is at its simplest form
self.sb_is_simplest = True
elif len(sorted_bends) >= 2:
# Make the biggest bend (last one) unsimplifiable
ind_last = sorted_bends[-1][0]
self.sb_bends[ind_last].status = _UNSIMPLIFIABLE
# Loop over each bend to simplify them
for sorted_bend in sorted_bends:
ind = sorted_bend[0]
if self.sb_bends[ind].status == _NOT_SIMPLIFIED:
ind_before = None
ind_after = None
if self.sb_is_closed:
if max_bends >= 2:
ind_before = (ind-1) % max_bends
ind_after = (ind+1) % max_bends
else:
if ind > 0:
ind_before = ind-1
if ind < max_bends-1:
ind_after = ind+1
# Validate the spatial constraints
i = self.sb_bends[ind].i
j = self.sb_bends[ind].j
if i < j:
lst_coords = self.coords[0:i+1] + self.coords[j:]
else:
# Manage circular list
lst_coords = self.coords[j:i+1] + self.coords[j:j+1]
if self.is_closed:
if len(lst_coords) >= 4:
if s_constraints is not None:
in_conflict = s_constraints.check_constraints(self, self.sb_bends[ind])
else:
in_conflict = False
else:
# A closed line cannot have less than 4 vertices
in_conflict = True
else:
if len(lst_coords) >= 2:
if s_constraints is not None:
in_conflict = s_constraints.check_constraints(self, self.sb_bends[ind])
else:
in_conflict = False
else:
# An open line cannot have less than 2 vertices
in_conflict = True
if not in_conflict:
# Update the coordinates
self.coords = lst_coords
# Bends before and after must not be simplified in this pass, maybe in a next pass
if ind_before is not None:
self.sb_bends[ind_before].status = _UNSIMPLIFIABLE
if ind_after is not None:
self.sb_bends[ind_after].status = _UNSIMPLIFIABLE
self.sb_bends[ind].status = _SIMPLIFIED
nbr_bend_simplified += 1
self._offset_bend_ij(i, j)
return nbr_bend_simplified
class PointSb(PointSc):
"""
A class to represent a Point used by the SherBend algorithm
Attributes
----------
coords : tuple
A tuple (x,y) representing one coordinate
properties : dict
The dictionary of the properties (attributes of the features)
fast_access : Boolean
A flag to indicate if we keep a copy of the coordinates in order to accelerate the access because
the access to the C function is slow
"""
def __init__(self, coords, layer_name, properties, fast_access=True):
super().__init__(coords)
self.sb_is_simplest = True
self.sb_layer_name = layer_name
self.sb_properties = properties
self.sb_original_type = GenUtil.POINT
self.sb_geom_type = GenUtil.POINT # For faster access than calling C (geom_type)
self._sb_fast_access = fast_access
if self._sb_fast_access:
self.__lst_coords = list(super().coords)
@property
def coords(self):
if self._sb_fast_access:
return self.__lst_coords
else:
return super().coords
@coords.setter
def coords(self, coords):
Point.coords.__set__(self, coords)
if self._sb_fast_access:
self.__lst_coords = list(super().coords)
class SpatialConstraints(object):
"""
A class to represent validation of spatial constraints
Attributes
----------
simplicity : bool
Flag indicating if simplicity constraint (self crossing) is validated
crossing : bool
Flag indicating if crossing constraint (intersection between feature) is validated
sidedness : bool
Flag indicating if sidedness constraint (relative adjacency) is validated
s_container : SpatialContainer
Object containing all the feature
"""
def __init__(self, simplicity=True, crossing=True, sidedness=True, s_container=None):
"""Constructor for the SpatialConstraint class"""
self.simplicity = simplicity
self.crossing = crossing
self.sidedness = sidedness
self.s_container = s_container
self.nbr_err_simplicity = 0
self.nbr_err_crossing = 0
self.nbr_err_sidedness = 0
def check_constraints(self, line, bend):
"""Validate the different spatial constraint
Parameters
----------
line : LineStringSb
LineString to validate for spatial constraints
bend : Bend
Bend to validate for spatial constraints
Returns
-------
bool
Flag indicating if the spatial constraints are valid or not"""
in_conflict = False
if not in_conflict:
in_conflict = self._check_simplicity(line, bend.replacement_line)
if not in_conflict:
in_conflict = self._check_crossing(line, bend.replacement_line)
if not in_conflict:
in_conflict = self._check_sidedness(line, bend.polygon)
return in_conflict
def _check_simplicity(self, line, new_sub_line):
"""Check if the new sub line creates a self intersection in the line
Parameter
---------
line : LineStringSb
LineString to validate for self intersection
new_sub_line : LineString
New LineString to validate for self intersection
Returns
-------
Boolean
Flag indicating if the line is simple or not
"""
# Create a slightly smaller line so that it does not touch the start and end of the original line (increases performance)
smaller_sub_line = affinity.scale(new_sub_line, xfact=1. - GenUtil.ZERO, yfact=1. - GenUtil.ZERO)
in_conflict = False
prepared_smaller_sub_line = prep(smaller_sub_line)
if prepared_smaller_sub_line.intersects(line):
in_conflict = True
self.nbr_err_simplicity += 1
return in_conflict
def _check_crossing(self, line, new_sub_line):
"""Check if the new sub line intersects other line
Parameter
---------
line : LineStringSb
LineString to validate for intersection with other line
new_sub_line : LineString
New LineString to validate for intersection with other line
Returns
-------
Boolean
Flag indicating if the line intersect with other line or not
"""
features = self.s_container.get_features(new_sub_line.bounds, remove_features=(line,))
# Check that the new sub line does not cross any other feature
prepared_new_sub_line = prep(new_sub_line)
in_conflict = False
gen_crosses = filter(prepared_new_sub_line.intersects, features)
for feature in gen_crosses:
in_conflict = True
self.nbr_err_crossing += 1
break
return in_conflict
def _check_sidedness(self, line, pol):
"""Validate the line for adjacency constraints
Parameter
---------
line : LineStringSb
LineString to validate for adjacency
pol : Polygon
Polygon to check for adjacency
Returns
-------
Boolean
Flag indicating if the line creates or not adjacency problem
"""
features = self.s_container.get_features(pol.bounds, remove_features=(line,))
# Check that the new middle line does not cross any interior holes of the polygon
prepared_pol = prep(pol)
gen_contains = filter(prepared_pol.contains, features)
in_conflict = False
for feature in gen_contains:
in_conflict = True
self.nbr_err_sidedness += 1
break
return in_conflict
class Bend(object):
"""Class defining the attributes and operations for bend manipulation
Attributes: None
"""
def __init__(self, i, j, bend_coords):
"""Constructor of the class
Parameters
----------
i : int
Index of the start of the bend in the list of coordinates
j : int
Index of the end of the bend in the list of coordinates
bend_coords : list
List of x,y coordinate of the bend
Returns
-------
None
"""
self.i = i # Index of the start of the bend coordinate
self.j = j # Index of the end of the bend coordinate
self.status = _NOT_SIMPLIFIED # Type of bend by default: UNTOUCHED
self.bend_coords = bend_coords # List of the coordinate forming the bend
@property
def polygon(self): # Polygon formed by the bend
"""Creates a polygon from the coordinates forming the bend
Parameters
----------
None
Returns
-------
Polygon
polygon formed by the coordinates
"""
try:
return self._polygon
except AttributeError:
self._polygon = Polygon(self.bend_coords)
return self._polygon
@property
def area(self):
"""Constructor
Parameters
----------
None
Returns
-------
float
Area of the polygon
"""
try:
return self._area
except AttributeError:
self._area = self.polygon.area
if self._area <= GenUtil.ZERO:
self._area = GenUtil.ZERO # In case of area=0 we assume almost 0 area instead
return self._area
@property
def base(self):
"""Length of the base of the bend. Distance between the first and last coordinate
Parameters
----------
None
Returns
-------
Float
Length of the base of the bend
"""
try:
return self._base
except AttributeError:
self._base = GenUtil.distance(self.bend_coords[0], self.bend_coords[-1])
if self._base <= GenUtil.ZERO:
self._base = GenUtil.ZERO # Avoids a case of division by zero
return self._base
@property
def perimeter(self):
"""Length of the perimeter of the bend (polygon)
Parameters
----------
None
Returns
-------
float
Length of the perimeter
"""
try:
return self._perimeter
except AttributeError:
self._perimeter = self.polygon.length
return self._perimeter
@property
def cmp_index(self):
"""Calculates the value of the compactness index
Parameters
----------
None
Returns
-------
float
Value of the compactness index
"""
try:
return self._cmp_index
except AttributeError:
self._cmp_index = GenUtil.calculate_compactness_index(self.area, self.perimeter)
return self._cmp_index
@property
def adj_area(self):
"""Calculates the value of the compactness index of the polygon
Parameters
----------
None
Returns
-------
float
Value of the adjusted area
"""
try:
return self._adj_area
except AttributeError:
self._adj_area = GenUtil.calculate_adjusted_area(self.area, self.cmp_index)
return self._adj_area
@property
def replacement_line(self):
"""Calculates the replacement line of the bend
Parameters
----------
None
Returns
-------
LineString
Replacement line for the bend
"""
try:
return self._replacement_line
except AttributeError:
self._replacement_line = LineString((self.bend_coords[0], self.bend_coords[-1]))
return self._replacement_line
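# Minimal illustration of the Bend helpers above (the base value assumes
# GenUtil.distance is the usual Euclidean distance; the rest follows directly
# from the class definition):
#
#     bend = Bend(0, 2, [(0, 0), (1, 1), (2, 0)])
#     bend.base               # 2.0, distance between the first and last coordinates
#     bend.polygon            # triangle formed by the three bend coordinates (area 1.0)
#     bend.replacement_line   # LineString([(0, 0), (2, 0)])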
def create_replacement_line (lst_coords, bend, diameter):
"""Calculate the replacement line for a bend"""
# Extract the sub line containing the bend with one extra vertice on each side
sub_line = LineStringSb(lst_coords[bend.i-1:bend.j+1])
bend_i = 1
bend_j = len(sub_line.coords) - 1 # Index of the end of the bend within the sub line
# Translate to sub line so that the bend starts at 0,0
xoff, yoff = lst_coords[bend.i][0], lst_coords[bend.i][1]
line_translate = affinity.affine_transform(sub_line, [1, 0, 0, 1, -xoff, -yoff])
# Extract the angle between the base of the bend (bendi, bendj) and the x axis
lst_coord = list(line_translate.coords)
p0 = (lst_coord[bend_j][0], lst_coord[bend_j][1])
p1 = (lst_coord[bend_i][0], lst_coord[bend_i][1])
p2 = (abs(p0[0])+1., 0)
# Angle between the vector to the bend end (p0) and the x axis vector (p2),
# computed from the dot product
dot = p0[0] * p2[0] + p0[1] * p2[1]
len_a = (p0[0] ** 2 + p0[1] ** 2) ** .5
len_b = (p2[0] ** 2 + p2[1] ** 2) ** .5
angle = math.acos(dot / (len_a * len_b))
angle = (angle * 180 / math.pi)
if p0[1] >= 0.:
angle = -angle # Clockwise rotation
# Rotate the bend so it's on the x axis
a = math.cos(angle)
b = -math.sin(angle)
d = math.sin(angle)
e = math.cos(angle)
line_rotate = affinity.rotate(line_translate, angle, origin=(0, 0))
lst_coords = list(line_rotate.coords)
# line_i = LineString(lst_coords[0:3])
# line_j = LineString(lst_coords[-2:])
# Calculate the angle between the base of the bend of segment before and after the bend
theta_i = GenUtil.compute_angle(lst_coords[0], lst_coords[1], lst_coords[bend_j])
theta_j = GenUtil.compute_angle(lst_coords[bend_j], lst_coords[-2], lst_coords[-1])
# Determine if the
bend_line = LineString(lst_coords[bend_i:bend_j+1])
(minx, miny, maxx, maxy) = bend_line.bounds
y_dynamic = (abs(miny) + abs(maxy)) * 10.
x_middle = (lst_coords[bend_i][0] + lst_coords[bend_j][0]) / 2.
line_y_positive = LineString(((x_middle, 0), (x_middle, y_dynamic)))
line_y_negative = LineString(((x_middle, 0), (x_middle, -y_dynamic)))
bend_side = 0 # Default when the bend line crosses neither vertical line
if bend_line.crosses(line_y_positive):
bend_side = +1
elif bend_line.crosses(line_y_negative):
bend_side = -1
if lst_coords[0][1] >= 0.:
start_line_side = 1
else:
start_line_side = -1
if lst_coords[-1][1] >= 0.:
end_line_side = 1
else:
end_line_side = -1
if (start_line_side * end_line_side == -1):
print("Nothing to do....")
line5 = LineString(lst_coords[0:bend_i + 1] + lst_coords[bend_j:])
else:
# Both line are on the same side
if start_line_side == 1 and end_line_side == 1:
if bend_side == -1:
angle_bias = 2.
y_offset = -1
else:
angle_bias = 3.
y_offset = 1
if start_line_side == -1 and end_line_side == -1:
if bend_side == 1:
angle_bias = 2.
y_offset = 1
else:
angle_bias = 3.
y_offset = 1
theta_i = (180. - theta_i) / angle_bias
if theta_i >= 5.:
hypothenus = x_middle / math.cos(theta_i * math.pi / 180.)
y_height = math.sqrt(hypothenus ** 2 - x_middle ** 2)
if bend_side == -1:
y_height *= y_offset
new_coord = (x_middle, y_height)
line5 = LineString(lst_coords[0:bend_i + 1] + [new_coord] + lst_coords[bend_j:])
else:
print("Nothing to do....")
line5 = LineString(lst_coords[0:bend_i + 1] + lst_coords[bend_j:])
class AlgoSherbend(object):
"""Main class for the Sherbend algorithm
Attributes:
- None
"""
def __init__(self, command, geo_content):
"""Constructor of the class
Parameters
----------
command : DataClass
Contains all the commands for the Sherbend line simplification algorithm
geo_content: DataClass
Contains the geo information needed for the Sherbend line reduction algorithm
Returns
-------
None
"""
self.command = command
self.geo_content = geo_content
self.nbr_bend_simplified = 0
def calculate_min_adj_area(self, diameter):
"""Calculates the minimum adjusted area of a band
Parameters
----------
diameter : float
diameter used to calculate the minimum adjusted area
Returns
-------
float
Minimum adjusted area
"""
return (_AREA_CMP_INDEX * math.pi * (diameter/2.0)**2.0)
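# Worked example (follows directly from the formula above): with
# _AREA_CMP_INDEX = 0.75 and diameter = 2.0, the minimum adjusted area is
# 0.75 * pi * (2.0 / 2.0) ** 2 ~= 2.36.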
def _calculate_adj_area(self, coords):
"""Calculates the adjusted area of a polygon
Parameters
----------
coords : list
List of x,y coordinates defining a polygon
Returns
-------
float
Minimum adjusted area
"""
pol = Polygon(coords)
cmp_index = GenUtil.calculate_compactness_index(pol.area, pol.length)
adj_area = GenUtil.calculate_adjusted_area(pol.area, cmp_index)
return adj_area
def load_features(self, geo_content, command):
"""Load the points, line strings and polygons in the spatial container.
The Polygons are deconstructed into a list of LineStrings with clockwise orientation and extra information
needed for the reconstruction of the original Polygon
Parameters
----------
geo_content : DataClass
Contains all the input/output geospatial information
command :ParserArgument
Contains the parameters of the command line interface
Returns
-------
None
"""
features = [] # List of features to pass to the spatial container
# Load all the features in the spatial container
for feature in geo_content.in_features:
diameter = command.dlayer_dict[feature.sb_layer_name]
min_adj_area = self.calculate_min_adj_area(diameter)
if feature.geom_type == GenUtil.POINT:
out_feature = PointSb(feature.coords, feature.sb_layer_name, feature.sb_properties)
# Add the feature
features.append(out_feature)
elif feature.geom_type == GenUtil.LINE_STRING:
out_feature = LineStringSb(feature.coords, GenUtil.LINE_STRING, min_adj_area, feature.sb_layer_name,
feature.sb_properties)
# Add the feature
features.append(out_feature)
elif feature.geom_type == GenUtil.POLYGON:
adj_area = self._calculate_adj_area(feature.exterior.coords)
# Only keep the polygon over the minimum adjusted area
if not command.exclude_polygon or adj_area > min_adj_area:
# Deconstruct the Polygon into a list of LineString with supplementary information
# needed to reconstruct the original Polygon
ext_feature = LineStringSb(feature.exterior.coords, GenUtil.POLYGON_EXTERIOR, min_adj_area,
feature.sb_layer_name, feature.sb_properties)
interiors = feature.interiors
int_features = []
# Extract the interiors as LineString
for interior in interiors:
adj_area = self._calculate_adj_area(interior.coords)
# Only keep the interior (hole) over the minimal adjusted area
if not command.exclude_hole or adj_area > min_adj_area:
interior = LineStringSb(interior.coords, GenUtil.POLYGON_INTERIOR, min_adj_area, None, None)
int_features.append(interior)
else:
geo_content.nbr_del_holes += len(feature.interiors)
# Add interior features needed for Polygon reconstruction
ext_feature.sb_interiors = int_features
# Add the exterior and the interior independently
features.append(ext_feature) # Add the exterior
features += int_features # Add the interiors
else:
# Do not add the feature (exterior and interiors ) in the spatial container
# Update some stats
geo_content.nbr_del_polygons += 1
geo_content.nbr_del_holes += len(feature.interiors)
else:
raise GeoSimException ("Invalid geometry type: {}".format(feature.geometry))
# Create the spatial container that will receive all the spatial features
self.s_container = SpatialContainer()
self.s_container.add_features(features) # Load all the features
return
def _manage_lines_simplification (self, s_constraints):
"""Main routine to simplify the lines
For each line to simplify
For each valid bend to simplify
check the constraints; if the constraints are violated check alternative bends (only if the
number of bends to simplify is one).
One of the costly operations, especially for very long line strings (like contours), is to rewrite the
coordinates into the Shapely structure. This is why we update the Shapely structure at the end
when the last bend of the line is processed
Parameters
----------
s_constraints : SpatialConstraints
Spatial constraints to validate
Returns
-------
int
Total number of bend simplified
"""
iter_nbr = 0
total_nbr_bend_simplified = 0
# Iterate until all the lines are simplified or no more lines have to be simplified
while (True):
iter_nbr_bend_simplified = 0
print('Iteration # {}'.format(iter_nbr))
# Build line iterator
lines = (feature for feature in self.s_container.get_features()
if(not feature.sb_is_simplest and feature.sb_geom_type==GenUtil.LINE_STRING ))
for line in lines:
nbr_bend_simplified = line.simplify(self.command.diameter, s_constraints)
iter_nbr_bend_simplified += nbr_bend_simplified
total_nbr_bend_simplified += nbr_bend_simplified
print('Number of bend simplified {}'.format(iter_nbr_bend_simplified))
print('----------')
iter_nbr += 1
if iter_nbr_bend_simplified == 0:
break
print('Total number of bend simplified: {}'.format(total_nbr_bend_simplified))
print('Total number of simplicity error: {}'.format(s_constraints.nbr_err_simplicity))
print('Total number of crossing error: {}'.format(s_constraints.nbr_err_crossing))
print('Total number of sidedness error: {}'.format(s_constraints.nbr_err_sidedness))
return total_nbr_bend_simplified
def process(self):
"""Main routine for the Sherbend algorithm
The algorithm will simplify the lines using the Sherbend algorithm.
It will iterate over the lines until there are no more bends to simplify.
Parameters
----------
None
Returns
-------
geo_content : DataClass
Contains the output information
"""
# Load the features into the spatial container
self.load_features(self.geo_content, self.command)
s_constraints = SpatialConstraints(s_container=self.s_container)
self._manage_lines_simplification(s_constraints)
for feature in self.s_container.get_features():
if feature.sb_geom_type == GenUtil.POINT:
self.geo_content.out_features.append(feature)
elif feature.sb_geom_type == GenUtil.LINE_STRING:
if feature.sb_original_type == GenUtil.LINE_STRING:
self.geo_content.out_features.append(feature)
else:
if feature.sb_original_type == GenUtil.POLYGON_EXTERIOR:
# The LineString was a Polygon exterior so reconstruct the original Polygon
interiors = [list(interior.coords) for interior in feature.sb_interiors]
polygon = Polygon(feature.coords, interiors)
polygon.sb_layer_name = feature.sb_layer_name
polygon.sb_properties = feature.sb_properties
self.geo_content.out_features.append(polygon)
else:
pass # Nothing to do with the holes here
return
| 33.486799
| 130
| 0.567708
| 39,508
| 0.973415
| 0
| 0
| 6,966
| 0.171631
| 0
| 0
| 17,096
| 0.421219
|
18e68b384996aec6ddd93fd4e05675ce4c043545
| 393
|
py
|
Python
|
src/Server/Py_Easy_TCP_Server.py
|
Moguf/Py_Network
|
13e351e9955464a5d65bd3dee3642438cfe9ed92
|
[
"MIT"
] | null | null | null |
src/Server/Py_Easy_TCP_Server.py
|
Moguf/Py_Network
|
13e351e9955464a5d65bd3dee3642438cfe9ed92
|
[
"MIT"
] | null | null | null |
src/Server/Py_Easy_TCP_Server.py
|
Moguf/Py_Network
|
13e351e9955464a5d65bd3dee3642438cfe9ed92
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import socket
port = 12345
MAX_SIZE = 65535
target_address = '127.0.0.1'
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((target_address,port))
s.listen(2)
conn, addr = s.accept()
# conn is the client socket returned by accept().
print(addr, "Now Connected")
text = "Thank you for connecting from TCP Server."
data = text.encode('ascii')
conn.send(data)
conn.close()
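# Hedged companion sketch (not part of the original example): a minimal client
# that would talk to this server, assuming it runs on the same machine.
#
#     import socket
#     c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     c.connect(('127.0.0.1', 12345))
#     print(c.recv(65535).decode('ascii'))
#     c.close()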
| 19.65
| 52
| 0.725191
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 134
| 0.340967
|
18e718827e2560736ccb159689ee15cc3157f2a5
| 4,084
|
py
|
Python
|
empyric/collection/controllers.py
|
dmerthe/empyric
|
7553b71e241709836cdef156afa7dd2a1c1edf5a
|
[
"MIT"
] | 3
|
2021-01-17T14:05:27.000Z
|
2022-03-03T06:25:39.000Z
|
empyric/collection/controllers.py
|
dmerthe/empyric
|
7553b71e241709836cdef156afa7dd2a1c1edf5a
|
[
"MIT"
] | null | null | null |
empyric/collection/controllers.py
|
dmerthe/empyric
|
7553b71e241709836cdef156afa7dd2a1c1edf5a
|
[
"MIT"
] | 1
|
2021-01-17T14:05:29.000Z
|
2021-01-17T14:05:29.000Z
|
from empyric.adapters import *
from empyric.collection.instrument import *
class OmegaCN7500(Instrument):
"""
Omega model CN7500 PID temperature controller
"""
name = 'OmegaCN7500'
supported_adapters = (
(Modbus, {'slave_mode': 'rtu',
'baud_rate': 38400,
'parity': 'N',
'delay': 0.2}),
)
knobs = (
'output',
'setpoint',
'proportional band',
'integration time',
'derivative time'
)
meters = (
'temperature',
'power'
)
@setter
def set_output(self, state):
if state == 'ON':
self.backend.write_bit(0x814, 1) # turn on output & start PID control
elif state == 'OFF':
self.backend.write_bit(0x814, 0) # turn off output & stop PID control
@setter
def set_setpoint(self, setpoint):
self.write(0x1001, 10*setpoint)
@getter
def get_setpoint(self):
return self.read(0x1001) / 10
@setter
def set_proportional_band(self, P):
self.write(0x1009, int(P))
@getter
def get_proportional_band(self):
return self.read(0x1009)
@setter
def set_integration_time(self, Ti):
self.write(0x100c, int(Ti))
@getter
def get_integration_time(self):
return self.read(0x100c)
@setter
def set_derivative_time(self, Td):
self.write(0x100b, int(Td))
@getter
def get_derivative_time(self):
return self.read(0x100b)
@measurer
def measure_temperature(self):
return self.read(0x1000) / 10
@measurer
def measure_power(self):
return self.read(0x1000) / 10
class RedLionPXU(Instrument):
"""
Red Lion PXU temperature PID controller
"""
name = 'RedLionPXU'
supported_adapters = (
(Modbus, {'baud_rate': 38400}),
)
knobs = (
'output',
'setpoint',
'autotune'
)
meters = (
'temperature',
'power'
)
@setter
def set_output(self, state):
if state == 'ON':
self.backend.write_bit(0x11, 1) # turn on output & start PID control
elif state == 'OFF':
self.backend.write_bit(0x11, 0) # turn off output & stop PID control
@setter
def set_setpoint(self, setpoint):
self.write(0x1, int(setpoint))
@measurer
def measure_temperature(self):
return self.read(0x0)
@measurer
def measure_power(self):
return self.read(0x8) / 10
@setter
def set_autotune(self, state):
if state == 'ON':
self.write(0xf, 1)
elif state == 'OFF':
self.write(0xf, 0)
class WatlowEZZone(Instrument):
"""
Watlow EZ-Zone PID process controller
"""
name = 'WatlowEZZone'
supported_adapters = (
(Modbus, {'baud_rate': 9600}),
)
knobs = (
'setpoint',
)
meters = (
'temperature',
)
@measurer
def measure_temperature(self):
return self.read(360, dtype='float', byte_order=3) # swapped little-endian byte order (= 3 in minimalmodbus)
@getter
def get_setpoint(self):
return self.read(2160, dtype='float', byte_order=3)
@setter
def set_setpoint(self, setpoint):
return self.write(2160, setpoint, dtype='float', byte_order=3)
@getter
def get_proportional_band(self):
return self.read(1890, dtype='float', byte_order=3)
@setter
def set_proportional_band(self, band):
return self.write(1890, band, dtype='float', byte_order=3)
@getter
def get_time_integral(self):
return self.read(1894, dtype='float', byte_order=3)
@setter
def set_time_integral(self, integral):
return self.write(1894, integral, dtype='float', byte_order=3)
@getter
def get_time_derivative(self):
return self.read(1896, dtype='float', byte_order=3)
@setter
def set_time_derivative(self, derivative):
return self.write(1896, derivative, dtype='float', byte_order=3)
| 22.31694
| 117
| 0.588149
| 4,000
| 0.979432
| 0
| 0
| 2,745
| 0.672135
| 0
| 0
| 730
| 0.178746
|
18e80ab1f054cab4110f82ef2bcc62a0377ee9cd
| 2,468
|
py
|
Python
|
bot/main.py
|
the-rango/Discord-Python-Bot-Tutorial
|
5afa7b0b6b2397a0d566bc6009bb7cac2e4354de
|
[
"Apache-2.0"
] | null | null | null |
bot/main.py
|
the-rango/Discord-Python-Bot-Tutorial
|
5afa7b0b6b2397a0d566bc6009bb7cac2e4354de
|
[
"Apache-2.0"
] | null | null | null |
bot/main.py
|
the-rango/Discord-Python-Bot-Tutorial
|
5afa7b0b6b2397a0d566bc6009bb7cac2e4354de
|
[
"Apache-2.0"
] | null | null | null |
# APACHE LICENSE
# Copyright 2020 Stuart Paterson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# External Packages
import os
import discord
from dotenv import load_dotenv
# Local Files
import utils
# Create the bot
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
client = discord.Client()
def get_channel_by_name(client, guild, name):
"""Returns a channel by name from a specific guild"""
for server in client.guilds:
if server == guild:
for channel in server.text_channels:
if channel.name == name:
return channel
@client.event
async def on_ready():
# Triggered when starting up the bot
print(f'{client.user} has connected to Discord!')
@client.event
async def on_member_update(before, after):
if str(before.status) == "offline" and str(after.status) == "online":
# When a user comes online
channel = utils.get_channel_by_name(client, after.guild, 'general')
try:
# Send your message when a user comes online here!
pass
except discord.errors.Forbidden:
pass
@client.event
async def on_message(message):
if message.author == client.user:
# Ignore messages this bot sends
return
current_channel = message.channel
if message.content and len(message.content) > 1 and message.content[0] == '!':
# First we extract the message after the ! then split it on spaces to
# get a list or the arguments the user gave
message_text = message.content[1:]
split_message = message_text.split(" ")
command = split_message[0]
if command == "test":
response = "test successful"
await current_channel.send(response)
elif command == "stop":
await client.logout()
# elif command == "foo":
# # Add your extra commands in blocks like this!
# pass
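# Hedged illustration of an extra command (hypothetical command name), kept
# commented out in the same style as the "foo" stub above:
# elif command == "echo":
#     await current_channel.send(" ".join(split_message[1:]) or "Nothing to echo")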
# Run the bot
client.run(TOKEN)
| 29.380952
| 82
| 0.66329
| 0
| 0
| 0
| 0
| 1,339
| 0.542545
| 1,297
| 0.525527
| 1,156
| 0.468395
|
18e81c7e28ba4d13c0ba77aba68314299f3e766e
| 4,945
|
py
|
Python
|
src/main.py
|
LucidtechAI/auth_example
|
a370833a16f8345e1e595f1ade3e830f8371157c
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
LucidtechAI/auth_example
|
a370833a16f8345e1e595f1ade3e830f8371157c
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
LucidtechAI/auth_example
|
a370833a16f8345e1e595f1ade3e830f8371157c
|
[
"Apache-2.0"
] | 1
|
2019-03-08T09:52:05.000Z
|
2019-03-08T09:52:05.000Z
|
import argparse
import json
import requests
import pathlib
from urllib.parse import urlparse
from auth import AWSSignatureV4
def create_auth():
return AWSSignatureV4(
region='eu-west-1',
service='execute-api',
aws_access_key=args.access_key_id,
aws_secret_key=args.secret_access_key,
aws_api_key=args.api_key
)
def create_signing_headers(method, path, body):
auth = create_auth()
uri = urlparse(f'{args.api_endpoint}{path}')
auth_headers = auth.sign_headers(
uri=uri,
method=method,
body=body
)
headers = {**auth_headers, 'Content-Type': 'application/json'}
return uri, headers
def post_documents():
body = json.dumps({'contentType': args.content_type, 'consentId': args.consent_id}).encode()
uri, headers = create_signing_headers('POST', '/documents', body)
post_documents_response = requests.post(
url=uri.geturl(),
headers=headers,
data=body
)
post_documents_response.raise_for_status()
return post_documents_response.json()
def put_document(presigned_url):
body = pathlib.Path(args.document_path).read_bytes()
headers = {'Content-Type': args.content_type}
if args.with_s3_kms:
headers['x-amz-server-side-encryption'] = 'aws:kms'
put_document_response = requests.put(presigned_url, data=body, headers=headers)
put_document_response.raise_for_status()
return put_document_response.content.decode()
def post_predictions(document_id, model_name):
body = json.dumps({'documentId': document_id, 'modelName': model_name}).encode()
uri, headers = create_signing_headers('POST', '/predictions', body)
post_predictions_response = requests.post(
url=uri.geturl(),
headers=headers,
data=body
)
post_predictions_response.raise_for_status()
return post_predictions_response.json()
def upload_document():
post_documents_response = post_documents()
document_id = post_documents_response['documentId']
presigned_url = post_documents_response['uploadUrl']
put_document(presigned_url)
return document_id
def invoice_prediction():
document_id = upload_document()
predictions = post_predictions(document_id, 'invoice')
print(json.dumps(predictions, indent=2))
def receipt_prediction():
document_id = upload_document()
predictions = post_predictions(document_id, 'receipt')
print(json.dumps(predictions, indent=2))
def document_split():
document_id = upload_document()
predictions = post_predictions(document_id, 'documentSplit')
print(json.dumps(predictions, indent=2))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('api_endpoint', help='HTTPS endpoint for REST API')
parser.add_argument('api_key')
parser.add_argument('access_key_id')
parser.add_argument('secret_access_key')
parser.add_argument('--with_s3_kms', action='store_true')
subparsers = parser.add_subparsers()
invoice_prediction_parser = subparsers.add_parser('invoice_prediction')
invoice_prediction_parser.add_argument('document_path', help='Path to document to make predictions on')
invoice_prediction_parser.add_argument('content_type', choices={'image/jpeg', 'application/pdf'},
help='Content-Type of document to make predictions on')
invoice_prediction_parser.add_argument('--consent_id', default='1234',
help='Consent ID is typically a mapping from end user to a unique identifier')
invoice_prediction_parser.set_defaults(cmd=invoice_prediction)
receipt_prediction_parser = subparsers.add_parser('receipt_prediction')
receipt_prediction_parser.add_argument('document_path', help='Path to document to make predictions on')
receipt_prediction_parser.add_argument('content_type', choices={'image/jpeg', 'application/pdf'},
help='Content-Type of document to make predictions on')
receipt_prediction_parser.add_argument('--consent_id', default='1234',
help='Consent ID is typically a mapping from end user to a unique identifier')
receipt_prediction_parser.set_defaults(cmd=receipt_prediction)
document_split_parser = subparsers.add_parser('document_split')
document_split_parser.add_argument('document_path', help='Path to document to split')
document_split_parser.add_argument('content_type', choices={'application/pdf'},
help='Content-Type of document to split')
document_split_parser.add_argument('--consent_id', default='1234',
help='Consent ID is typically a mapping from end user to a unique identifier')
document_split_parser.set_defaults(cmd=document_split)
args = parser.parse_args()
args.cmd()
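# Example invocation (hypothetical endpoint, keys and file name; the argument
# order follows the argparse definitions above):
#
#     python main.py https://api.example.com <api_key> <access_key_id> \
#         <secret_access_key> invoice_prediction invoice.pdf application/pdf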
| 37.180451
| 121
| 0.70455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,137
| 0.229929
|
18e8661bfba7a01963831fc9dac3f2b59f8ea633
| 2,074
|
py
|
Python
|
examples/set_holidaydates.py
|
ultratolido/ekmmetters
|
e15325023262e228b4dc037021c28a8d2b9b9b03
|
[
"MIT"
] | null | null | null |
examples/set_holidaydates.py
|
ultratolido/ekmmetters
|
e15325023262e228b4dc037021c28a8d2b9b9b03
|
[
"MIT"
] | null | null | null |
examples/set_holidaydates.py
|
ultratolido/ekmmetters
|
e15325023262e228b4dc037021c28a8d2b9b9b03
|
[
"MIT"
] | null | null | null |
""" Simple example set holiday dates
(c) 2016 EKM Metering.
"""
import random
from ekmmeters import *
#port setup
my_port_name = "COM3"
my_meter_address = "300001162"
#log to console
ekm_set_log(ekm_print_log)
# init port and meter
port = SerialPort(my_port_name)
if (port.initPort() == True):
my_meter = V4Meter(my_meter_address)
my_meter.attachPort(port)
else:
print "Cannot open port"
exit()
# input over range(Extents.Holidays)
for holiday in range(Extents.Holidays):
day = random.randint(1,28)
mon = random.randint(1,12)
my_meter.assignHolidayDate(holiday, mon, day)
my_meter.setHolidayDates()
# input directly
param_buf = OrderedDict()
param_buf["Holiday_1_Month"] = 1
param_buf["Holiday_1_Day"] = 1
param_buf["Holiday_2_Month"] = 2
param_buf["Holiday_2_Day"] = 3
param_buf["Holiday_3_Month"] = 4
param_buf["Holiday_3_Day"] = 4
param_buf["Holiday_4_Month"] = 4
param_buf["Holiday_4_Day"] = 5
param_buf["Holiday_5_Month"] = 5
param_buf["Holiday_5_Day"] = 4
param_buf["Holiday_6_Month"] = 0
param_buf["Holiday_6_Day"] = 0
param_buf["Holiday_7_Month"] = 0
param_buf["Holiday_7_Day"] = 0
param_buf["Holiday_8_Month"] = 0
param_buf["Holiday_8_Day"] = 0
param_buf["Holiday_9_Month"] = 0
param_buf["Holiday_9_Day"] = 0
param_buf["Holiday_10_Month"] = 0
param_buf["Holiday_10_Day"] = 0
param_buf["Holiday_11_Month"] = 0
param_buf["Holiday_11_Day"] = 0
param_buf["Holiday_12_Month"] = 0
param_buf["Holiday_12_Day"] = 0
param_buf["Holiday_13_Month"] = 0
param_buf["Holiday_13_Day"] = 0
param_buf["Holiday_14_Month"] = 0
param_buf["Holiday_14_Day"] = 0
param_buf["Holiday_15_Month"] = 0
param_buf["Holiday_15_Day"] = 0
param_buf["Holiday_16_Month"] = 0
param_buf["Holiday_16_Day"] = 0
param_buf["Holiday_17_Month"] = 0
param_buf["Holiday_17_Day"] = 0
param_buf["Holiday_18_Month"] = 0
param_buf["Holiday_18_Day"] = 0
param_buf["Holiday_19_Month"] = 0
param_buf["Holiday_19_Day"] = 0
param_buf["Holiday_20_Month"] = 1
param_buf["Holiday_20_Day"] = 9
if my_meter.setHolidayDates(param_buf):
print "Set holiday dates success."
port.closePort()
| 27.289474
| 49
| 0.747348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 887
| 0.427676
|
18e9b27e387d5cd010bbb4d876619abf03cb83f9
| 4,242
|
py
|
Python
|
FCN.py
|
alexandrefelipemuller/timeseries_shapelet_transferlearning
|
be19c05ae88c5bf733fedcfed24a7140168f9727
|
[
"Apache-2.0"
] | null | null | null |
FCN.py
|
alexandrefelipemuller/timeseries_shapelet_transferlearning
|
be19c05ae88c5bf733fedcfed24a7140168f9727
|
[
"Apache-2.0"
] | null | null | null |
FCN.py
|
alexandrefelipemuller/timeseries_shapelet_transferlearning
|
be19c05ae88c5bf733fedcfed24a7140168f9727
|
[
"Apache-2.0"
] | 1
|
2021-03-31T07:46:37.000Z
|
2021-03-31T07:46:37.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 30 20:11:19 2016
@author: stephen
"""
from __future__ import print_function
from keras.models import Model
from keras.utils import np_utils
import numpy as np
import os
from keras.callbacks import ModelCheckpoint
import pandas as pd
import sys
import keras
from keras.callbacks import ReduceLROnPlateau
def readucr(filename):
data = np.loadtxt(filename, delimiter = ',')
Y = data[:,0]
X = data[:,1:]
return X, Y
nb_epochs = 300
#flist = ['Adiac', 'Beef', 'CBF', 'ChlorineConcentration', 'CinC_ECG_torso', 'Coffee', 'Cricket_X', 'Cricket_Y', 'Cricket_Z',
#'DiatomSizeReduction', 'ECGFiveDays', 'FaceAll', 'FaceFour', 'FacesUCR', '50words', 'FISH', 'Gun_Point', 'Haptics',
#'InlineSkate', 'ItalyPowerDemand', 'Lighting2', 'Lighting7', 'MALLAT', 'MedicalImages', 'MoteStrain', 'NonInvasiveFatalECG_Thorax1',
#'NonInvasiveFatalECG_Thorax2', 'OliveOil', 'OSULeaf', 'SonyAIBORobotSurface', 'SonyAIBORobotSurfaceII', 'StarLightCurves', 'SwedishLeaf', 'Symbols',
#'synthetic_control', 'Trace', 'TwoLeadECG', 'Two_Patterns', 'uWaveGestureLibrary_X', 'uWaveGestureLibrary_Y', 'uWaveGestureLibrary_Z', 'wafer', 'WordsSynonyms', 'yoga']
flist = [ sys.argv[1] ]
for each in flist:
fname = each
x_train, y_train = readucr(fname+'/'+fname+'_TRAIN')
x_test, y_test = readucr(fname+'/'+fname+'_TEST')
nb_classes = len(np.unique(y_test))
batch_size = int(min(x_train.shape[0]/10, 16))
y_train = (y_train - y_train.min())/(y_train.max()-y_train.min())*(nb_classes-1)
y_test = (y_test - y_test.min())/(y_test.max()-y_test.min())*(nb_classes-1)
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
x_train_mean = x_train.mean()
x_train_std = x_train.std()
x_train = (x_train - x_train_mean)/(x_train_std)
x_test = (x_test - x_train_mean)/(x_train_std)
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.reshape(x_test.shape + (1,))
print ("class:"+each+", number of classes: "+str(nb_classes))
x = keras.layers.Input(x_train.shape[1:])
# drop_out = Dropout(0.2)(x)
conv1 = keras.layers.Conv1D(filters=32, kernel_size=8, strides=1, activation='relu', input_shape=(32,1))(x)
conv1 = keras.layers.normalization.BatchNormalization()(conv1)
conv1 = keras.layers.Activation('relu')(conv1)
# drop_out = Dropout(0.2)(conv1)
conv2 = keras.layers.Conv1D(filters=64, kernel_size=5, padding='same')(conv1)
conv2 = keras.layers.normalization.BatchNormalization()(conv2)
conv2 = keras.layers.Activation('relu')(conv2)
# drop_out = Dropout(0.2)(conv2)
conv3 = keras.layers.Conv1D(filters=32, kernel_size=3, padding='same')(conv2)
conv3 = keras.layers.normalization.BatchNormalization()(conv3)
conv3 = keras.layers.Activation('relu')(conv3)
full = keras.layers.pooling.GlobalAveragePooling1D()(conv3)
out = keras.layers.Dense(nb_classes, activation='softmax')(full)
model = Model(input=x, output=out)
optimizer = keras.optimizers.Adam()
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
reduce_lr = ReduceLROnPlateau(monitor = 'loss', factor=0.5,
patience=50, min_lr=0.0001)
# if os.path.isfile(fname+"_best.hdf5"):
# model.load_weights(fname+'_best.hdf5')
# model.load_weights(fname+'_shapelet_best.hdf5')
checkpointer = ModelCheckpoint(filepath=fname+"_best.hdf5",
monitor = 'val_accuracy',
verbose=2,
save_best_only=True)
# hist = model.fit(x_train, Y_train, batch_size=batch_size, epochs=nb_epochs,
# verbose=1, callbacks=[reduce_lr], validation_data=(x_test, Y_test))
hist = model.fit(x_train, Y_train, batch_size=batch_size, epochs=nb_epochs,
verbose=1, callbacks=[checkpointer,reduce_lr], validation_data=(x_test, Y_test))
#Print the testing results which has the lowest training loss.
log = pd.DataFrame(hist.history)
print (log.loc[log['loss'].idxmin]['loss'], log.loc[log['loss'].idxmin])
| 40.018868
| 169
| 0.677982
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,453
| 0.342527
|
18e9e49334b24d6e872726b2848571c7d6855286
| 624
|
py
|
Python
|
localpackage/calcs.py
|
chapmanwilliam/Ogden8
|
e17b26609fc3cdd5650bfeba387bd7253513e00e
|
[
"Apache-2.0"
] | null | null | null |
localpackage/calcs.py
|
chapmanwilliam/Ogden8
|
e17b26609fc3cdd5650bfeba387bd7253513e00e
|
[
"Apache-2.0"
] | null | null | null |
localpackage/calcs.py
|
chapmanwilliam/Ogden8
|
e17b26609fc3cdd5650bfeba387bd7253513e00e
|
[
"Apache-2.0"
] | null | null | null |
import os
indentSize=1 #size of the indent
class calcs():
def __init__(self):
self.indent=0
self.txt=[] #text for each line
def clear(self):
self.txt.clear()
self.indent=0
def addCalcs(self,calc):
s=[' ' * self.indent+ t for t in calc.txt]
self.txt += s
def addText(self,txt):
txt=' ' * self.indent + txt
self.txt.append(txt)
def show(self):
return os.linesep.join(self.txt)
def inDent(self):
self.indent+=indentSize
def outDent(self):
if self.indent-indentSize>0:
self.indent-=indentSize
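# Minimal usage illustration (follows directly from the class above):
#
#     c = calcs()
#     c.addText("section")
#     c.inDent()
#     c.addText("detail")
#     print(c.show())   # "section" followed by " detail" (indented by one space)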
| 20.8
| 50
| 0.56891
| 579
| 0.927885
| 0
| 0
| 0
| 0
| 0
| 0
| 44
| 0.070513
|
18ea5f7f2758aa0649c55416dd1e9152a5f44a15
| 7,146
|
py
|
Python
|
src/cops_and_robots/fusion/probability.py
|
COHRINT/cops_and_robots
|
1df99caa1e38bde1b5ce2d04389bc232a68938d6
|
[
"Apache-2.0"
] | 3
|
2016-01-19T17:54:51.000Z
|
2019-10-21T12:09:03.000Z
|
src/cops_and_robots/fusion/probability.py
|
COHRINT/cops_and_robots
|
1df99caa1e38bde1b5ce2d04389bc232a68938d6
|
[
"Apache-2.0"
] | null | null | null |
src/cops_and_robots/fusion/probability.py
|
COHRINT/cops_and_robots
|
1df99caa1e38bde1b5ce2d04389bc232a68938d6
|
[
"Apache-2.0"
] | 5
|
2015-02-19T02:53:24.000Z
|
2019-03-05T20:29:12.000Z
|
#!/usr/bin/env python
from __future__ import division
"""MODULE_DESCRIPTION"""
__author__ = "Nick Sweet"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["Nick Sweet", "Nisar Ahmed"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Nick Sweet"
__email__ = "nick.sweet@colorado.edu"
__status__ = "Development"
import logging
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
class Probability(object):
"""Abstract base class for probability representation (grid, particle, etc)
long description of Probability
Parameters
----------
bounds : Array-like
Bounding coordinates for the probability map.
res : float
Resolution used for discretization of the probability map.
"""
def __init__(self, bounds, res):
self.bounds = bounds
self.ndims = int(len(bounds) / 2)
self.res = res
def entropy(self):
"""
"""
# <>TODO: figure this out. Look at papers!
# http://www-personal.acfr.usyd.edu.au/tbailey/papers/mfi08_huber.pdf
if not hasattr(self, 'pos'):
self._discretize()
if not hasattr(self, 'prob'):
self.pdf()
p_i = self.prob #TODO: change to 4 dims.
H = -np.nansum(p_i * np.log(p_i)) * self.res ** self.ndims # sum of elementwise entropy values
return H
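# Illustration of the discrete approximation above (assuming a 1-D grid on
# [0, 10] with res = 0.1 and a uniform density, so p_i = 0.1 at every grid
# point): H = -sum(0.1 * ln(0.1)) * 0.1 = ln(10) ~= 2.30.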
def compute_kld(self, other_gm):
"""Computes the KLD of self from another GM.
Use a truth GM as other_gm.
"""
q_i = self.prob
p_i = other_gm.prob
kld = np.nansum(p_i * np.log(p_i / q_i)) * self.res ** self.ndims
return kld
# def _discretize(self, bounds=None, res=None, all_dims=False):
# if res is not None:
# self.res = res
# if bounds is None and self.bounds is None:
# b = [-10, 10] # bounds in any dimension
# bounds = [[d] * self.ndims for d in b] # apply bounds to each dim
# self.bounds = [d for dim in bounds for d in dim] # flatten bounds
# elif self.bounds is None:
# self.bounds = bounds
# # Create grid
# if self.ndims == 1:
# x = np.arange(self.bounds[0], self.bounds[1], res)
# self.x = x
# self.pos = x
# elif self.ndims == 2:
# X, Y = np.mgrid[self.bounds[0]:self.bounds[2] + self.res:self.res,
# self.bounds[1]:self.bounds[3] + self.res:self.res]
# pos = np.empty(X.shape + (2,))
# pos[:, :, 0] = X; pos[:, :, 1] = Y
# self.X = X; self.Y = Y
# self.pos = pos
# elif self.ndims > 2:
# logging.debug('Using first two variables as x and y')
# X, Y = np.mgrid[self.bounds[0]:self.bounds[2]
# + res:res,
# self.bounds[1]:self.bounds[3]
# + res:res]
# pos = np.empty(X.shape + (2,))
# pos[:, :, 0] = X; pos[:, :, 1] = Y
# self.X = X; self.Y = Y
# self.pos = pos
# if all_dims:
# #<>TODO: use more than the ndims == 4 case
# full_bounds = self.bounds[0:2] + [-0.5, -0.5] \
# + self.bounds[2:] + [0.5, 0.5]
# v_spacing = 0.1
# grid = np.mgrid[full_bounds[0]:full_bounds[4] + res:res,
# full_bounds[1]:full_bounds[5] + res:res,
# full_bounds[2]:full_bounds[6] + v_spacing:v_spacing,
# full_bounds[3]:full_bounds[7] + v_spacing:v_spacing,
# ]
# pos = np.empty(grid[0].shape + (4,))
# pos[:, :, :, :, 0] = grid[0]
# pos[:, :, :, :, 1] = grid[1]
# pos[:, :, :, :, 2] = grid[2]
# pos[:, :, :, :, 3] = grid[3]
# self.pos_all = pos
# else:
# logging.error('This should be impossible, a gauss mixture with no variables')
# raise ValueError
def plot(self, title=None, alpha=1.0, show_colorbar=True, **kwargs):
if not hasattr(self,'ax') or 'ax' in kwargs:
self.plot_setup(**kwargs)
if title is None:
title = self.__str__()
self.contourf = self.ax.contourf(self.X, self.Y,
self.prob,
levels=self.levels,
# cmap=plt.get_cmap('jet'),
alpha=alpha,
interpolation='none',
antialiased=False
)
if show_colorbar and not hasattr(self, 'cbar'):
divider = make_axes_locatable(self.ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(self.contourf, cax)
cbar.ax.tick_params(labelsize=20)
self.cbar = cbar
self.ax.set_title(title, fontsize=20)
if self.show_ellipses:
if hasattr(self.distribution, 'camera_viewcone'):
poly = self.distribution.camera_viewcone
else:
poly = None
self.ellipse_patches = self.distribution.plot_ellipses(ax=self.ax,
poly=poly)
return self.contourf
def plot_setup(self, fig=None, ax=None, bounds=None, levels=None,
num_levels=50, resolution=0.1, show_ellipses=False):
self.show_ellipses = show_ellipses
if fig is None:
self.fig = plt.gcf()
else:
self.fig = fig
if ax is None:
self.ax = plt.gca()
else:
self.ax = ax
if bounds is None:
bounds = self.bounds
if not hasattr(self,'pos'):
self._discretize(bounds=bounds)
# Set levels
if levels is None:
_, max_prob = self.find_MAP()
self.levels = np.linspace(0, max_prob * 1.2, num_levels)
else:
self.levels = levels
# Set bounds
plt.axis('scaled')
self.ax.set_xlim([bounds[0], bounds[2]])
self.ax.set_ylim([bounds[1], bounds[3]])
def plot_remove(self):
"""Removes all plotted elements related to this gaussian mixture.
"""
if hasattr(self,'contourf'):
for collection in self.contourf.collections:
collection.remove()
del self.contourf
if hasattr(self, 'ellipse_patches'):
for patch in self.ellipse_patches:
patch.remove()
del self.ellipse_patches
def update_plot(self, i=0, **kwargs):
logging.debug('Probability update {}'.format(i))
self.plot_remove()
self.plot(**kwargs)
def copy(self):
return deepcopy(self)
| 34.191388
| 102
| 0.502379
| 6,666
| 0.93283
| 0
| 0
| 0
| 0
| 0
| 0
| 3,244
| 0.45396
|
18ea77727f1cb2220f22073ef4e4393ab431d65a
| 7,952
|
py
|
Python
|
vulnman/tests/mixins.py
|
blockomat2100/vulnman
|
835ff3aae1168d8e2fa5556279bc86efd2e46472
|
[
"MIT"
] | null | null | null |
vulnman/tests/mixins.py
|
blockomat2100/vulnman
|
835ff3aae1168d8e2fa5556279bc86efd2e46472
|
[
"MIT"
] | 23
|
2021-12-01T10:00:38.000Z
|
2021-12-11T11:43:13.000Z
|
vulnman/tests/mixins.py
|
blockomat2100/vulnman
|
835ff3aae1168d8e2fa5556279bc86efd2e46472
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User, Group
from django.utils import timezone
from django.conf import settings
from django.urls import reverse_lazy
from apps.projects.models import Project, Client, ProjectContributor
from ddf import G
from guardian.shortcuts import assign_perm
class VulnmanTestMixin(object):
def init_mixin(self):
self.user1 = self._create_user("dummyuser1", "changeme")
self.user2 = self._create_user("dummyuser2", "changeme")
self.pentester1 = self._create_user("pentester", "changeme")
self.pentester2 = self._create_user("pentester2", "changeme")
self.read_only1 = self._create_user("readonly1", "changeme")
self.manager = self._create_user("manager", "changeme")
self.manager.groups.add(Group.objects.get(name="Management"))
self.pentester1.groups.add(Group.objects.get(name="Pentesters"))
self.pentester2.groups.add(Group.objects.get(name="Pentesters"))
self.project1 = self._create_project(creator=self.pentester1)
self.project2 = self._create_project(creator=self.pentester2)
self.add_contributor(self.read_only1, self.project1, role=ProjectContributor.ROLE_READ_ONLY)
def add_contributor(self, user, project, role=ProjectContributor.ROLE_PENTESTER):
return ProjectContributor.objects.create(user=user, project=project, role=role)
def _create_user(self, username, password, is_staff=False):
email = "%s@example.com" % username
return User.objects.create_user(username, password=password, is_staff=is_staff, email=email)
def assign_perm(self, perm, user_or_group, obj=None):
assign_perm(perm, user_or_group=user_or_group, obj=obj)
def _create_project(self, client=None, creator=None):
if not client:
client = self._create_instance(Client)
return Project.objects.create(creator=creator, client=client, start_date=timezone.now(),
end_date=timezone.now())
def get_url(self, endpoint, **kwargs):
return reverse_lazy(endpoint, kwargs=kwargs)
def _create_instance(self, obj_class, **kwargs):
return G(obj_class, **kwargs)
def _set_session_variable(self, key, value):
session = self.client.session
session[key] = value
session.save()
def login_with_project(self, user, project):
self.client.force_login(user)
self._set_session_variable("project_pk", str(project.pk))
def _test_unauthenticated_aceess(self, url, expected_status_code=403):
response = self.client.get(url, follow=True)
login_url = self.get_url(settings.LOGIN_URL)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(str(login_url) in str(response.redirect_chain[0][0]), True)
self.client.force_login(self.user1)
response = self.client.get(url)
self.assertEqual(response.status_code, expected_status_code)
def _test_foreign_access(self, url, foreign_user, project):
self.login_with_project(foreign_user, project)
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
class VulnmanAPITestMixin(VulnmanTestMixin):
def _check_creator_read_only(self, url, obj_class):
# TODO: use this one
# TODO: check same for projects
new_user = self._create_user("temporaryuser", "changeme")
payload = {"creator": new_user.username}
self.client.force_login(new_user)
response = self.client.patch(url, payload)
self.assertEqual(response.status_code, 404)
self.assertEqual(obj_class.objects.filter(creator=new_user).count(), 0)
def _test_project_updateview(self, lazy_url, payload, obj_class, project_creator_field="project__creator"):
project_field = project_creator_field.split("__")[-2]
project_data = {project_field: self._create_project()}
# test unauthenticated denied
temporary_object = self._create_instance(obj_class, **project_data)
url = self.get_url(lazy_url, pk=temporary_object.pk)
self.client.logout()
response = self.client.patch(url, payload)
self.assertEqual(response.status_code, 403)
response = self.client.put(url, payload)
self.assertEqual(response.status_code, 403)
# test as temporary user
new_user = self._create_user("temporaryuserupdateview", "changeme")
self.client.force_login(new_user)
response = self.client.patch(url, payload)
self.assertEqual(response.status_code, 404)
filter_data = {project_creator_field: new_user}
self.assertEqual(obj_class.objects.filter(**filter_data).count(), 0)
# test as creator user
my_object = self._create_instance(obj_class, **filter_data)
self.client.force_login(new_user)
url = self.get_url(lazy_url, pk=my_object.pk)
response = self.client.patch(url, payload)
self.assertEqual(response.status_code, 200)
self.assertEqual(obj_class.objects.filter(**payload).count(), 1)
def _test_project_listview(self, lazy_url, obj_class, project_creator_field="project__creator"):
# test unauthenticated denied
url = self.get_url(lazy_url)
self.client.logout()
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
# test my object
my_object_data = {project_creator_field: self.user1}
my_object = self._create_instance(obj_class, **my_object_data)
self.client.force_login(self.user1)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["count"], 1)
self.assertEqual(response.json()["results"][0]["uuid"], str(my_object.pk))
# test other object
self.client.force_login(self.user2)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["count"], 0)
def _test_project_createview(self, lazy_url, payload, obj_class, project_creator_field="project__creator",
format='json'):
url = self.get_url(lazy_url)
self.client.logout()
response = self.client.post(url, payload, format=format)
self.assertEqual(response.status_code, 403)
self.assertEqual(obj_class.objects.count(), 0)
project1 = self._create_project(creator=self.user1)
project2 = self._create_project(creator=self.user2)
# test my object
project_field = project_creator_field.split("__")[-2]
payload[project_field] = str(project1.pk)
filter_data = {project_field: str(project1.pk)}
self.client.force_login(self.user1)
response = self.client.post(url, payload, format=format)
self.assertEqual(response.status_code, 201)
self.assertEqual(obj_class.objects.filter(**filter_data).count(), 1)
# test to create object to foreign project
payload[project_field] = str(project2.pk)
response = self.client.post(url, payload, format=format)
self.assertEqual(response.status_code, 403)
def _test_project_deleteview(self, lazy_url, obj_class, project_creator_field="project__creator"):
data = {project_creator_field: self.user1}
my_object = self._create_instance(obj_class, **data)
url = self.get_url(lazy_url, pk=my_object.pk)
self.client.logout()
response = self.client.delete(url)
self.assertEqual(response.status_code, 403)
# test delete foreign objects
self.client.force_login(self.user2)
response = self.client.delete(url)
self.assertEqual(response.status_code, 404)
# test my object delete
self.client.force_login(self.user1)
response = self.client.delete(url)
self.assertEqual(response.status_code, 204)
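

# --- Editor's hedged usage sketch (not part of the original module) ---
# Illustrates how a concrete test case might wire up the mixin above.
# "projects:project-detail" is a made-up URL name used only for the example;
# substitute a real endpoint name from the project's urls.py.
from django.test import TestCase


class ExampleProjectAccessTest(VulnmanTestMixin, TestCase):
    def setUp(self):
        self.init_mixin()

    def test_project_detail_access(self):
        url = self.get_url("projects:project-detail", pk=self.project1.pk)
        # anonymous users get redirected to login; a plain user is denied
        self._test_unauthenticated_aceess(url)
        # a contributor of another project must not see this one
        self._test_foreign_access(url, self.pentester2, self.project2)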
| 47.616766
| 111
| 0.692027
| 7,661
| 0.963405
| 0
| 0
| 0
| 0
| 0
| 0
| 675
| 0.084884
|
18ea8109933fbbfe2b0922e33bce91ae934e86e1
| 2,010
|
py
|
Python
|
StateTracing/tester_helper.py
|
junchenfeng/diagnosis_tracing
|
4e26e2ad0c7abc547f22774b6c9c299999a152c3
|
[
"MIT"
] | null | null | null |
StateTracing/tester_helper.py
|
junchenfeng/diagnosis_tracing
|
4e26e2ad0c7abc547f22774b6c9c299999a152c3
|
[
"MIT"
] | null | null | null |
StateTracing/tester_helper.py
|
junchenfeng/diagnosis_tracing
|
4e26e2ad0c7abc547f22774b6c9c299999a152c3
|
[
"MIT"
] | 1
|
2020-09-08T13:42:16.000Z
|
2020-09-08T13:42:16.000Z
|
# -*- coding: utf-8 -*-
import numpy as np
from torch import load as Tload
from torch import tensor
from dataloader import read_data,DataLoader,load_init
from cdkt import CDKT
if 'model' not in dir():
model = CDKT()
model.load_state_dict(Tload('model.pkl'))
#
inits = load_init()
data = """0 506123310064654031030450460312100605
0 506123310064654031230450460312100605
0 506123310064654031231450460312100605
0 506123310064654031231456460312100605
0 506123310064654031231456460312100645
0 506123310564654031231456460312100645
0 506123310564654231231456460312100645
0 506123310564654231231456460312100605
0 506123310564654231231456460312100645
0 506123312564654231231456460312100645
0 546123312564654231231456460312100645
0 546123312564654231231456465312100645
0 546123312564654231231456465312120645
0 546123312564654231231456465312123645
1 002163163050030425245001316542000000
1 002163163054030425245001316542000000
1 002163163054030425245001316542000006"""
# 1 002163163054030425245001316542030006
# 1 002163163054030425245001316542000006
# 1 002163163054031425245001316542000006
# 1 002163163054631425245001316542000006
# 1 002163163254631425245001316542000006
# 1 002163163254631425245601316542000006
# 1 002163163254631425245631316542000006
# 1 052163163254631425245631316542000006
# 1 452163163254631425245631316542000006
# 1 452163163254631425245631316542000016
# 1 452163163254631425245631316542000316
# 1 452163163254631425245631316542003316
# 1 452163163254631425245631316542000316
# 1 452163163254631425245631316542500316
# 1 452163163254631425245631316542520316
# 1 452163163254631425245631316542524316"""
data = [d.strip().split() for d in data.split('\n')]
states = [list(map(int,s)) for i,s in data]
states = tensor([states])
out = model.predicts(states)
prds = np.argmax(out[0],axis=2).flatten()*np.array(inits[2])
| 35.892857
| 60
| 0.783085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,503
| 0.747761
|
18eaed4c6444d0552d8dc7a9cc73624816ce21fa
| 3,958
|
py
|
Python
|
grpc-errors/stub/hello_pb2.py
|
twotwo/tools-python
|
b9e7a97e58fb0a3f3fb5e8b674e64a997669c2c4
|
[
"MIT"
] | null | null | null |
grpc-errors/stub/hello_pb2.py
|
twotwo/tools-python
|
b9e7a97e58fb0a3f3fb5e8b674e64a997669c2c4
|
[
"MIT"
] | null | null | null |
grpc-errors/stub/hello_pb2.py
|
twotwo/tools-python
|
b9e7a97e58fb0a3f3fb5e8b674e64a997669c2c4
|
[
"MIT"
] | 1
|
2016-10-21T07:51:24.000Z
|
2016-10-21T07:51:24.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hello.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='hello.proto',
package='hello',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0bhello.proto\x12\x05hello\"\x18\n\x08HelloReq\x12\x0c\n\x04Name\x18\x01 \x01(\t\"\x1b\n\tHelloResp\x12\x0e\n\x06Result\x18\x01 \x01(\t2v\n\x0cHelloService\x12/\n\x08SayHello\x12\x0f.hello.HelloReq\x1a\x10.hello.HelloResp\"\x00\x12\x35\n\x0eSayHelloStrict\x12\x0f.hello.HelloReq\x1a\x10.hello.HelloResp\"\x00\x62\x06proto3')
)
_HELLOREQ = _descriptor.Descriptor(
name='HelloReq',
full_name='hello.HelloReq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Name', full_name='hello.HelloReq.Name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=22,
serialized_end=46,
)
_HELLORESP = _descriptor.Descriptor(
name='HelloResp',
full_name='hello.HelloResp',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Result', full_name='hello.HelloResp.Result', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=48,
serialized_end=75,
)
DESCRIPTOR.message_types_by_name['HelloReq'] = _HELLOREQ
DESCRIPTOR.message_types_by_name['HelloResp'] = _HELLORESP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HelloReq = _reflection.GeneratedProtocolMessageType('HelloReq', (_message.Message,), {
'DESCRIPTOR' : _HELLOREQ,
'__module__' : 'hello_pb2'
# @@protoc_insertion_point(class_scope:hello.HelloReq)
})
_sym_db.RegisterMessage(HelloReq)
HelloResp = _reflection.GeneratedProtocolMessageType('HelloResp', (_message.Message,), {
'DESCRIPTOR' : _HELLORESP,
'__module__' : 'hello_pb2'
# @@protoc_insertion_point(class_scope:hello.HelloResp)
})
_sym_db.RegisterMessage(HelloResp)
_HELLOSERVICE = _descriptor.ServiceDescriptor(
name='HelloService',
full_name='hello.HelloService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=77,
serialized_end=195,
methods=[
_descriptor.MethodDescriptor(
name='SayHello',
full_name='hello.HelloService.SayHello',
index=0,
containing_service=None,
input_type=_HELLOREQ,
output_type=_HELLORESP,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='SayHelloStrict',
full_name='hello.HelloService.SayHelloStrict',
index=1,
containing_service=None,
input_type=_HELLOREQ,
output_type=_HELLORESP,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_HELLOSERVICE)
DESCRIPTOR.services_by_name['HelloService'] = _HELLOSERVICE
# @@protoc_insertion_point(module_scope)
| 27.678322
| 348
| 0.741031
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,047
| 0.264528
|
18eb73361ec3feb33d8a12b5b8881d917685a4cc
| 504
|
py
|
Python
|
ckanext-sitemap/ckanext/sitemap/plugin.py
|
alexandru-m-g/hdx-ckan
|
647f1f23f0505fa195601245b758edcaf4d25985
|
[
"Apache-2.0"
] | 1
|
2020-03-07T02:47:15.000Z
|
2020-03-07T02:47:15.000Z
|
ckanext-sitemap/ckanext/sitemap/plugin.py
|
datopian/hdx-ckan
|
2d8871c035a18e48b53859fec522b997b500afe9
|
[
"Apache-2.0"
] | null | null | null |
ckanext-sitemap/ckanext/sitemap/plugin.py
|
datopian/hdx-ckan
|
2d8871c035a18e48b53859fec522b997b500afe9
|
[
"Apache-2.0"
] | null | null | null |
'''
Sitemap plugin for CKAN
'''
from ckan.plugins import implements, SingletonPlugin
from ckan.plugins import IRoutes
class SitemapPlugin(SingletonPlugin):
implements(IRoutes, inherit=True)
def before_map(self, map):
controller='ckanext.sitemap.controller:SitemapController'
map.connect('sitemap', '/sitemap.xml', controller=controller, action='view')
map.connect('sitemap_page', '/sitemap{page}.xml', controller=controller, action='index')
return map
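
# --- Editor's hedged note (not part of the original plugin) ---
# To activate a ckanext plugin like this one, the usual CKAN approach is to
# add its entry-point name to the instance config, e.g. in the .ini file:
#   ckan.plugins = ... sitemap
# The exact name depends on the entry point declared in the extension's
# setup.py, so treat "sitemap" here as an assumption.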
| 29.647059
| 96
| 0.712302
| 374
| 0.742063
| 0
| 0
| 0
| 0
| 0
| 0
| 147
| 0.291667
|
18ebf74aba4efdef03b71cc4501701981953cbd1
| 3,049
|
py
|
Python
|
experiment_wrapper/__init__.py
|
stonkens/experiment_wrapper
|
78b02a09d412097834bc81bba4452db1738b99da
|
[
"MIT"
] | 2
|
2022-03-24T22:31:20.000Z
|
2022-03-25T03:26:01.000Z
|
experiment_wrapper/__init__.py
|
stonkens/experiment_wrapper
|
78b02a09d412097834bc81bba4452db1738b99da
|
[
"MIT"
] | null | null | null |
experiment_wrapper/__init__.py
|
stonkens/experiment_wrapper
|
78b02a09d412097834bc81bba4452db1738b99da
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict, List
class Dynamics:
"""Provides a template for the functionality required from a dynamics class to interface with the experiment
wrapper functionality.
A dynamics class must implement the following methods:
- n_dims: returns the number of dimensions of the state space
- control_dims: returns the number of dimensions of the control space
- dt: returns the time step of the dynamics
- step: takes in the current state, control and time and returns the next state"""
STATES: List[str]
CONTROLS: List[str]
def __init__(self):
self._n_dims: int
self._control_dims: int
self._dt: float
raise RuntimeError("Dynamics is a template class")
@property
def n_dims(self) -> int:
return self._n_dims
@property
def control_dims(self) -> int:
return self._control_dims
@property
def dt(self) -> float:
return self._dt
def step(self, x: Any, u: Any, t: float) -> Any:
pass
class Controller:
"""Provides a template for the functionality required from a controller class to interface with the experiment
    wrapper functionality.
A controller class must implement the following methods:
- __call__: takes in the current state and time and returns the control (note: a function object can be used, e.g.:
def nominal_policy(x, t):
return L @ x
with L the LQR controller matrix"""
def __init__(self):
raise RuntimeError("Controller is a template class")
def __call__(self, x: Any, t: float) -> Any:
pass
class ExtendedController(Controller):
"""Provides a template for functionality that is optional called within the experiment wrapper functionality.
A controller class (in addition to being callable) can also implement the following methods:
- controller_dt: returns the time step of the controller
- save_info: takes in the current state, control and time and returns a dictionary of information to be saved for
all measurements
- save_measurements: takes in the current state, control and time and returns a dictionary of additional
measurements to be saved
- reset: takes in the current state and resets the controller to an initial state
"""
def __init__(self):
self._controller_dt: float
raise RuntimeError("ExtendedController is a template class")
@property
def controller_dt(self) -> float:
return self._controller_dt
def save_info(self, x: Any, u: Any, t: float) -> Dict[str, Any]:
return {}
def save_measurements(self, x: Any, u: Any, t: float) -> Dict[str, Any]:
return {}
def reset(self, x: Any) -> None:
pass
from experiment_wrapper.experiment import Experiment, ScenarioList, Controllers
from experiment_wrapper.rollout_trajectory import (
RolloutTrajectory,
TimeSeriesExperiment,
StateSpaceExperiment,
)
from experiment_wrapper.experiment_suite import ExperimentSuite
__version__ = "1.0.1"
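

# --- Editor's hedged sketch (not part of the original package) ---
# A minimal concrete Dynamics satisfying the template described above: a 1-D
# single integrator stepped with forward Euler. The class name, dt value and
# the simple proportional controller are illustrative only.
class SingleIntegrator(Dynamics):
    STATES = ["x"]
    CONTROLS = ["u"]

    def __init__(self, dt: float = 0.01):
        # intentionally skip Dynamics.__init__, which raises RuntimeError
        self._n_dims = 1
        self._control_dims = 1
        self._dt = dt

    def step(self, x: float, u: float, t: float) -> float:
        # forward Euler: x_{k+1} = x_k + dt * u_k
        return x + self.dt * u


def proportional_controller(x: float, t: float) -> float:
    # any callable(x, t) -> u satisfies the Controller template above
    return -0.5 * x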
| 32.094737
| 119
| 0.700886
| 2,707
| 0.887832
| 0
| 0
| 286
| 0.093801
| 0
| 0
| 1,629
| 0.534274
|
18ecd7bb8ba5638e693807de98d542a164bfce66
| 2,870
|
py
|
Python
|
Figure_2/panel_a_Count_mC_bin.py
|
Wustl-Zhanglab/Placenta_Epigenome
|
227f2a42e5c0af821d372b42c9bcf9e561e4627c
|
[
"MIT"
] | 2
|
2021-06-28T09:16:17.000Z
|
2021-07-15T02:39:35.000Z
|
Figure_2/panel_a_Count_mC_bin.py
|
Wustl-Zhanglab/Placenta_Epigenome
|
227f2a42e5c0af821d372b42c9bcf9e561e4627c
|
[
"MIT"
] | null | null | null |
Figure_2/panel_a_Count_mC_bin.py
|
Wustl-Zhanglab/Placenta_Epigenome
|
227f2a42e5c0af821d372b42c9bcf9e561e4627c
|
[
"MIT"
] | 2
|
2020-05-29T01:06:19.000Z
|
2021-07-02T01:04:50.000Z
|
#!/usr/bin/python
# programmer : Bo
# usage: Count_Reads_bin.py file_list
import sys
import re
import random
import string
import time
def main(X):
try:
print 'opening file :',X
infile = open(X,"r").readlines()
print 'Total ',len(infile),' lines.'
return infile
except IOError,message:
print >> sys.stderr, "cannot open file",message
sys.exit(1)
def Read_data():
X = main('numM10K.bin.bed')
name = []
reads = []
score = []
site = {}
tt = 'V1\tV2\tV3\tV4\n'
for n in range(len(X)):
te = X[n][:-1].split('\t')
if te[0] not in site.keys():
print 'adding',te[0]
site[te[0]] = {}
w = int(len(te[1])/2)
tag = te[1][:w+1]
#if tag not in site[te[0]].keys():
# site[te[0]][tag] = {}
try:
site[te[0]][tag][te[1]] = n-1
except:
site[te[0]][tag] = {}
site[te[0]][tag][te[1]] = n-1
name.append(X[n][:-1])
reads.append(0)
score.append(0.0)
return site, name, reads,score,tt
def Read_blacklist():
bl = main('hg19_blacklist.bed')
BL = {}
for each in bl:
te = each[:-1].split('\t')
if te[0] not in BL.keys():
BL[te[0]]= []
BL[te[0]].append([int(te[1]),int(te[2])])
return BL
if __name__=="__main__":
tS = time.time()
bin = 50000
BL = Read_blacklist()
#(B_site,B_name,C_reads,tt) = Read_data(sys.argv[1])
OP = main(sys.argv[1])
for each in OP:
(B_site,B_name,B_reads,B_score,tt) = Read_data()
data = main(each[:-1])
n = 0
m = 0
out = file('M50K_'+'_'+each[:-1],'w')
#out.write(tt)
for each in data:
n += 1
if n == 1000000:
m += 1
n = 0
print m,'million reads'
te = each.split('\t')
start = int(te[1])
end = int(te[2])
if te[0] not in B_site.keys():
continue
if te[0] in BL.keys():
for ebi in range(len(BL[te[0]])):
if start < BL[te[0]][ebi][1] and end > BL[te[0]][ebi][0]:
continue
ss = int(0.5+(start/50000))*50000
s = str(ss)
w =int( len(s)/2)
tag = s[:w+1]
try :
y = B_site[te[0]][tag][s]
except:
continue
B_reads[y] += 1
B_score[y] += float(te[-1])
for i in range(len(B_name)):
if B_reads[i] == 0:
out.write(B_name[i]+'\t0\t0\n')
else:
out.write(B_name[i]+'\t'+str(B_reads[i])+'\t'+str(B_score[i]/B_reads[i])+'\n')
out.close()
tE = time.time()
print 'Cost ',(tE-tS),' sec'
| 27.075472
| 94
| 0.444599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 401
| 0.139721
|
18ed346e6be46b5b4a74b44f23d751e2dd5b808b
| 6,648
|
py
|
Python
|
slm_lab/agent/memory/replay.py
|
jmribeiro/SLM-Lab
|
7cf7a10e56c9558764544e7683023945c72a42a7
|
[
"MIT"
] | 1,074
|
2017-11-10T02:20:09.000Z
|
2022-03-31T18:14:02.000Z
|
slm_lab/agent/memory/replay.py
|
jmribeiro/SLM-Lab
|
7cf7a10e56c9558764544e7683023945c72a42a7
|
[
"MIT"
] | 98
|
2017-11-04T22:00:01.000Z
|
2022-03-31T14:13:45.000Z
|
slm_lab/agent/memory/replay.py
|
jmribeiro/SLM-Lab
|
7cf7a10e56c9558764544e7683023945c72a42a7
|
[
"MIT"
] | 229
|
2018-01-07T22:39:09.000Z
|
2022-03-20T12:04:31.000Z
|
from collections import deque
from copy import deepcopy
from slm_lab.agent.memory.base import Memory
from slm_lab.lib import logger, math_util, util
from slm_lab.lib.decorator import lab_api
import numpy as np
import pydash as ps
logger = logger.get_logger(__name__)
def sample_next_states(head, max_size, ns_idx_offset, batch_idxs, states, ns_buffer):
    '''Method to sample next_states from states, with a proper guard for next_state idx being out of bounds'''
# idxs for next state is state idxs with offset, modded
ns_batch_idxs = (batch_idxs + ns_idx_offset) % max_size
# if head < ns_idx <= head + ns_idx_offset, ns is stored in ns_buffer
ns_batch_idxs = ns_batch_idxs % max_size
buffer_ns_locs = np.argwhere(
(head < ns_batch_idxs) & (ns_batch_idxs <= head + ns_idx_offset)).flatten()
# find if there is any idxs to get from buffer
to_replace = buffer_ns_locs.size != 0
if to_replace:
# extract the buffer_idxs first for replacement later
# given head < ns_idx <= head + offset, and valid buffer idx is [0, offset)
# get 0 < ns_idx - head <= offset, or equiv.
# get -1 < ns_idx - head - 1 <= offset - 1, i.e.
# get 0 <= ns_idx - head - 1 < offset, hence:
buffer_idxs = ns_batch_idxs[buffer_ns_locs] - head - 1
# set them to 0 first to allow sampling, then replace later with buffer
ns_batch_idxs[buffer_ns_locs] = 0
# guard all against overrun idxs from offset
ns_batch_idxs = ns_batch_idxs % max_size
next_states = util.batch_get(states, ns_batch_idxs)
if to_replace:
# now replace using buffer_idxs and ns_buffer
buffer_ns = util.batch_get(ns_buffer, buffer_idxs)
next_states[buffer_ns_locs] = buffer_ns
return next_states
class Replay(Memory):
'''
Stores agent experiences and samples from them for agent training
An experience consists of
- state: representation of a state
- action: action taken
- reward: scalar value
- next state: representation of next state (should be same as state)
- done: 0 / 1 representing if the current state is the last in an episode
The memory has a size of N. When capacity is reached, the oldest experience
    is deleted to make space for the latest experience.
    - This is implemented as a circular buffer so that inserting experiences is O(1)
- Each element of an experience is stored as a separate array of size N * element dim
When a batch of experiences is requested, K experiences are sampled according to a random uniform distribution.
If 'use_cer', sampling will add the latest experience.
e.g. memory_spec
"memory": {
"name": "Replay",
"batch_size": 32,
"max_size": 10000,
"use_cer": true
}
'''
def __init__(self, memory_spec, body):
super().__init__(memory_spec, body)
util.set_attr(self, self.memory_spec, [
'batch_size',
'max_size',
'use_cer',
])
self.is_episodic = False
self.batch_idxs = None
self.size = 0 # total experiences stored
self.seen_size = 0 # total experiences seen cumulatively
self.head = -1 # index of most recent experience
# generic next_state buffer to store last next_states (allow for multiple for venv)
self.ns_idx_offset = self.body.env.num_envs if body.env.is_venv else 1
self.ns_buffer = deque(maxlen=self.ns_idx_offset)
# declare what data keys to store
self.data_keys = ['states', 'actions', 'rewards', 'next_states', 'dones']
self.reset()
def reset(self):
'''Initializes the memory arrays, size and head pointer'''
# set self.states, self.actions, ...
for k in self.data_keys:
if k != 'next_states': # reuse self.states
# list add/sample is over 10x faster than np, also simpler to handle
setattr(self, k, [None] * self.max_size)
self.size = 0
self.head = -1
self.ns_buffer.clear()
@lab_api
def update(self, state, action, reward, next_state, done):
'''Interface method to update memory'''
if self.body.env.is_venv:
for sarsd in zip(state, action, reward, next_state, done):
self.add_experience(*sarsd)
else:
self.add_experience(state, action, reward, next_state, done)
def add_experience(self, state, action, reward, next_state, done):
'''Implementation for update() to add experience to memory, expanding the memory size if necessary'''
# Move head pointer. Wrap around if necessary
self.head = (self.head + 1) % self.max_size
self.states[self.head] = state.astype(np.float16)
self.actions[self.head] = action
self.rewards[self.head] = reward
self.ns_buffer.append(next_state.astype(np.float16))
self.dones[self.head] = done
# Actually occupied size of memory
if self.size < self.max_size:
self.size += 1
self.seen_size += 1
# set to_train using memory counters head, seen_size instead of tick since clock will step by num_envs when on venv; to_train will be set to 0 after training step
algorithm = self.body.agent.algorithm
algorithm.to_train = algorithm.to_train or (self.seen_size > algorithm.training_start_step and self.head % algorithm.training_frequency == 0)
@lab_api
def sample(self):
'''
Returns a batch of batch_size samples. Batch is stored as a dict.
Keys are the names of the different elements of an experience. Values are an array of the corresponding sampled elements
e.g.
batch = {
'states' : states,
'actions' : actions,
'rewards' : rewards,
'next_states': next_states,
'dones' : dones}
'''
self.batch_idxs = self.sample_idxs(self.batch_size)
batch = {}
for k in self.data_keys:
if k == 'next_states':
batch[k] = sample_next_states(self.head, self.max_size, self.ns_idx_offset, self.batch_idxs, self.states, self.ns_buffer)
else:
batch[k] = util.batch_get(getattr(self, k), self.batch_idxs)
return batch
def sample_idxs(self, batch_size):
        '''Batch indices are sampled uniformly at random'''
batch_idxs = np.random.randint(self.size, size=batch_size)
if self.use_cer: # add the latest sample
batch_idxs[-1] = self.head
return batch_idxs
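

# --- Editor's hedged illustration (not part of SLM-Lab) ---
# Standalone demo of the circular-buffer indexing scheme used in
# sample_next_states above: next-state indices are (batch_idxs + offset)
# modulo max_size, and any index that lands just ahead of the head would be
# served from the small next-state buffer instead of the states array.
if __name__ == '__main__':
    max_size, ns_idx_offset, head = 5, 1, 2
    batch_idxs = np.array([0, 2, 4])
    ns_batch_idxs = (batch_idxs + ns_idx_offset) % max_size
    from_buffer = (head < ns_batch_idxs) & (ns_batch_idxs <= head + ns_idx_offset)
    print(ns_batch_idxs)  # [1 3 0]
    print(from_buffer)    # [False  True False]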
| 43.168831
| 170
| 0.646811
| 4,864
| 0.731649
| 0
| 0
| 1,225
| 0.184266
| 0
| 0
| 3,096
| 0.465704
|
18ee4afcda48045a6b4b58a5f641a2905cb15b51
| 1,958
|
py
|
Python
|
misc/docker/GenDockerfile.py
|
Wheest/atJIT
|
7e29862db7b5eb9cee470edeb165380f881903c9
|
[
"BSD-3-Clause"
] | 47
|
2018-08-03T09:15:08.000Z
|
2022-02-14T07:06:12.000Z
|
misc/docker/GenDockerfile.py
|
Wheest/atJIT
|
7e29862db7b5eb9cee470edeb165380f881903c9
|
[
"BSD-3-Clause"
] | 15
|
2018-06-18T19:50:50.000Z
|
2019-08-29T16:52:11.000Z
|
misc/docker/GenDockerfile.py
|
Wheest/atJIT
|
7e29862db7b5eb9cee470edeb165380f881903c9
|
[
"BSD-3-Clause"
] | 5
|
2018-08-28T02:35:44.000Z
|
2021-11-01T06:54:51.000Z
|
import yaml
import sys
Head = "# Dockerfile derived from easy::jit's .travis.yml"
From = "ubuntu:latest"
Manteiner = "Juan Manuel Martinez Caamaño jmartinezcaamao@gmail.com"
base_packages = ['build-essential', 'python', 'python-pip', 'git', 'wget', 'unzip', 'cmake']
travis = yaml.load(open(sys.argv[1]))
travis_sources = travis['addons']['apt']['sources']
travis_packages = travis['addons']['apt']['packages']
before_install = travis['before_install']
script = travis['script']
# I could not get a better way to do this
AddSourceCmd = {
"llvm-toolchain-trusty-6.0" : "deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-6.0 main | tee -a /etc/apt/sources.list > /dev/null",
"ubuntu-toolchain-r-test" : "apt-add-repository -y \"ppa:ubuntu-toolchain-r/test\""
}
Sources = ["RUN {cmd} \n".format(cmd=AddSourceCmd[source]) for source in travis_sources]
Apt = """# add sources
RUN apt-get update
RUN apt-get install -y software-properties-common
{AddSources}
# install apt packages, base first, then travis
RUN apt-get update
RUN apt-get upgrade -y
RUN apt-get install -y {base_packages} && \\
apt-get install -y {travis_packages}
""".format(AddSources = "".join(Sources), base_packages = " ".join(base_packages), travis_packages=" ".join(travis_packages))
Checkout = "RUN git clone --depth=50 --branch=${branch} https://github.com/jmmartinez/easy-just-in-time.git easy-just-in-time && cd easy-just-in-time\n"
BeforeInstall = "".join(["RUN cd /easy-just-in-time && {0} \n".format(cmd) for cmd in before_install])
Run = "RUN cd easy-just-in-time && \\\n" + "".join([" {cmd} && \\ \n".format(cmd=cmd) for cmd in script]) + " echo ok!"
Template = """{Head}
FROM {From}
LABEL manteiner {Manteiner}
ARG branch=master
{Apt}
# checkout
{Checkout}
# install other deps
{BeforeInstall}
# compile and test!
{Run}"""
print(Template.format(Head=Head, From=From, Manteiner=Manteiner, Apt=Apt, BeforeInstall=BeforeInstall, Checkout=Checkout, Run=Run))
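
# --- Editor's hedged usage note (not part of the original script) ---
# Given the argv handling above, a typical invocation would be roughly:
#   python GenDockerfile.py .travis.yml > Dockerfile
#   docker build --build-arg branch=master -t easy-jit .
# The image tag and branch value are placeholders.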
| 35.6
| 152
| 0.704801
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,220
| 0.622767
|
18eebda43ebee826c1945694815a04fc15eb96ef
| 278
|
py
|
Python
|
howareyoutwitter/api/tasks.py
|
tyheise/how-are-you-twitter
|
1e4b938381e7d552486e981b0f696f330635ba82
|
[
"MIT"
] | 1
|
2019-10-24T20:47:24.000Z
|
2019-10-24T20:47:24.000Z
|
howareyoutwitter/api/tasks.py
|
tyheise/how-are-you-twitter
|
1e4b938381e7d552486e981b0f696f330635ba82
|
[
"MIT"
] | 12
|
2019-10-22T22:32:40.000Z
|
2021-01-07T05:13:25.000Z
|
howareyoutwitter/api/tasks.py
|
tyheise/how-are-you-twitter
|
1e4b938381e7d552486e981b0f696f330635ba82
|
[
"MIT"
] | 1
|
2020-01-02T22:28:52.000Z
|
2020-01-02T22:28:52.000Z
|
from api import models
from api.twitter_tools.tweet_seeker import TweetSeeker
def retrieve_tweets():
tokens = models.Token.objects.all()
try:
token = tokens[0]
except IndexError:
token = None
t_s = TweetSeeker(token)
t_s.run('#vancouver')
| 19.857143
| 54
| 0.672662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.043165
|
18ef5021800d056c99fea4a85de29d3c6771923f
| 390
|
py
|
Python
|
examples/example1.py
|
wallrj/twisted-names-talk
|
d3098ab6745abd0d14bb0b6eef41727e5a89de1f
|
[
"MIT"
] | 2
|
2017-12-01T00:14:25.000Z
|
2020-07-01T00:27:44.000Z
|
examples/example1.py
|
wallrj/twisted-names-talk
|
d3098ab6745abd0d14bb0b6eef41727e5a89de1f
|
[
"MIT"
] | null | null | null |
examples/example1.py
|
wallrj/twisted-names-talk
|
d3098ab6745abd0d14bb0b6eef41727e5a89de1f
|
[
"MIT"
] | null | null | null |
from twisted.internet import task
from twisted.names import dns
def main(reactor):
proto = dns.DNSDatagramProtocol(controller=None)
reactor.listenUDP(0, proto)
d = proto.query(('8.8.8.8', 53), [dns.Query('www.example.com', dns.AAAA)])
d.addCallback(printResult)
return d
def printResult(res):
print 'ANSWERS: ', [a.payload for a in res.answers]
task.react(main)
| 24.375
| 78
| 0.697436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 37
| 0.094872
|
18f0e1c869c59304bc5b9379e901a05831726491
| 5,975
|
py
|
Python
|
utility.py
|
ying-wen/pmln
|
76d82dd620504ac00035d9d0dc9d752cd53518d4
|
[
"MIT"
] | 1
|
2019-09-10T16:42:34.000Z
|
2019-09-10T16:42:34.000Z
|
utility.py
|
ying-wen/pmln
|
76d82dd620504ac00035d9d0dc9d752cd53518d4
|
[
"MIT"
] | null | null | null |
utility.py
|
ying-wen/pmln
|
76d82dd620504ac00035d9d0dc9d752cd53518d4
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import numpy as np
import pandas as pd
from sklearn import metrics
class Options(object):
"""Options used by the model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.embedding_size = 32
# The initial learning rate.
self.learning_rate = 1.
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = 100
# Number of examples for one training step.
self.batch_size = 128
self.log_path = './ctr.log'
def read_file(path, infinite=True):
while True:
fi = open(path,'r')
for line in fi:
yield map(int,line.replace('\n', '').split(' '))
if infinite == False:
break
yield None
def ctr_batch_generator(opts, train=True):
if train:
file_reader = read_file(opts.train_path, True)
else:
file_reader = read_file(opts.test_path, False)
while True:
batch = np.ndarray(shape=(opts.batch_size, opts.sequence_length))
labels = np.ndarray(shape=(opts.batch_size))
for i in xrange(opts.batch_size):
single_sample = file_reader.next()
if single_sample is None:
break
target = single_sample[0]
temp = single_sample[1:opts.sequence_length]
if len(temp) < opts.sequence_length:
gap = opts.sequence_length - len(temp)
temp = np.array(temp + [0] * gap)
assert len(temp) == opts.sequence_length
batch[i] = temp
labels[i] = target
if len(labels) == opts.batch_size and single_sample is not None:
yield np.array(batch), labels
else:
break
def get_substitute_cate(sample, target_index, opts):
field_i = opts.fields_index_inverse.get(sample[target_index])
if field_i is None:
field_i = np.random.choice(opts.fields_index.keys(),1)[0]
field_cates = opts.fields_index[field_i]
rst = np.random.choice(field_cates,1)[0]
if len(field_cates) == 1:
rst = np.random.randint(opts.vocabulary_size)
return rst
def generate_fake_sample(temp, opts):
temp_sequence_length = len(temp)
temp = temp[0:opts.sequence_length]
if len(temp) < opts.sequence_length:
gap = opts.sequence_length - len(temp)
temp = np.array(temp + [0] * gap)
else:
temp_sequence_length = opts.sequence_length
assert len(temp) == opts.sequence_length
targets_to_avoid = set(temp)
indices_to_avoid = set()
substitute_index = np.random.randint(temp_sequence_length)
substitute_target = get_substitute_cate(temp, substitute_index, opts)
for _ in range(opts.substitute_num):
while substitute_index in indices_to_avoid:
substitute_index = np.random.randint(temp_sequence_length)
indices_to_avoid.add(substitute_index)
count = 0
while substitute_target in targets_to_avoid:
if count > 5:
break
substitute_target = get_substitute_cate(temp, substitute_index, opts)
count += 1
targets_to_avoid.add(substitute_target)
temp[substitute_index] = substitute_target
return temp
def generate_discriminant_batch(opts, is_train=True, rate=0.5):
data_index = 0
if is_train:
file_reader = read_file(opts.train_path)
else:
file_reader = read_file(opts.test_path)
while True:
batch = np.ndarray(shape=(opts.batch_size, opts.sequence_length))
labels = []
for i in xrange(opts.batch_size):
if np.random.random() > rate:
single_sample = file_reader.next()
temp = single_sample[1:opts.sequence_length]
if len(temp) < opts.sequence_length:
gap = opts.sequence_length - len(temp)
temp = np.array(temp + [0] * gap)
assert len(temp) == opts.sequence_length
batch[i] = temp
labels.append(1.)
else:
single_sample = file_reader.next()
temp = single_sample[1:opts.sequence_length]
batch[i] = generate_fake_sample(temp, opts)
labels.append(0.)
yield batch, np.array(labels)
def read_feat_index(opts):
vocabulary_size = 0
reverse_dictionary_raw = np.array(pd.read_csv(opts.featindex, sep='\t', header=None))
reverse_dictionary = {}
dictionary = {}
for item in reverse_dictionary_raw:
reverse_dictionary[int(item[1])] = item[0]
dictionary[item[0]] = int(item[1])
if item[1] > vocabulary_size:
vocabulary_size = item[1]
vocabulary_size = len(dictionary.keys())
print('vocabulary_size: ',vocabulary_size)
return reverse_dictionary, dictionary, vocabulary_size
def eval_auc(model, opts, target=None, get_prob=None):
testing_batch_generator = ctr_batch_generator(opts,train=False)
batch_num = 0
y = []
pred = []
for batch, labels in testing_batch_generator:
if target is None or get_prob is None:
probs = model.predict_proba(batch, batch_size=opts.batch_size, verbose=0)
else:
probs = get_prob([batch])[0]
y.extend(labels)
pred.extend([p[0] for p in probs])
batch_num += 1
fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=1)
auc = metrics.auc(fpr, tpr)
loss = metrics.log_loss(y, pred)
print("Total testing sample: ", len(y), " Positive sample: ", sum(y))
opts.auc = auc
opts.loss = loss
with open(opts.log_path, 'a') as f:
f.write(str(opts.__dict__)+'\r')
print("AUC:", auc, ', log loss: ', loss)
| 36.882716
| 89
| 0.60887
| 547
| 0.091548
| 2,269
| 0.379749
| 0
| 0
| 0
| 0
| 379
| 0.063431
|
18f0f41a4a703e23e45d0e7b9b74208ed5cbd775
| 1,294
|
py
|
Python
|
setup.py
|
jeremycline/crochet
|
ecfc22cefa90f3dfbafa71883c1470e7294f2b6d
|
[
"MIT"
] | null | null | null |
setup.py
|
jeremycline/crochet
|
ecfc22cefa90f3dfbafa71883c1470e7294f2b6d
|
[
"MIT"
] | null | null | null |
setup.py
|
jeremycline/crochet
|
ecfc22cefa90f3dfbafa71883c1470e7294f2b6d
|
[
"MIT"
] | 1
|
2020-01-25T18:00:31.000Z
|
2020-01-25T18:00:31.000Z
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import versioneer
def read(path):
"""
Read the contents of a file.
"""
with open(path) as f:
return f.read()
setup(
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
name='crochet',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Use Twisted anywhere!",
install_requires=[
"Twisted>=15.0",
"wrapt",
],
keywords="twisted threading",
license="MIT",
packages=["crochet", "crochet.tests"],
url="https://github.com/itamarst/crochet",
maintainer='Itamar Turner-Trauring',
maintainer_email='itamar@itamarst.org',
long_description=read('README.rst') + '\n' + read('docs/news.rst'),
)
| 28.130435
| 71
| 0.616692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 675
| 0.521638
|
18f12f8a5d648308d20dd8053de45efc7d50fb10
| 1,071
|
py
|
Python
|
polling_test.py
|
ngocdh236/pypusu
|
2453ca4236e4467d4fc0b7dea062ae195183b293
|
[
"MIT"
] | null | null | null |
polling_test.py
|
ngocdh236/pypusu
|
2453ca4236e4467d4fc0b7dea062ae195183b293
|
[
"MIT"
] | null | null | null |
polling_test.py
|
ngocdh236/pypusu
|
2453ca4236e4467d4fc0b7dea062ae195183b293
|
[
"MIT"
] | null | null | null |
from __future__ import division
from __future__ import print_function
from builtins import range
from past.utils import old_div
from pypusu.polling import PuSuClient
from time import sleep, time
if __name__ == "__main__":
print("Connecting")
c = PuSuClient("ws://127.0.0.1:55000")
count = 0
def listener(msg):
global count
count += 1
print("Authorizing")
c.authorize("foo")
print("Subscribing")
c.subscribe("channel.1", listener)
print("Waiting")
target = 500
start = time()
for i in range(1, target + 1):
c.publish("channel.1", {"foo": "bar"})
end = time()
elapsed = end - start
print("Sent {} messages in {:.3f}s, {:.2f}msg/s".format(
target,
elapsed,
old_div(target, elapsed)
))
sleep(1)
print("So far got {} messages, polling...".format(count))
c.poll()
print("After poll got {} messages, waiting for more...".format(count))
for i in range(0, 60):
sleep(1)
c.poll()
print("Got {} messages".format(count))
| 22.3125
| 74
| 0.601307
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 260
| 0.242764
|
18f2ad5a7c870598e6dec3394ee47ca770ec9558
| 3,289
|
py
|
Python
|
tests/test_nacl.py
|
intangere/NewHope_X25519_XSalsa20_Poly1305
|
459914e520bcb5aa207a11533ae217d50719307d
|
[
"MIT"
] | null | null | null |
tests/test_nacl.py
|
intangere/NewHope_X25519_XSalsa20_Poly1305
|
459914e520bcb5aa207a11533ae217d50719307d
|
[
"MIT"
] | 1
|
2021-06-21T03:07:13.000Z
|
2021-06-21T03:07:13.000Z
|
tests/test_nacl.py
|
intangere/NewHope_X25519_XSalsa20_Poly1305
|
459914e520bcb5aa207a11533ae217d50719307d
|
[
"MIT"
] | null | null | null |
# Import libnacl libs
import libnacl
import libnacl.utils
# Import python libs
import unittest
class TestPublic(unittest.TestCase):
'''
Test public functions
'''
def test_gen(self):
pk1, sk1 = libnacl.crypto_box_keypair()
pk2, sk2 = libnacl.crypto_box_keypair()
pk3, sk3 = libnacl.crypto_box_keypair()
self.assertEqual(len(pk1), libnacl.crypto_box_PUBLICKEYBYTES)
self.assertEqual(len(sk1), libnacl.crypto_box_PUBLICKEYBYTES)
self.assertEqual(len(pk2), libnacl.crypto_box_PUBLICKEYBYTES)
self.assertEqual(len(sk2), libnacl.crypto_box_PUBLICKEYBYTES)
self.assertEqual(len(pk3), libnacl.crypto_box_PUBLICKEYBYTES)
self.assertEqual(len(sk3), libnacl.crypto_box_PUBLICKEYBYTES)
self.assertNotEqual(pk1, sk1)
self.assertNotEqual(pk2, sk2)
self.assertNotEqual(pk3, sk3)
self.assertNotEqual(pk1, pk2)
self.assertNotEqual(pk1, pk3)
self.assertNotEqual(sk1, sk2)
self.assertNotEqual(sk2, sk3)
def test_box(self):
msg = b'Are you suggesting coconuts migrate?'
# run 1
nonce1 = libnacl.utils.rand_nonce()
pk1, sk1 = libnacl.crypto_box_keypair()
pk2, sk2 = libnacl.crypto_box_keypair()
enc_msg = libnacl.crypto_box(msg, nonce1, pk2, sk1)
self.assertNotEqual(msg, enc_msg)
clear_msg = libnacl.crypto_box_open(enc_msg, nonce1, pk1, sk2)
self.assertEqual(clear_msg, msg)
# run 2
nonce2 = libnacl.utils.rand_nonce()
pk3, sk3 = libnacl.crypto_box_keypair()
pk4, sk4 = libnacl.crypto_box_keypair()
enc_msg2 = libnacl.crypto_box(msg, nonce2, pk4, sk3)
self.assertNotEqual(msg, enc_msg2)
clear_msg2 = libnacl.crypto_box_open(enc_msg2, nonce2, pk3, sk4)
self.assertEqual(clear_msg2, msg)
# Check bits
self.assertNotEqual(nonce1, nonce2)
self.assertNotEqual(enc_msg, enc_msg2)
def test_boxnm(self):
msg = b'Are you suggesting coconuts migrate?'
# run 1
nonce1 = libnacl.utils.rand_nonce()
pk1, sk1 = libnacl.crypto_box_keypair()
pk2, sk2 = libnacl.crypto_box_keypair()
k1 = libnacl.crypto_box_beforenm(pk2, sk1)
k2 = libnacl.crypto_box_beforenm(pk1, sk2)
enc_msg = libnacl.crypto_box_afternm(msg, nonce1, k1)
self.assertNotEqual(msg, enc_msg)
clear_msg = libnacl.crypto_box_open_afternm(enc_msg, nonce1, k2)
self.assertEqual(clear_msg, msg)
def test_box_seal(self):
msg = b'Are you suggesting coconuts migrate?'
print(msg)
# run 1
pk, sk = libnacl.crypto_box_keypair()
enc_msg = libnacl.crypto_box_seal(msg, pk)
self.assertNotEqual(msg, enc_msg)
clear_msg = libnacl.crypto_box_seal_open(enc_msg, pk, sk)
self.assertEqual(clear_msg, msg)
print(clear_msg)
# run 2
pk2, sk2 = libnacl.crypto_box_keypair()
enc_msg2 = libnacl.crypto_box_seal(msg, pk2)
self.assertNotEqual(msg, enc_msg2)
clear_msg2 = libnacl.crypto_box_seal_open(enc_msg2, pk2, sk2)
self.assertEqual(clear_msg2, msg)
# Check bits
self.assertNotEqual(enc_msg, enc_msg2)
t = TestPublic()
t.test_box_seal()
| 38.244186
| 72
| 0.663728
| 3,155
| 0.959258
| 0
| 0
| 0
| 0
| 0
| 0
| 254
| 0.077227
|
18f2c7ccc01f817c8542ea8ba418a16fde40bf5a
| 2,815
|
py
|
Python
|
gui.py
|
flifloo/PyTchat
|
89e0305557cfedba7637f061184d020ac7f71eeb
|
[
"MIT"
] | 1
|
2019-07-27T08:43:05.000Z
|
2019-07-27T08:43:05.000Z
|
gui.py
|
flifloo/PyTchat
|
89e0305557cfedba7637f061184d020ac7f71eeb
|
[
"MIT"
] | 5
|
2019-07-19T15:11:16.000Z
|
2019-07-24T15:11:00.000Z
|
gui.py
|
flifloo/PyTchat
|
89e0305557cfedba7637f061184d020ac7f71eeb
|
[
"MIT"
] | null | null | null |
from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError
from tkinter.messagebox import showerror, showwarning
from client import Client
from threading import Thread
from socket import error as socket_error
destroy = False
def on_closing():
global destroy
destroy = True
try:
client.send_server("quit")
except TclError:
pass
finally:
try:
tchat.destroy()
except TclError:
pass
def start():
if host.get() and port.get():
try:
global client
client = Client(host.get(), port.get())
except (socket_error, ConnectionError):
showerror("Error", "Can't connect to server !")
else:
login.destroy()
def receive():
while True:
try:
msg = client.receive_server()
if msg.lower() == "quit" or not msg:
raise ConnectionError("Client quit")
except (socket_error, ConnectionError, AttributeError):
show_message("""}------------------------------{
/!\\ [Receive system offline] /!\\
Press Enter to exit
}------------------------------{""")
break
else:
show_message(msg)
def send(event=None):
try:
client.send_server(message.get())
if not receive_thread.is_alive() or message.get().lower() == "quit":
raise ConnectionError("Client quit")
except (socket_error, ConnectionError):
showwarning("Disconnected", "Disconnected from server")
on_closing()
else:
message.set("")
def show_message(msg):
if msg[-1:] != "\n":
msg += "\n"
if not destroy:
chat_message.configure(state="normal")
chat_message.insert("end", msg)
chat_message.configure(state="disable")
login = Tk()
login.title("Login")
host = StringVar()
port = IntVar()
Label(login, text="Host & port:").pack()
login_f = Frame(login)
login_f.pack()
Entry(login_f, textvariable=host, width=14).grid(row=0, column=0)
Entry(login_f, textvariable=port, width=4).grid(row=0, column=1)
Button(login, text="Submit", command=start).pack()
login.mainloop()
tchat = Tk()
tchat.title("PyTchat")
tchat.protocol("WM_DELETE_WINDOW", on_closing)
chat = Frame(tchat)
chat.pack()
scrollbar = Scrollbar(chat)
scrollbar.pack(side="right", fill="y")
chat_message = Text(chat, height=15, width=50, yscrollcommand=scrollbar.set, state="disable")
chat_message.pack(side="left", fill="both")
receive_thread = Thread(target=receive)
receive_thread.start()
entry = Frame(tchat)
entry.pack()
message = StringVar()
field = Entry(entry, textvariable=message)
field.bind("<Return>", send)
field.grid(row=0, column=0)
Button(entry, text="Send", command=send).grid(row=0, column=1)
tchat.mainloop()
| 27.067308
| 97
| 0.628064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 379
| 0.134636
|
18f342f2a9acba64d1ea5575f081da8b2ad4064d
| 281
|
py
|
Python
|
nautobot_secrets_providers/urls.py
|
jifox/nautobot-plugin-secrets-providers
|
4d6ca51d0c78b4785f78909b04cf7c7b33c02e5d
|
[
"Apache-2.0"
] | 6
|
2021-12-22T21:26:12.000Z
|
2022-02-16T10:00:04.000Z
|
nautobot_secrets_providers/urls.py
|
jifox/nautobot-plugin-secrets-providers
|
4d6ca51d0c78b4785f78909b04cf7c7b33c02e5d
|
[
"Apache-2.0"
] | 9
|
2021-12-14T13:43:13.000Z
|
2022-03-29T18:49:55.000Z
|
nautobot_secrets_providers/urls.py
|
jifox/nautobot-plugin-secrets-providers
|
4d6ca51d0c78b4785f78909b04cf7c7b33c02e5d
|
[
"Apache-2.0"
] | 2
|
2022-02-04T19:11:09.000Z
|
2022-03-22T16:23:31.000Z
|
"""Django urlpatterns declaration for nautobot_secrets_providers plugin."""
from django.urls import path
from nautobot_secrets_providers import views
app_name = "nautobot_secrets_providers"
urlpatterns = [
path("", views.SecretsProvidersHomeView.as_view(), name="home"),
]
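
# --- Editor's hedged note (not part of the original file) ---
# Nautobot mounts plugin urlpatterns under its plugins URL prefix; the exact
# path segment depends on the plugin's configured base_url, so the final URL
# for the "home" view above is deployment-specific.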
| 23.416667
| 75
| 0.786477
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 111
| 0.395018
|
18f380451d6001349051a85381a7ca31b31818f6
| 1,920
|
py
|
Python
|
nadlogar/quizzes/views.py
|
LenartBucar/nadlogar
|
2aba693254d56896419d09e066f91551492f8980
|
[
"MIT"
] | null | null | null |
nadlogar/quizzes/views.py
|
LenartBucar/nadlogar
|
2aba693254d56896419d09e066f91551492f8980
|
[
"MIT"
] | null | null | null |
nadlogar/quizzes/views.py
|
LenartBucar/nadlogar
|
2aba693254d56896419d09e066f91551492f8980
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect, render
from .forms import QuizForm
from .models import Quiz
def _get_quiz_if_allowed(request, quiz_id):
quiz = get_object_or_404(
Quiz.objects.select_related("student_group__user"), id=quiz_id
)
if quiz.student_group.user == request.user:
return quiz
else:
raise PermissionDenied
@login_required
def create_quiz(request):
form = QuizForm(request.user, request.POST or request.GET or None)
if form.is_valid():
quiz: Quiz = form.save(commit=False)
if quiz.student_group.user == request.user:
quiz.save()
return redirect("quizzes:view_quiz", quiz_id=quiz.id)
else:
raise PermissionDenied
return render(request, "quizzes/create_quiz.html", {"form": form})
@login_required
def view_quiz(request, quiz_id: int):
quiz = _get_quiz_if_allowed(request, quiz_id)
return render(
request,
"quizzes/view_quiz.html",
{"quiz": quiz},
)
@login_required
def edit_quiz(request, quiz_id: int):
quiz = _get_quiz_if_allowed(request, quiz_id)
form = QuizForm(request.user, request.POST or None, instance=quiz)
if form.is_valid():
quiz: Quiz = form.save()
return redirect("quizzes:view_quiz", quiz_id=quiz.id)
return render(request, "quizzes/edit_quiz.html", {"form": form})
@login_required
def delete_quiz(request, quiz_id: int):
quiz = _get_quiz_if_allowed(request, quiz_id)
quiz.delete()
return redirect("homepage")
@login_required
def generate(request, quiz_id: int):
quiz = _get_quiz_if_allowed(request, quiz_id)
return render(
request,
"quizzes/generate.html",
{"quiz": quiz, "generated_problems": quiz.generate_everything()},
)
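
# --- Editor's hedged sketch (not part of the original module) ---
# The redirects above assume URL names registered under the "quizzes"
# namespace; a matching urls.py could look roughly like this. Route strings
# and most names are guesses for illustration; only "quizzes:view_quiz" is
# actually referenced by the views above.
# from django.urls import path
# from . import views
# app_name = "quizzes"
# urlpatterns = [
#     path("create/", views.create_quiz, name="create_quiz"),
#     path("<int:quiz_id>/", views.view_quiz, name="view_quiz"),
#     path("<int:quiz_id>/edit/", views.edit_quiz, name="edit_quiz"),
#     path("<int:quiz_id>/delete/", views.delete_quiz, name="delete_quiz"),
#     path("<int:quiz_id>/generate/", views.generate, name="generate"),
# ]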
| 28.656716
| 73
| 0.690625
| 0
| 0
| 0
| 0
| 1,414
| 0.736458
| 0
| 0
| 210
| 0.109375
|
18f4895ff656c51b070791d34f8e28cf58f2c463
| 6,757
|
py
|
Python
|
cogs/vote.py
|
FFrost/CBot
|
aee077ee36462cfef14a3fb2fa5e3c1ffe741064
|
[
"MIT"
] | 4
|
2018-06-26T08:15:04.000Z
|
2019-10-09T22:49:38.000Z
|
cogs/vote.py
|
FFrost/CBot
|
aee077ee36462cfef14a3fb2fa5e3c1ffe741064
|
[
"MIT"
] | null | null | null |
cogs/vote.py
|
FFrost/CBot
|
aee077ee36462cfef14a3fb2fa5e3c1ffe741064
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
import asyncio
import time
from enum import Enum
class VoteType(Enum):
POLL = 1
MUTE = 2
class Vote(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.emojis = {
"yes": "\N{WHITE HEAVY CHECK MARK}",
"no": "\N{NEGATIVE SQUARED CROSS MARK}"
}
# votes currently running
self.votes = {}
# dict of muted members where the id is the key
# and the time to unmute is the value
self.muted_members = {}
# background task to check when votes expire and when to unmute users
self.vote_task = self.bot.loop.create_task(self.vote_think())
# time in seconds to mute users
self.MUTE_TIME = 60
def cog_unload(self):
self.vote_task.cancel()
@commands.group(description="starts a vote",
brief="starts a vote")
async def vote(self, ctx):
if (not ctx.invoked_subcommand):
return
def make_mute_embed(self, author: discord.Member, target: discord.User, time: int):
embed = discord.Embed()
embed.title = f"Vote to mute {target}"
embed.color = discord.Color.red()
embed.set_author(name=author.name, icon_url=author.avatar_url)
embed.set_thumbnail(url=target.avatar_url)
if (time > 0):
embed.set_footer(text=f"Time remaining: {time}s")
else:
embed.set_footer(text="Time remaining: expired")
return embed
@vote.command(description="starts a vote to mute a user",
brief="starts a vote to mute a user")
async def mute(self, ctx, user: discord.Member):
if (user == ctx.me):
await ctx.send(f"{ctx.author.mention} nice try")
return
embed = self.make_mute_embed(ctx.author, user, self.MUTE_TIME)
vote_message = await ctx.channel.send(embed=embed)
for emoji in self.emojis.values():
await vote_message.add_reaction(emoji)
# add vote to vote list
self.votes[vote_message.id] = {
"time": time.time() + self.MUTE_TIME,
"votes": 0,
"message": vote_message,
"author": ctx.author,
"target": user,
"type": VoteType.MUTE
}
def is_valid_reaction(self, emoji: str, message: discord.Message, user: discord.User) -> bool:
if (user == self.bot.user):
return False
if (message.id not in self.votes):
return False
if (emoji not in self.emojis.values()):
return False
return True
@commands.Cog.listener()
async def on_reaction_add(self, reaction: discord.Reaction, user: discord.User):
message = reaction.message
emoji = reaction.emoji
if (not self.is_valid_reaction(emoji, message, user)):
return
if (emoji == self.emojis["yes"]):
self.votes[message.id]["votes"] += 1
elif (emoji == self.emojis["no"]):
self.votes[message.id]["votes"] -= 1
@commands.Cog.listener()
async def on_reaction_remove(self, reaction: discord.Reaction, user: discord.User):
message = reaction.message
emoji = reaction.emoji
if (not self.is_valid_reaction(emoji, message, user)):
return
# if the reaction is removed, reverse the vote
if (emoji == self.emojis["yes"]):
self.votes[message.id]["votes"] -= 1
elif (emoji == self.emojis["no"]):
self.votes[message.id]["votes"] += 1
async def handle_mute(self, message_id: str, vote: dict):
if (vote["time"] < time.time()):
await vote["message"].clear_reactions()
total = vote["votes"]
del self.votes[message_id]
embed = self.make_mute_embed(vote["author"], vote["target"], -1)
await vote["message"].edit(embed=embed)
if (total > 0):
await vote["message"].channel.send(f"{vote['author'].mention}'s vote to mute {vote['target'].mention} passed, muting them for {self.MUTE_TIME} seconds")
try:
await vote["target"].edit(mute=True)
except discord.Forbidden:
await vote["message"].channel.send("I don't have permissions to mute")
except discord.HTTPException as e:
await vote["message"].channel.send(f"HTTPException: `{e}`")
self.muted_members[vote["target"].id] = {
"time": time.time() + self.MUTE_TIME,
"channel": vote["message"].channel,
"member": vote["target"]
}
elif (total < 0):
await vote["message"].channel.send(f"{vote['author'].mention}'s vote to mute {vote['target'].mention} failed, no action taken")
else:
await vote["message"].channel.send(f"{vote['author'].mention}'s vote to mute {vote['target'].mention} tied, no action taken")
else:
embed = self.make_mute_embed(vote["author"], vote["target"], int(vote["time"] - time.time()))
await vote["message"].edit(embed=embed)
async def vote_think(self):
await self.bot.wait_until_ready()
while (not self.bot.is_closed()):
try:
vote_copy = self.votes.copy()
for message_id, vote in vote_copy.items():
if (vote["type"] == VoteType.MUTE):
await self.handle_mute(message_id, vote)
vote_copy.clear()
muted_members = self.muted_members.copy()
for member_id, muted_dict in muted_members.items():
if (muted_dict["time"] < time.time()):
await muted_dict["channel"].send(f"{muted_dict['member'].mention}'s mute expired, unmuting")
try:
await vote["target"].edit(mute=False)
except discord.Forbidden:
await muted_dict["channel"].send("I don't have permissions to unmute")
except discord.HTTPException as e:
await muted_dict["channel"].send(f"HTTPException: `{e}`")
except Exception as e:
await muted_dict["channel"].send(f"An error occured unmuting {vote['target']}: ```{e}```")
del self.muted_members[member_id]
except Exception as e:
self.bot.bot_utils.log_error_to_file(e)
await asyncio.sleep(10)
def setup(bot):
bot.add_cog(Vote(bot))
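
# --- Editor's hedged usage sketch (not part of the original cog) ---
# A typical discord.py entry point would load this file as an extension;
# the command prefix and token are placeholders.
# bot = commands.Bot(command_prefix="!")
# bot.load_extension("cogs.vote")
# bot.run("YOUR_TOKEN")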
| 35.563158
| 168
| 0.557052
| 6,611
| 0.978393
| 0
| 0
| 1,928
| 0.285334
| 4,735
| 0.700755
| 1,396
| 0.206601
|
18f4a88074003325bea709addb8e527765d91168
| 5,227
|
py
|
Python
|
async_limits/storage/memcached.py
|
anomit/limits
|
a02d3234664d2b4da9968fd5ad25899ce106517a
|
[
"MIT"
] | 1
|
2021-06-21T13:51:56.000Z
|
2021-06-21T13:51:56.000Z
|
async_limits/storage/memcached.py
|
anomit/limits
|
a02d3234664d2b4da9968fd5ad25899ce106517a
|
[
"MIT"
] | null | null | null |
async_limits/storage/memcached.py
|
anomit/limits
|
a02d3234664d2b4da9968fd5ad25899ce106517a
|
[
"MIT"
] | null | null | null |
import inspect
import threading
import time
from six.moves import urllib
from ..errors import ConfigurationError
from ..util import get_dependency
from .base import Storage
class MemcachedStorage(Storage):
"""
Rate limit storage with memcached as backend.
Depends on the `pymemcache` library.
"""
MAX_CAS_RETRIES = 10
STORAGE_SCHEME = ["memcached"]
def __init__(self, uri, **options):
"""
:param str uri: memcached location of the form
`memcached://host:port,host:port`, `memcached:///var/tmp/path/to/sock`
:param options: all remaining keyword arguments are passed
directly to the constructor of :class:`pymemcache.client.base.Client`
:raise ConfigurationError: when `pymemcache` is not available
"""
parsed = urllib.parse.urlparse(uri)
self.hosts = []
for loc in parsed.netloc.strip().split(","):
if not loc:
continue
host, port = loc.split(":")
self.hosts.append((host, int(port)))
else:
# filesystem path to UDS
if parsed.path and not parsed.netloc and not parsed.port:
self.hosts = [parsed.path]
self.library = options.pop('library', 'pymemcache.client')
self.cluster_library = options.pop('library', 'pymemcache.client.hash')
self.client_getter = options.pop('client_getter', self.get_client)
self.options = options
if not get_dependency(self.library):
raise ConfigurationError(
"memcached prerequisite not available."
" please install %s" % self.library
) # pragma: no cover
self.local_storage = threading.local()
self.local_storage.storage = None
def get_client(self, module, hosts, **kwargs):
"""
returns a memcached client.
:param module: the memcached module
:param hosts: list of memcached hosts
:return:
"""
return (
module.HashClient(hosts, **kwargs)
if len(hosts) > 1 else module.Client(*hosts, **kwargs)
)
def call_memcached_func(self, func, *args, **kwargs):
if 'noreply' in kwargs:
argspec = inspect.getargspec(func)
if not ('noreply' in argspec.args or argspec.keywords):
kwargs.pop('noreply') # noqa
return func(*args, **kwargs)
@property
def storage(self):
"""
lazily creates a memcached client instance using a thread local
"""
if not (
hasattr(self.local_storage, "storage")
and self.local_storage.storage
):
self.local_storage.storage = self.client_getter(
get_dependency(
self.cluster_library if len(self.hosts) > 1
else self.library
),
self.hosts, **self.options
)
return self.local_storage.storage
def get(self, key):
"""
:param str key: the key to get the counter value for
"""
return int(self.storage.get(key) or 0)
def clear(self, key):
"""
        :param str key: the key to clear rate limits for
"""
self.storage.delete(key)
def incr(self, key, expiry, elastic_expiry=False):
"""
increments the counter for a given rate limit key
:param str key: the key to increment
:param int expiry: amount in seconds for the key to expire in
:param bool elastic_expiry: whether to keep extending the rate limit
window every hit.
"""
if not self.call_memcached_func(
self.storage.add, key, 1, expiry, noreply=False
):
if elastic_expiry:
value, cas = self.storage.gets(key)
retry = 0
while (
not self.call_memcached_func(
self.storage.cas, key,
int(value or 0) + 1, cas, expiry
) and retry < self.MAX_CAS_RETRIES
):
value, cas = self.storage.gets(key)
retry += 1
self.call_memcached_func(
self.storage.set,
key + "/expires",
expiry + time.time(),
expire=expiry,
noreply=False
)
return int(value or 0) + 1
else:
return self.storage.incr(key, 1)
self.call_memcached_func(
self.storage.set,
key + "/expires",
expiry + time.time(),
expire=expiry,
noreply=False
)
return 1
def get_expiry(self, key):
"""
:param str key: the key to get the expiry for
"""
return int(float(self.storage.get(key + "/expires") or time.time()))
def check(self):
"""
check if storage is healthy
"""
try:
self.call_memcached_func(self.storage.get, 'limiter-check')
return True
except: # noqa
return False
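

# --- Editor's hedged usage sketch (not part of the original module) ---
# Rough illustration of driving the storage directly; it assumes pymemcache
# is installed and a memcached instance is reachable on localhost:11211, and
# the key name is made up.
if __name__ == "__main__":
    storage = MemcachedStorage("memcached://localhost:11211")
    storage.incr("demo-key", expiry=60)        # open a 60 s window
    print(storage.get("demo-key"))             # current hit count
    print(storage.get_expiry("demo-key"))      # unix time when the window ends
    print(storage.check())                     # True if memcached is reachable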
| 32.465839
| 79
| 0.543524
| 5,049
| 0.965946
| 0
| 0
| 572
| 0.109432
| 0
| 0
| 1,568
| 0.299981
|
18f6a37e4dfb35bf57b4cd1ecadb7071de8cbf6b
| 4,617
|
py
|
Python
|
floreal/views/view_purchases.py
|
caracole-io/circuitscourts
|
4e9279226373ae41eb4d0e0f37f84f12197f34ff
|
[
"MIT"
] | null | null | null |
floreal/views/view_purchases.py
|
caracole-io/circuitscourts
|
4e9279226373ae41eb4d0e0f37f84f12197f34ff
|
[
"MIT"
] | null | null | null |
floreal/views/view_purchases.py
|
caracole-io/circuitscourts
|
4e9279226373ae41eb4d0e0f37f84f12197f34ff
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from caracole import settings
from .decorators import nw_admin_required
from .getters import get_delivery, get_subgroup
from . import latex
from .spreadsheet import spreadsheet
from .delivery_description import delivery_description
MIME_TYPE = {
'pdf': "application/pdf",
'xlsx': "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"}
def non_html_response(name_bits, name_extension, content):
"""Common helper to serve PDF and Excel content."""
filename = ("_".join(name_bits) + "." + name_extension).replace(" ", "_")
mime_type = MIME_TYPE[name_extension]
response = HttpResponse(content_type=mime_type)
response['Content-Disposition'] = 'attachment; filename="%s"' % filename
response.write(content)
return response
@login_required()
def view_purchases_html(request, delivery, subgroup=None):
"""View purchases for a given delivery, possibly restricted to a subgroup. (subgroup) staff only."""
dv = get_delivery(delivery)
if subgroup:
sg = get_subgroup(subgroup)
if request.user not in sg.staff.all() and request.user not in sg.network.staff.all():
return HttpResponseForbidden("Réservé aux admins")
subgroups = [sg]
else:
if request.user not in dv.network.staff.all():
return HttpResponseForbidden("Réservé aux admins")
subgroups = dv.network.subgroup_set.all()
return render_to_response('view_purchases.html', delivery_description(dv, subgroups))
@login_required()
def view_purchases_xlsx(request, delivery, subgroup=None):
"""View purchases for a given delivery as an MS-Excel spreadsheet, possibly restricted to a subgroup.
(subgroup) staff only."""
dv = get_delivery(delivery)
if subgroup:
sg = get_subgroup(subgroup)
if request.user not in sg.staff.all() and request.user not in sg.network.staff.all():
return HttpResponseForbidden("Réservé aux admins")
subgroups = [sg]
else:
if request.user not in dv.network.staff.all():
return HttpResponseForbidden("Réservé aux admins")
subgroups = dv.network.subgroup_set.all()
return non_html_response((dv.network.name, dv.name), "xlsx", spreadsheet(dv, subgroups))
@login_required()
def view_purchases_latex(request, delivery, subgroup=None):
"""View purchases for a given delivery as a PDF table, generated through LaTeX, possibly restricted to a subgroup.
(subgroup) staff only."""
dv = get_delivery(delivery)
if subgroup:
sg = get_subgroup(subgroup)
if request.user not in sg.staff.all() and request.user not in sg.network.staff.all():
return HttpResponseForbidden("Réservé aux admins")
content = latex.subgroup(dv, sg)
name_bits = (dv.network.name, dv.name, sg.name)
else:
if request.user not in dv.network.staff.all():
return HttpResponseForbidden("Réservé aux admins")
content = latex.delivery_table(dv)
name_bits = (dv.network.name, dv.name)
return non_html_response(name_bits, "pdf", content)
@login_required()
def view_cards_latex(request, delivery, subgroup=None):
"""View purchases for a given delivery as a PDF table, generated through LaTeX, possibly restricted to a subgroup.
Subgroups are presented as ready-to-cut tables, whole deliveries as list per subgroup. (subgroup) staff only."""
dv = get_delivery(delivery)
if subgroup:
sg = get_subgroup(subgroup)
if request.user not in sg.staff.all() and request.user not in sg.network.staff.all():
return HttpResponseForbidden("Réservé aux admins")
content = latex.cards(dv, sg)
name_bits = (dv.network.name, dv.name, sg.name)
else:
content = latex.delivery_cards(dv)
name_bits = (dv.network.name, dv.name)
return non_html_response(name_bits, "pdf", content)
@nw_admin_required(lambda a: get_delivery(a['delivery']).network)
def get_archive(request, delivery, suffix):
"""Retrieve the PDF/MS-Excel file versions of a terminated delivery which have been saved upontermination."""
dv = get_delivery(delivery)
file_name = os.path.join(settings.DELIVERY_ARCHIVE_DIR, "dv-%d.%s" % (dv.id, suffix))
with open(file_name) as f:
content = f.read()
name_bits = (dv.network.name, dv.name)
return non_html_response(name_bits, suffix, content)
| 41.972727
| 118
| 0.706953
| 0
| 0
| 0
| 0
| 3,623
| 0.782336
| 0
| 0
| 1,172
| 0.253077
|
18f75103fffe006c35337768f20ad10b43a5b636
| 411
|
py
|
Python
|
hack_today_2017/web/web_time_solver.py
|
runsel/CTF_Writeups
|
df3d8469b981265d4d43bfc90e75075a95acb1dd
|
[
"MIT"
] | 4
|
2019-01-07T03:15:45.000Z
|
2021-01-10T04:58:15.000Z
|
hack_today_2017/web/web_time_solver.py
|
runsel/CTF_Writeups
|
df3d8469b981265d4d43bfc90e75075a95acb1dd
|
[
"MIT"
] | null | null | null |
hack_today_2017/web/web_time_solver.py
|
runsel/CTF_Writeups
|
df3d8469b981265d4d43bfc90e75075a95acb1dd
|
[
"MIT"
] | 3
|
2018-10-21T19:17:34.000Z
|
2020-07-07T08:58:25.000Z
|
import requests
charset = "abcdefghijklmnopqrstuvwxyz0123456789_{}"
password = "HackToday{"
url = "http://sawah.ittoday.web.id:40137/"
while(password[-1]!="}"):
for i in charset:
r = requests.get(url)
payload = {'password': password+i, 'submit': 'Submit+Query'}
r = requests.post(url, data=payload)
if r.status_code==302:
            password+=i
            print(password)
| 27.4
| 68
| 0.615572
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 124
| 0.301703
|
18f9f056fd0c54a5b1e0f0f03ecf846e53698354
| 484
|
py
|
Python
|
mayan/__init__.py
|
sneha-rk/drawings-version-control
|
4e5a2bf0fd8b8026f1d3d56917b5be4b5c7be497
|
[
"Apache-2.0"
] | 1
|
2021-05-14T18:40:37.000Z
|
2021-05-14T18:40:37.000Z
|
mayan/__init__.py
|
sneha-rk/drawings-version-control
|
4e5a2bf0fd8b8026f1d3d56917b5be4b5c7be497
|
[
"Apache-2.0"
] | null | null | null |
mayan/__init__.py
|
sneha-rk/drawings-version-control
|
4e5a2bf0fd8b8026f1d3d56917b5be4b5c7be497
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
__title__ = 'IITH DVC'
__version__ = '2.7.2'
__build__ = 0x020702
__author__ = 'Roberto Rosario'
__author_email__ = 'roberto.rosario@mayan-edms.com'
__description__ = 'Free Open Source Electronic Document Management System'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2011-2016 Roberto Rosario'
| 28.470588
| 74
| 0.760331
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 190
| 0.392562
|
18fa914340e673af7a09db0d4d032b0e04e6bdee
| 5,728
|
py
|
Python
|
ldt/utils/usaf/bcsd_preproc/lib_bcsd_metrics/BCSD_function.py
|
rkim3/LISF
|
afaf6a228d2b29a1d26111acc951204f0b436387
|
[
"Apache-2.0"
] | 67
|
2018-11-13T21:40:54.000Z
|
2022-02-23T08:11:56.000Z
|
ldt/utils/usaf/bcsd_preproc/lib_bcsd_metrics/BCSD_function.py
|
dmocko/LISF
|
08d024d6d5fe66db311e43e78740842d653749f4
|
[
"Apache-2.0"
] | 679
|
2018-11-13T20:10:29.000Z
|
2022-03-30T19:55:25.000Z
|
ldt/utils/usaf/bcsd_preproc/lib_bcsd_metrics/BCSD_function.py
|
dmocko/LISF
|
08d024d6d5fe66db311e43e78740842d653749f4
|
[
"Apache-2.0"
] | 119
|
2018-11-08T15:53:35.000Z
|
2022-03-28T10:16:01.000Z
|
from __future__ import division
import pandas as pd
import numpy as np
import calendar
import os.path as op
import sys
from datetime import datetime
from dateutil.relativedelta import relativedelta
from scipy.stats import percentileofscore
from scipy.stats import scoreatpercentile, pearsonr
from math import *
import time
from BCSD_stats_functions import *
import xarray as xr
import os, errno
def CALC_BCSD(OBS_CLIM_ALL, FCST_CLIM_ALL, LEAD_FINAL, TARGET_FCST_VAL_ARR, TARGET_FCST_SYR, TARGET_FCST_EYR, FCST_SYR, ENS_NUM, MON, MONTH_NAME, count_grid, BC_VAR, TINY):
CORRECT_FCST_COARSE = np.ones(((TARGET_FCST_EYR-TARGET_FCST_SYR)+1, LEAD_FINAL, ENS_NUM))*-999
for LEAD_NUM in range(0, LEAD_FINAL): ## Loop from lead =0 to Final Lead
TARGET_MONTH = MON + LEAD_NUM; ## This is the target forecast month
## Check for the cases when the target forecast month is in the next year (e.g. February 1983 forecast initialized in December 1982)
if (TARGET_MONTH>12):
TARGET_MONTH-=12 #subtracting 12 so 13 becomes 1 meaning the month of January and so on.
## Just checking if the lead and target month combination is working as expected
if (count_grid==0): #Only printing the following for the first grid cell, no need to repeat
print ("Initial forecast month is {} Lead is {} and Target month is {}".format(MONTH_NAME, LEAD_NUM, calendar.month_name[TARGET_MONTH]))
        # Retrieving observed and forecast time series for the given target month
OBS_QUANT_TS, OBS_CLIM_TS = OBS_CLIM_ALL[0, :], OBS_CLIM_ALL[TARGET_MONTH, :] ## Note that the first column is quantile time series
FCST_QUANT_TS, FCST_CLIM_TS = FCST_CLIM_ALL[0, :], FCST_CLIM_ALL[LEAD_NUM+1, :] ## Note that the first column is quantile time series
## Now calculating mean, standard deviation and skew of both observed and forecast time series
obs_mean, obs_sd, obs_skew = Calc_Stats(OBS_CLIM_TS, TINY)
fcst_mean, fcst_sd, fcst_skew = Calc_Stats(FCST_CLIM_TS, TINY)
#obs_mean, obs_sd, obs_skew = Calc_Stats(OBS_CLIM_TS.values, TINY)
#fcst_mean, fcst_sd, fcst_skew = Calc_Stats(FCST_CLIM_TS.values, TINY)
## Ok, now getting started on the bias correction
        ## Note that bias correction is done separately for each ensemble member of all years
for fcst_yr in range(TARGET_FCST_SYR-FCST_SYR, (TARGET_FCST_EYR-FCST_SYR)+1):
for ens_num in range (0, ENS_NUM):
TARGET_FCST_VAL = TARGET_FCST_VAL_ARR[fcst_yr, LEAD_NUM, ens_num]
## First determine the quantile for given target forecast value
TARGET_FCST_QUANT = lookup(TARGET_FCST_VAL, FCST_CLIM_TS, FCST_QUANT_TS, len(FCST_CLIM_TS), BC_VAR, 'QUAN', fcst_mean, fcst_sd, fcst_skew, TINY);
#TARGET_FCST_QUANT = lookup(TARGET_FCST_VAL, FCST_CLIM_TS.values, FCST_QUANT_TS.values, len(FCST_CLIM_TS.values), BC_VAR, 'QUAN', fcst_mean, fcst_sd, fcst_skew, TINY);
                ## Also note that 'QUAN' tells the lookup function whether we are converting a value to a quantile or vice versa
## For converting a value to quantile use 'QUAN' for converting quantile to value use 'DATA'
## Now using the quantile above determine the corresponding value from the observed climatology
BIAS_CORRECTED_VALUE = lookup(TARGET_FCST_QUANT, OBS_QUANT_TS, OBS_CLIM_TS, len(OBS_CLIM_TS), BC_VAR, 'DATA', obs_mean, obs_sd, obs_skew, TINY);
#BIAS_CORRECTED_VALUE = lookup(TARGET_FCST_QUANT, OBS_QUANT_TS.values, OBS_CLIM_TS.values, len(OBS_CLIM_TS.values), BC_VAR, 'DATA', obs_mean, obs_sd, obs_skew, TINY);
if (BC_VAR=='PRCP') and (BIAS_CORRECTED_VALUE<0): ## This is just a hack to check we are not getting negative value of precipitation
print (TARGET_FCST_VAL, TARGET_FCST_QUANT, fcst_yr, LEAD_NUM, ens_num)
## Now storing the bias corrected anomaly
CORRECT_FCST_COARSE[fcst_yr, LEAD_NUM, ens_num] = BIAS_CORRECTED_VALUE
return CORRECT_FCST_COARSE
def latlon_calculations(ilat_min, ilat_max, ilon_min, ilon_max, nlats, nlons, \
np_OBS_CLIM_ARRAY, np_FCST_CLIM_ARRAY, \
LEAD_FINAL, TARGET_FCST_EYR, TARGET_FCST_SYR, FCST_SYR, ENS_NUM, MON, \
MONTH_NAME, BC_VAR, TINY, FCST_COARSE):
CORRECT_FCST_COARSE = np.ones(((TARGET_FCST_EYR-TARGET_FCST_SYR)+1, LEAD_FINAL, ENS_NUM, nlats, nlons))*-999
num_lats = ilat_max-ilat_min+1
num_lons = ilon_max-ilon_min+1
print("num_lats = ", num_lats, np_OBS_CLIM_ARRAY.shape)
print("num_lons = ", num_lons, FCST_COARSE.shape)
for ilat in range(num_lats):
lat_num = ilat_min + ilat
for ilon in range(num_lons):
lon_num = ilon_min + ilon
count_grid = ilon + ilat*num_lons
OBS_CLIM_ALL = np_OBS_CLIM_ARRAY[:, :, ilat, ilon]
FCST_CLIM_ALL = np_FCST_CLIM_ARRAY[:, :, ilat, ilon]
TARGET_FCST_VAL_ARR = FCST_COARSE[:, :, :, lat_num, lon_num]
CORRECT_FCST_COARSE[:, :, :, lat_num, lon_num] = CALC_BCSD(OBS_CLIM_ALL, FCST_CLIM_ALL, LEAD_FINAL, \
TARGET_FCST_VAL_ARR, TARGET_FCST_SYR, \
TARGET_FCST_EYR, FCST_SYR, ENS_NUM, MON, \
MONTH_NAME, count_grid, BC_VAR, TINY)
return CORRECT_FCST_COARSE
| 61.591398
| 191
| 0.662884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,900
| 0.331704
|
18fd4c8c14d7b745e7af13adc4fd4221571ac4a2
| 1,212
|
py
|
Python
|
charybde/parsers/dump_parser.py
|
m09/charybde
|
3f8d7d17ed7b9df4bc42743bbd953f61bc807b81
|
[
"Apache-2.0"
] | 1
|
2020-03-12T12:58:30.000Z
|
2020-03-12T12:58:30.000Z
|
charybde/parsers/dump_parser.py
|
m09/charybde
|
3f8d7d17ed7b9df4bc42743bbd953f61bc807b81
|
[
"Apache-2.0"
] | 24
|
2019-10-28T07:21:19.000Z
|
2020-04-13T22:38:37.000Z
|
charybde/parsers/dump_parser.py
|
m09/charybde
|
3f8d7d17ed7b9df4bc42743bbd953f61bc807b81
|
[
"Apache-2.0"
] | null | null | null |
from bz2 import BZ2File
from pathlib import Path
from queue import Queue
from threading import Thread
from typing import Any, Callable, Dict, Iterator, List, Tuple
from xmltodict import parse as xmltodict_parse
def parse(dump: Path) -> Iterator[Dict[str, Any]]:
def filter(path: List[Tuple[str, Dict[str, str]]], item: Dict[str, Any]) -> bool:
return (
len(path) == 2
and path[1][0] == "page"
and item["ns"] == "0"
and "redirect" not in item
)
queue: Queue = Queue()
thread = Thread(target=_parse_dump, args=(dump, queue, filter))
thread.start()
while True:
item = queue.get()
if item is None:
break
yield item
def _parse_dump(
dump: Path,
output_queue: Queue,
filter_callable: Callable[[List[Tuple[str, Dict[str, str]]], Dict[str, Any]], bool],
) -> None:
def handler(path: List[Tuple[str, Dict[str, str]]], item: Dict[str, Any]) -> bool:
if filter_callable(path, item):
output_queue.put_nowait(item)
return True
with BZ2File(str(dump)) as fh:
xmltodict_parse(fh, item_depth=2, item_callback=handler)
output_queue.put(None)
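# Hedged usage sketch (not part of the original module; the dump file name is an
# assumption): `parse` yields page dicts for main-namespace, non-redirect pages
# of a bzipped MediaWiki XML dump, so iterating over it might look like:
#     from pathlib import Path
#     for page in parse(Path("enwiktionary-latest-pages-articles.xml.bz2")):
#         print(page["title"])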
| 28.857143
| 88
| 0.615512
| 0
| 0
| 520
| 0.429043
| 0
| 0
| 0
| 0
| 23
| 0.018977
|
18fdbb6a59afbc92dbdea6d53c5bce95efda434c
| 5,321
|
py
|
Python
|
server/py/camera.py
|
sreyas/Attendance-management-system
|
eeb57bcc942f407151b71bfab528e817c6806c74
|
[
"MIT"
] | null | null | null |
server/py/camera.py
|
sreyas/Attendance-management-system
|
eeb57bcc942f407151b71bfab528e817c6806c74
|
[
"MIT"
] | null | null | null |
server/py/camera.py
|
sreyas/Attendance-management-system
|
eeb57bcc942f407151b71bfab528e817c6806c74
|
[
"MIT"
] | null | null | null |
import cv2
import sys,json,numpy as np
import glob,os
import face_recognition
import datetime
from pathlib import Path
from pymongo import MongoClient
from flask_mongoengine import MongoEngine
from bson.objectid import ObjectId
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
client = MongoClient(port=27017)
db=client.GetMeThrough;
home = str(os.path.dirname(os.path.abspath(__file__))) + "/../../"
known_encodings_file_path = home + "/data/known_encodings_file.csv"
people_file_path = home + "/data/people_file.csv"
known_encodings_file = Path(known_encodings_file_path)
if known_encodings_file.is_file():
known_encodings = np.genfromtxt(known_encodings_file, delimiter=',')
else:
known_encodings = []
people_file = Path(people_file_path)
if people_file.is_file():
people = np.genfromtxt(people_file, dtype='U',delimiter=',')
else:
people = []
class VideoCamera(object):
def __init__(self):
# Using OpenCV to capture from device 0. If you have trouble capturing
# from a webcam, comment the line below out and use a video file
# instead.
camera = db.addconfigurations.find_one({'_id': ObjectId("5aaa4d382ca2233631b55ab4") })
self.video = cv2.VideoCapture(camera['configuration'])
# If you decide to use video.mp4, you must have this file in the folder
# as the main.py.
# self.video = cv2.VideoCapture('video.mp4')
def __del__(self):
self.video.release()
def compare_faces(self ,detectimage):
face_locations = face_recognition.face_locations(detectimage)
face_encodings = face_recognition.face_encodings(detectimage, face_locations)
match =[]
for face_encoding in face_encodings:
match = face_recognition.compare_faces(known_encodings, face_encoding)
return match
def get_name(self,peoplename):
collection = db['profiles']
cursor = collection.find()
for document in cursor:
profileimagepath = document['imagepath'];
category = document['category'];
imagecsv = profileimagepath.split('known_people/');
filename = imagecsv[1].split('.');
imagefilename = filename[0];
if(peoplename == imagefilename ):
usercategory = db.user_categories.find_one({'_id': ObjectId(category) })
text = usercategory['Category']
return text
else:
return "Unknown"
def insertattendance(self,peoplename):
collection = db['profiles']
cursor = collection.find()
for document in cursor:
profileimagepath = document['imagepath'];
category = document['category'];
user = document['user'];
imagecsv = profileimagepath.split('known_people/');
filename = imagecsv[1].split('.');
imagefilename = filename[0];
if(peoplename == imagefilename):
current_date =datetime.datetime.now()
attendance= {"user":user,"date_time" :str(current_date)}
date_format = "%Y-%m-%d %H:%M:%S.%f"
attendance_system = db.attendance.find({"user": user})
res = [col.encode('utf8') if isinstance(col, unicode) else col for col in attendance_system]
if not res:
db.attendances.insert_one(attendance).inserted_id
else:
for attendance_doc in res:
date_time = attendance_doc['date_time']
time1 = datetime.datetime.strptime(date_time.encode('utf8'), date_format)
time2 = datetime.datetime.strptime(str(datetime.datetime.now()), date_format)
diff = time2 - time1
minutes = (diff.seconds) / 60
if(minutes >=30):
db.attendances.insert_one(attendance).inserted_id
def get_frame(self):
success, image = self.video.read()
# We are using Motion JPEG, but OpenCV defaults to capture raw images,
# so we must encode it into JPEG in order to correctly display the
# video stream.
faces = face_cascade.detectMultiScale(image, 1.3, 5)
for (x, y, w, h) in faces:
match = self.compare_faces(image);
name = "Unknown"
for i in range(len(match)):
if match[i]:
face_detect_name = self.get_name(people[i])
name = face_detect_name
self.insertattendance(people[i])
color = (0, 255, 0)
break;
if "Unknown" in name:
color = (0, 0, 255)
name = "Unknown"
if "Blacklist" in name:
color = (0, 0, 0)
name = "Blacklist"
cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(image, name,(x + w, y + h), font, 1.0, (255,255,255), 1)
crop_img = image[y: y + h, x: x + w]
cv2.imwrite(home + "/data/face.jpg", crop_img)
ret, jpeg = cv2.imencode('.jpg', image)
img_str = jpeg.tostring();
return jpeg.tobytes()
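# Hedged usage sketch (assumption, not part of the original file): `get_frame`
# returns one JPEG-encoded frame, so a typical consumer is a Flask MJPEG
# streaming endpoint along these lines:
#     from flask import Flask, Response
#     app = Flask(__name__)
#     def gen(camera):
#         while True:
#             frame = camera.get_frame()
#             yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
#     @app.route('/video_feed')
#     def video_feed():
#         return Response(gen(VideoCamera()),
#                         mimetype='multipart/x-mixed-replace; boundary=frame')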
| 41.248062
| 100
| 0.595001
| 4,432
| 0.832926
| 0
| 0
| 0
| 0
| 0
| 0
| 850
| 0.159744
|
18fe1679223211eeb9c906c7f88442b62f5fd7cf
| 929
|
py
|
Python
|
scgrn/src/utils.py
|
Fassial/nibs-intern
|
493a340f431c11712723db89476cae4056c6ef5b
|
[
"MIT"
] | null | null | null |
scgrn/src/utils.py
|
Fassial/nibs-intern
|
493a340f431c11712723db89476cae4056c6ef5b
|
[
"MIT"
] | null | null | null |
scgrn/src/utils.py
|
Fassial/nibs-intern
|
493a340f431c11712723db89476cae4056c6ef5b
|
[
"MIT"
] | null | null | null |
###################################
# Created on 22:20, Nov. 16th, 2020
# Author: fassial
# Filename: utils.py
###################################
# dep
import os
import pandas as pd
import scanpy as sp
from collections import defaultdict
# local dep
# macro
# def get_data_lm func
def get_data_lm(sce_fname, sparse = False):
# read sce
sce = sp.read_loom(
sce_fname,
sparse = sparse
)
return sce.to_df()
# def get_data_csv func
def get_data_csv(sce_fname):
# read sce
sce = pd.read_csv(sce_fname,
sep = ',',
header = 0,
index_col = 0
)
return sce
# def UTILS_GET_DATA_FUNC dict
UTILS_GET_DATA_FUNC = defaultdict(lambda : get_data_csv, {
".loom": get_data_lm,
".csv": get_data_csv
})
# def get_data func
def get_data(sce_fname):
sce = UTILS_GET_DATA_FUNC[os.path.splitext(sce_fname)[1]](
sce_fname = sce_fname
)
return sce
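# Hedged usage sketch (illustrative only; the file names are assumptions):
# `get_data` dispatches on the file extension, reading .loom files with scanpy
# and falling back to pandas read_csv for everything else, e.g.:
#     sce_loom = get_data("expression_matrix.loom")
#     sce_csv = get_data("expression_matrix.csv")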
| 19.765957
| 62
| 0.603875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 295
| 0.317546
|
18feec8ad8d14751af185b1bf50263837f32d416
| 1,376
|
py
|
Python
|
PQencryption/pub_key/pk_signature/quantum_vulnerable/signing_Curve25519_PyNaCl.py
|
OleMussmann/PQencryption
|
e9a550e285c4b5145210425fbaa2cac338f3d266
|
[
"Apache-2.0"
] | null | null | null |
PQencryption/pub_key/pk_signature/quantum_vulnerable/signing_Curve25519_PyNaCl.py
|
OleMussmann/PQencryption
|
e9a550e285c4b5145210425fbaa2cac338f3d266
|
[
"Apache-2.0"
] | null | null | null |
PQencryption/pub_key/pk_signature/quantum_vulnerable/signing_Curve25519_PyNaCl.py
|
OleMussmann/PQencryption
|
e9a550e285c4b5145210425fbaa2cac338f3d266
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 10 16:26:41 CEST 2017
@author: BMMN
"""
import gc # garbage collector
import nacl.signing
import nacl.encoding
def sign(signing_key, message):
return signing_key.sign(message)
def key_gen():
signing_key = nacl.signing.SigningKey.generate()
verify_key = signing_key.verify_key
return signing_key, verify_key
if __name__ == "__main__":
# This in an example. In production, you would want to read the key from an
# external file or the command line. The key must be 32 bytes long.
# DON'T DO THIS IN PRODUCTION!
signing_key, verify_key = key_gen()
    message = b'This is my message.'
    print("message : ", message)
# signing
signed = sign(signing_key, message)
verify_key_hex = verify_key.encode(encoder=nacl.encoding.HexEncoder)
print("signed: " + signed)
print("verify_key_hex: " + verify_key_hex)
# verification
verify_key = nacl.signing.VerifyKey(verify_key_hex,
encoder=nacl.encoding.HexEncoder)
print()
print("verification positive:")
print(verify_key.verify(signed))
print()
print("verification negative:")
print(verify_key.verify("0"*len(signed)))
# make sure all memory is flushed after operations
del signing_key
del signed
del message
del verify_key
del verify_key_hex
gc.collect()
| 25.018182
| 75
| 0.699855
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 495
| 0.359738
|
18ff8d36aadc1e7329aa5016280d4db4c68e6086
| 17,187
|
py
|
Python
|
app.py
|
otsaloma/bort-proxy
|
28ac4ab2c249d4a47f71a4e39cf21c44d2fdf991
|
[
"MIT"
] | 2
|
2016-10-02T01:33:24.000Z
|
2016-12-12T09:20:06.000Z
|
app.py
|
otsaloma/bort-proxy
|
28ac4ab2c249d4a47f71a4e39cf21c44d2fdf991
|
[
"MIT"
] | 2
|
2019-12-15T20:17:09.000Z
|
2020-12-28T01:10:26.000Z
|
app.py
|
otsaloma/bort-proxy
|
28ac4ab2c249d4a47f71a4e39cf21c44d2fdf991
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Osmo Salomaa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import base64
import bs4
import cairosvg
import contextlib
import dotenv
import flask
import functools
import imghdr
import io
import json
import os
import pickle
import PIL.Image
import random
import re
import requests
import traceback
import tweepy
import unicodedata
import urllib.parse
import xml.etree.ElementTree as ET
dotenv.load_dotenv()
FALLBACK_PNG = open("letter-icons/x.png", "rb").read()
LINK_REL_PATTERNS = [
re.compile("^apple-touch-icon$"),
re.compile("^apple-touch-icon-precomposed$"),
re.compile("^icon$"),
re.compile("^shortcut icon$"),
]
app = flask.Flask(__name__)
blacklist = set()
if app.config["ENV"] == "production":
import redis
cache = redis.from_url(os.environ["REDISCLOUD_URL"])
else:
import redislite
cache = redislite.Redis()
# Cache HTTP connections for better performance.
# https://urllib3.readthedocs.io/en/latest/advanced-usage.html#customizing-pool-behavior
adapter = requests.adapters.HTTPAdapter(pool_connections=10,
pool_maxsize=100,
max_retries=0,
pool_block=False)
rs = requests.Session()
rs.headers = {"User-Agent": "Mozilla/5.0"}
rs.mount("http://", adapter)
rs.mount("https://", adapter)
@app.route("/facebook-icon")
def facebook_icon():
"""Return a downscaled Facebook profile image."""
user = flask.request.args["user"]
size = int(flask.request.args["size"])
format = flask.request.args.get("format", "png")
key = "facebook-icon:{}:{:d}".format(user, size)
if cache.exists(key):
print("Found in cache: {}".format(key))
image, ttl = get_from_cache(key)
return make_response(image, format, ttl)
url = "https://graph.facebook.com/{user}/picture?type=large"
url = url.format(user=urllib.parse.quote(user))
try:
print("Requesting {}".format(url))
image = request_image(url, max_size=5)
image = resize_image(image, size)
if imghdr.what(None, image) != "png":
raise ValueError("Non-PNG data received")
cache.set(key, image, ex=rex(3, 5))
return make_response(image, format)
except Exception as error:
print("Error requesting {}: {}".format(
flask.request.full_path, str(error)))
image = resize_image(FALLBACK_PNG, size)
cache.set(key, image, ex=7200)
return make_response(image, format, 7200)
@app.route("/favicon")
def favicon():
"""Return a 16x16 favicon for website."""
domain = flask.request.args["url"]
domain = re.sub("/.*$", "", re.sub("^.*?://", "", domain))
format = flask.request.args.get("format", "png")
key = "favicon:{}".format(domain)
if cache.exists(key):
print("Found in cache: {}".format(key))
image, ttl = get_from_cache(key)
return make_response(image, format, ttl)
url = "https://www.google.com/s2/favicons?domain={domain}"
url = url.format(domain=urllib.parse.quote(domain))
try:
print("Requesting {}".format(url))
image = request_image(url, max_size=1)
if imghdr.what(None, image) != "png":
raise ValueError("Non-PNG data received")
cache.set(key, image, ex=rex(3, 5))
return make_response(image, format)
except Exception as error:
print("Error requesting {}: {}".format(
flask.request.full_path, str(error)))
image = resize_image(FALLBACK_PNG, 16)
cache.set(key, image, ex=7200)
return make_response(image, format, 7200)
def find_icons(url):
"""Yield icon entries specified in the HTML HEAD of `url`."""
url, page = get_page(url)
soup = bs4.BeautifulSoup(page, "html.parser")
for pattern in LINK_REL_PATTERNS:
for tag in soup.find_all("link", dict(rel=pattern)):
href = urllib.parse.urljoin(url, tag.attrs["href"])
size = tag.attrs.get("sizes", "0x0")
if size == "any":
size = "1000x1000"
yield dict(url=href, size=int(size.split("x")[0]))
# Fall back on looking for icons at the server root.
join = lambda x: urllib.parse.urljoin(url, x)
yield dict(url=join("/apple-touch-icon.png"), fallback=True)
yield dict(url=join("/apple-touch-icon-precomposed.png"), fallback=True)
def get_cache_control(max_age):
"""Return a Cache-Control header for `max_age`."""
return "public, max-age={:d}".format(max_age)
def get_from_cache(key):
"""Return value, ttl for `key` from cache."""
return cache.get(key), cache.ttl(key)
def get_letter(url):
"""Return letter to represent `url`."""
if "://" not in url:
url = "http://{}".format(url)
url = urllib.parse.urlparse(url).netloc
url = url.split(".")
url = url[-2] if len(url) > 1 else url[0]
return url[0].lower() if url else "x"
@functools.lru_cache(256)
def get_letter_icon(letter):
"""Return letter icon PNG bytes for `url`."""
fname = "letter-icons/{}.png".format(letter)
if os.path.isfile(fname):
with open(fname, "rb") as f:
return f.read()
name = unicodedata.name(letter)
name = name.lower().replace(" ", "-")
fname = "letter-icons/{}.png".format(name)
if os.path.isfile(fname):
with open(fname, "rb") as f:
return f.read()
return FALLBACK_PNG
def get_page(url, timeout=15):
"""Return evaluated `url`, HTML page as text."""
if "://" in url:
response = rs.get(url, timeout=timeout)
response.raise_for_status()
return response.url, response.text
for scheme in ("https", "http"):
with silent(Exception):
return get_page("{}://{}".format(scheme, url))
raise Exception("Failed to get page")
@functools.lru_cache(1)
def get_twitter_api():
"""Return Twitter API object."""
key = os.environ["TWITTER_API_KEY"]
secret = os.environ["TWITTER_API_SECRET"]
auth = tweepy.AppAuthHandler(key, secret)
return tweepy.API(auth)
@app.route("/google-search-suggestions")
def google_search_suggestions():
"""Return a JSON array of Google search suggestions for query."""
query = flask.request.args["query"]
lang = flask.request.args.get("lang", "en")
key = "google-search-suggestions:{}:{}".format(query, lang)
if cache.exists(key):
print("Found in cache: {}".format(key))
data, ttl = get_from_cache(key)
return make_response(pickle.loads(data), "json", ttl)
url = "https://suggestqueries.google.com/complete/search?output=toolbar&q={query}&hl={lang}"
url = url.format(query=urllib.parse.quote_plus(query), lang=lang)
try:
print("Requesting {}".format(url))
response = rs.get(url, timeout=5)
response.raise_for_status()
root = ET.fromstring(response.text)
suggestions = [x.get("data") for x in root.iter("suggestion")]
cache.set(key, pickle.dumps(suggestions), ex=3600)
return make_response(suggestions, "json")
except Exception as error:
print("Error requesting {}: {}".format(
flask.request.full_path, str(error)))
cache.set(key, pickle.dumps([]), ex=3600)
return make_response([], "json", 3600)
@app.route("/icon")
def icon():
"""Return apple-touch-icon or favicon for website."""
url = flask.request.args["url"]
size = int(flask.request.args["size"])
format = flask.request.args.get("format", "png")
key = "icon:{}:{:d}".format(url, size)
if cache.exists(key):
print("Found in cache: {}".format(key))
image, ttl = get_from_cache(key)
return make_response(image, format, ttl)
try:
print("Parsing {}".format(url))
icons = list(find_icons(url))
icons.sort(key=lambda x: x.get("size", 0) or 1000)
except Exception as error:
print("Error parsing {}: {}".format(
flask.request.full_path, str(error)))
icons = []
for icon in icons:
# Ignore icons with a known size less than requested.
icon.setdefault("size", 0)
if 0 < icon["size"] < size: continue
try:
print("Requesting {}".format(icon["url"]))
image = request_image(icon["url"])
if not is_svg(image):
with PIL.Image.open(io.BytesIO(image)) as pi:
if min(pi.width, pi.height) < size: continue
image = resize_image(image, size)
if imghdr.what(None, image) != "png":
raise ValueError("Non-PNG data received")
cache.set(key, image, ex=rex(3, 5))
return make_response(image, format)
except Exception as error:
print("Error requesting {}: {}".format(
icon["url"], str(error)))
# Fall back on letter icons for domain.
image = get_letter_icon(get_letter(url))
image = resize_image(image, size)
cache.set(key, image, ex=rex(3, 5))
return make_response(image, format)
@app.route("/icons")
def icons():
"""Return JSON listing of icons for website."""
url = flask.request.args["url"]
key = "icons:{}".format(url)
if cache.exists(key):
print("Found in cache: {}".format(key))
data, ttl = get_from_cache(key)
return make_response(pickle.loads(data), "json", ttl)
try:
print("Parsing {}".format(url))
icons = list(find_icons(url))
except Exception as error:
print("Error parsing {}: {}".format(
flask.request.full_path, str(error)))
icons = []
for i in list(range(len(icons) - 1, -1, -1)):
if icons[i].get("size", 1) < 1: del icons[i]["size"]
if icons[i].get("fallback", False): del icons[i]
data = dict(icons=icons)
cache.set(key, pickle.dumps(data), ex=300)
return make_response(data, "json", 300)
@app.route("/image")
def image():
"""Return a downscaled image read from URL."""
url = flask.request.args["url"]
size = int(flask.request.args["size"])
format = flask.request.args.get("format", "png")
key = "image:{}:{:d}".format(url, size)
if cache.exists(key):
print("Found in cache: {}".format(key))
image, ttl = get_from_cache(key)
return make_response(image, format, ttl)
try:
print("Requesting {}".format(url))
image = request_image(url, max_size=1)
image = resize_image(image, size)
if imghdr.what(None, image) != "png":
raise ValueError("Non-PNG data received")
cache.set(key, image, ex=rex(3, 5))
return make_response(image, format)
except Exception as error:
print("Error requesting {}: {}".format(
flask.request.full_path, str(error)))
image = resize_image(FALLBACK_PNG, size)
cache.set(key, image, ex=7200)
return make_response(image, format, 7200)
def is_svg(image):
return (isinstance(image, str) and
image.lstrip().startswith("<svg"))
def make_response(data, format, max_age=None):
"""Return response 200 for `data` as `format`."""
if format == "base64":
text = base64.b64encode(data)
max_age = max_age or random.randint(1, 3) * 86400
return flask.Response(text, 200, {
"Access-Control-Allow-Origin": "*",
"Content-Type": "text/plain",
"Content-Encoding": "UTF-8",
"Content-Length": str(len(text)),
"Cache-Control": get_cache_control(max_age),
})
if format == "json":
text = json.dumps(data, ensure_ascii=False)
max_age = max_age or 3600
return flask.Response(text, 200, {
"Access-Control-Allow-Origin": "*",
"Content-Type": "application/json",
"Content-Encoding": "UTF-8",
"Content-Length": str(len(text)),
"Cache-Control": get_cache_control(max_age),
})
if format == "png":
max_age = max_age or random.randint(1, 3) * 86400
return flask.Response(data, 200, {
"Access-Control-Allow-Origin": "*",
"Content-Type": "image/png",
"Content-Length": str(len(data)),
"Cache-Control": get_cache_control(max_age),
})
def request_image(url, max_size=1, timeout=15):
"""Request and return image at `url` at most `max_size` MB."""
# Avoid getting caught reading insanely large files.
# http://docs.python-requests.org/en/master/user/advanced/#body-content-workflow
if url in blacklist:
raise ValueError("URL blacklisted")
max_size = max_size * 1024 * 1024
with contextlib.closing(rs.get(
url, timeout=timeout, stream=True)) as response:
response.raise_for_status()
if ("content-length" in response.headers and
response.headers["content-length"].isdigit() and
int(response.headers["content-length"]) > max_size):
raise ValueError("Too large")
content_type = response.headers.get("content-type", "").lower()
if url.endswith(".svg") or content_type == "image/svg+xml":
# SVG, return as string.
image = response.text
if len(image) > max_size:
blacklist.add(url)
raise ValueError("Too large")
return image
# Raster, return as bytes.
image = response.raw.read(max_size+1, decode_content=True)
if len(image) > max_size:
blacklist.add(url)
raise ValueError("Too large")
return image
def resize_image(image, size):
"""Resize `image` to `size` and return PNG bytes."""
if is_svg(image):
image = cairosvg.svg2png(bytestring=image.encode("utf-8"),
output_width=size,
output_height=size)
with PIL.Image.open(io.BytesIO(image)) as pi:
if pi.mode not in ("RGB", "RGBA"):
pi = pi.convert("RGBA")
pi.thumbnail((size, size), PIL.Image.BICUBIC)
if pi.width != pi.height:
# Add transparent margins to make a square image.
bg = PIL.Image.new("RGBA", (size, size), (255, 255, 255, 0))
bg.paste(pi, ((size - pi.width) // 2, (size - pi.height) // 2))
pi = bg
out = io.BytesIO()
pi.save(out, "PNG")
return out.getvalue()
def rex(a, b):
"""Return a random amount of seconds between a and b days."""
return random.randint(int(a*86400), int(b*86400))
@contextlib.contextmanager
def silent(*exceptions, tb=False):
"""Try to execute body, ignoring `exceptions`."""
try:
yield
except exceptions:
if tb: traceback.print_exc()
@app.route("/twitter-icon")
def twitter_icon():
"""Return a downscaled Twitter profile image."""
user = flask.request.args["user"]
size = int(flask.request.args["size"])
format = flask.request.args.get("format", "png")
key = "twitter-icon:{}:{:d}".format(user, size)
if cache.exists(key):
print("Found in cache: {}".format(key))
image, ttl = get_from_cache(key)
return make_response(image, format, ttl)
try:
api = get_twitter_api()
user_object = api.get_user(user)
url = user_object.profile_image_url_https
# Remove size variant to get the full "original" image.
# https://developer.twitter.com/en/docs/accounts-and-users/user-profile-images-and-banners
url = re.sub(r"_([^/_.]+)(\.\w+)$", r"\2", url)
print("Found profile image URL {}".format(url))
image = request_image(url, max_size=5)
image = resize_image(image, size)
if imghdr.what(None, image) != "png":
raise ValueError("Non-PNG data received")
cache.set(key, image, ex=rex(3, 5))
return make_response(image, format)
except Exception as error:
print("Error requesting {}: {}".format(
flask.request.full_path, str(error)))
image = resize_image(FALLBACK_PNG, size)
cache.set(key, image, ex=7200)
return make_response(image, format, 7200)
| 38.535874
| 98
| 0.615872
| 0
| 0
| 926
| 0.053878
| 9,446
| 0.549601
| 0
| 0
| 4,986
| 0.290103
|
18ffb685c2a877f7f518f970f9a6eafbcd304771
| 2,099
|
py
|
Python
|
apps/comments/migrations/0001_initial.py
|
puertoricanDev/horas
|
28597af13409edd088a71143d2f4c94cd7fd83f5
|
[
"MIT"
] | 10
|
2015-01-18T02:39:35.000Z
|
2021-11-09T22:53:10.000Z
|
apps/comments/migrations/0001_initial.py
|
puertoricanDev/horas
|
28597af13409edd088a71143d2f4c94cd7fd83f5
|
[
"MIT"
] | 52
|
2015-03-02T17:46:23.000Z
|
2022-02-10T13:23:11.000Z
|
apps/comments/migrations/0001_initial.py
|
puertoricanDev/horas
|
28597af13409edd088a71143d2f4c94cd7fd83f5
|
[
"MIT"
] | 7
|
2015-03-02T01:23:35.000Z
|
2021-11-09T22:58:39.000Z
|
# Generated by Django 1.10.6 on 2017-03-13 04:46
# Modified by Raúl Negrón on 2019-06-22 16:48
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
import apps.core.models
class Migration(migrations.Migration):
initial = True
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Comment",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"date_created",
apps.core.models.DateTimeCreatedField(
blank=True, default=django.utils.timezone.now, editable=False
),
),
(
"date_modified",
apps.core.models.DateTimeModifiedField(
blank=True, default=django.utils.timezone.now, editable=False
),
),
("object_id", models.PositiveIntegerField()),
("comment", models.TextField()),
(
"content_type",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="contenttypes.ContentType",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="users",
to=settings.AUTH_USER_MODEL,
),
),
],
options={"ordering": ("date_created",)},
)
]
| 31.328358
| 85
| 0.45212
| 1,839
| 0.875297
| 0
| 0
| 0
| 0
| 0
| 0
| 283
| 0.134698
|
18ffb7e91b90c1915102493dee2fe7ea4b7d621d
| 9,607
|
py
|
Python
|
IRIS_data_download/IRIS_download_support/obspy/io/nied/knet.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-03-05T01:03:01.000Z
|
2020-12-17T05:04:07.000Z
|
IRIS_data_download/IRIS_download_support/obspy/io/nied/knet.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 4
|
2021-03-31T19:25:55.000Z
|
2021-12-13T20:32:46.000Z
|
IRIS_data_download/IRIS_download_support/obspy/io/nied/knet.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-09-08T19:33:40.000Z
|
2021-04-05T09:47:50.000Z
|
# -*- coding: utf-8 -*-
"""
obspy.io.nied.knet - K-NET/KiK-net read support for ObsPy
=========================================================
Reading of the K-NET and KiK-net ASCII format as defined on
http://www.kyoshin.bosai.go.jp.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA @UnusedWildImport
import re
import numpy as np
from obspy import UTCDateTime, Stream, Trace
from obspy.core.trace import Stats
class KNETException(Exception):
pass
def _buffer_proxy(filename_or_buf, function, reset_fp=True,
file_mode="rb", *args, **kwargs):
"""
Calls a function with an open file or file-like object as the first
argument. If the file originally was a filename, the file will be
opened, otherwise it will just be passed to the underlying function.
:param filename_or_buf: File to pass.
:type filename_or_buf: str, open file, or file-like object.
:param function: The function to call.
:param reset_fp: If True, the file pointer will be set to the initial
position after the function has been called.
:type reset_fp: bool
:param file_mode: Mode to open file in if necessary.
"""
try:
position = filename_or_buf.tell()
is_buffer = True
except AttributeError:
is_buffer = False
if is_buffer is True:
ret_val = function(filename_or_buf, *args, **kwargs)
if reset_fp:
filename_or_buf.seek(position, 0)
return ret_val
else:
with open(filename_or_buf, file_mode) as fh:
return function(fh, *args, **kwargs)
def _is_knet_ascii(filename_or_buf):
"""
Checks if the file is a valid K-NET/KiK-net ASCII file.
:param filename_or_buf: File to test.
:type filename_or_buf: str or file-like object.
"""
try:
return _buffer_proxy(filename_or_buf, _internal_is_knet_ascii,
reset_fp=True)
# Happens for example when passing the data as a string which would be
# interpreted as a filename.
except (OSError, UnicodeDecodeError):
return False
def _internal_is_knet_ascii(buf):
"""
Checks if the file is a valid K-NET/KiK-net ASCII file.
:param buf: File to read.
:type buf: Open file or open file like object.
"""
first_string = buf.read(11).decode()
# File has less than 11 characters
if len(first_string) != 11:
return False
if first_string == 'Origin Time':
return True
return False
def _prep_hdr_line(name, line):
"""
Helper function to check the contents of a header line and split it.
:param name: String that the line should start with.
:type name: str
:param line: Line to check and split.
:type line: str
"""
if not line.startswith(name):
raise KNETException("Expected line to start with %s but got %s "
% (name, line))
else:
return line.split()
def _read_knet_hdr(hdrlines, convert_stnm=False, **kwargs):
"""
Read the header values into a dictionary.
    :param hdrlines: List of the header lines of a K-NET/KiK-net ASCII file
:type hdrlines: list
:param convert_stnm: For station names with 6 letters write the last two
letters of the station code to the 'location' field
:type convert_stnm: bool
"""
hdrdict = {'knet': {}}
hdrnames = ['Origin Time', 'Lat.', 'Long.', 'Depth. (km)', 'Mag.',
'Station Code', 'Station Lat.', 'Station Long.',
'Station Height(m)', 'Record Time', 'Sampling Freq(Hz)',
'Duration Time(s)', 'Dir.', 'Scale Factor', 'Max. Acc. (gal)',
'Last Correction', 'Memo.']
_i = 0
# Event information
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dt = flds[2] + ' ' + flds[3]
dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S')
# All times are in Japanese standard time which is 9 hours ahead of UTC
dt -= 9 * 3600.
hdrdict['knet']['evot'] = dt
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
lat = float(flds[1])
hdrdict['knet']['evla'] = lat
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
lon = float(flds[1])
hdrdict['knet']['evlo'] = lon
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dp = float(flds[2])
hdrdict['knet']['evdp'] = dp
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
mag = float(flds[1])
hdrdict['knet']['mag'] = mag
# Station information
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
# K-NET and KiK-Net station names can be more than 5 characters long
# which will cause the station name to be truncated when writing the
# the trace as miniSEED; if convert_stnm is enabled, the last two
# letters of the station code are written to the 'location' field
stnm = flds[2]
location = ''
if convert_stnm and len(stnm) > 5:
location = stnm[-2:]
stnm = stnm[:-2]
if len(stnm) > 7:
raise KNETException(
"Station name can't be more than 7 characters long!")
hdrdict['station'] = stnm
hdrdict['location'] = location
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['stla'] = float(flds[2])
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['stlo'] = float(flds[2])
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['stel'] = float(flds[2])
# Data information
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dt = flds[2] + ' ' + flds[3]
    # A 15 s delay is added to the record time by the
    # K-NET and KiK-Net data logger
dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S') - 15.0
# All times are in Japanese standard time which is 9 hours ahead of UTC
dt -= 9 * 3600.
hdrdict['starttime'] = dt
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
freqstr = flds[2]
m = re.search('[0-9]*', freqstr)
freq = int(m.group())
hdrdict['sampling_rate'] = freq
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['duration'] = float(flds[2])
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
channel = flds[1].replace('-', '')
kiknetcomps = {'1': 'NS1', '2': 'EW1', '3': 'UD1',
'4': 'NS2', '5': 'EW2', '6': 'UD2'}
if channel.strip() in kiknetcomps.keys(): # kiknet directions are 1-6
channel = kiknetcomps[channel.strip()]
hdrdict['channel'] = channel
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
eqn = flds[2]
num, denom = eqn.split('/')
num = float(re.search('[0-9]*', num).group())
denom = float(denom)
# convert the calibration from gal to m/s^2
hdrdict['calib'] = 0.01 * num / denom
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
acc = float(flds[3])
hdrdict['knet']['accmax'] = acc
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dt = flds[2] + ' ' + flds[3]
dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S')
# All times are in Japanese standard time which is 9 hours ahead of UTC
dt -= 9 * 3600.
hdrdict['knet']['last correction'] = dt
# The comment ('Memo') field is optional
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
if len(flds) > 1:
hdrdict['knet']['comment'] = ' '.join(flds[1:])
if len(hdrlines) != _i + 1:
raise KNETException("Expected %d header lines but got %d"
% (_i + 1, len(hdrlines)))
return hdrdict
def _read_knet_ascii(filename_or_buf, **kwargs):
"""
Reads a K-NET/KiK-net ASCII file and returns an ObsPy Stream object.
.. warning::
This function should NOT be called directly, it registers via the
ObsPy :func:`~obspy.core.stream.read` function, call this instead.
:param filename: K-NET/KiK-net ASCII file to be read.
:type filename: str or file-like object.
"""
return _buffer_proxy(filename_or_buf, _internal_read_knet_ascii, **kwargs)
def _internal_read_knet_ascii(buf, **kwargs):
"""
Reads a K-NET/KiK-net ASCII file and returns an ObsPy Stream object.
.. warning::
This function should NOT be called directly, it registers via the
ObsPy :func:`~obspy.core.stream.read` function, call this instead.
:param buf: File to read.
:type buf: Open file or open file like object.
"""
data = []
hdrdict = {}
cur_pos = buf.tell()
buf.seek(0, 2)
size = buf.tell()
buf.seek(cur_pos, 0)
# First read the headerlines
headerlines = []
while buf.tell() < size:
line = buf.readline().decode()
headerlines.append(line)
if line.startswith('Memo'):
hdrdict = _read_knet_hdr(headerlines, **kwargs)
break
while buf.tell() < size:
line = buf.readline()
parts = line.strip().split()
data += [float(p) for p in parts]
hdrdict['npts'] = len(data)
# The FDSN network code for the National Research Institute for Earth
    # Science and Disaster Prevention (NIED JAPAN) is BO (Bosai-Ken Network)
hdrdict['network'] = 'BO'
data = np.array(data)
stats = Stats(hdrdict)
trace = Trace(data, header=stats)
return Stream([trace])
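# Hedged usage sketch (not part of the original module): this reader is normally
# invoked through ObsPy's plugin mechanism rather than called directly, e.g.
# (the path is a placeholder):
#     from obspy import read
#     st = read("/path/to/knet_ascii_file")  # format auto-detected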
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| 31.498361
| 78
| 0.613407
| 40
| 0.004164
| 0
| 0
| 0
| 0
| 0
| 0
| 4,214
| 0.438638
|
7a0036f8904ef04950506fa3bb65a2bb9ab285ce
| 159
|
py
|
Python
|
great_expectations/dataset/__init__.py
|
avanderm/great_expectations
|
e4619a890700a492441a7ed3cbb9e5abb0953268
|
[
"Apache-2.0"
] | 1
|
2021-01-10T18:00:06.000Z
|
2021-01-10T18:00:06.000Z
|
great_expectations/dataset/__init__.py
|
avanderm/great_expectations
|
e4619a890700a492441a7ed3cbb9e5abb0953268
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/dataset/__init__.py
|
avanderm/great_expectations
|
e4619a890700a492441a7ed3cbb9e5abb0953268
|
[
"Apache-2.0"
] | null | null | null |
from .base import Dataset
from .pandas_dataset import MetaPandasDataset, PandasDataset
from .sqlalchemy_dataset import MetaSqlAlchemyDataset, SqlAlchemyDataset
| 53
| 72
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
7a00d530de18db23fd30cafb2ab4bd712d82beb0
| 379
|
py
|
Python
|
app/main/routes.py
|
theambidextrous/digitalemployeeapp
|
2c8b593a590621a34c1fa033a720f1e412c76b96
|
[
"MIT"
] | null | null | null |
app/main/routes.py
|
theambidextrous/digitalemployeeapp
|
2c8b593a590621a34c1fa033a720f1e412c76b96
|
[
"MIT"
] | null | null | null |
app/main/routes.py
|
theambidextrous/digitalemployeeapp
|
2c8b593a590621a34c1fa033a720f1e412c76b96
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, jsonify, request, redirect, abort, url_for, render_template
main = Blueprint('main', __name__)
# routes
@main.route('/', methods = ['GET'])
def Abort():
return redirect(url_for('main.index'))
# abort(403)
@main.route('/default.tpl', methods = ['GET'])
def index():
title = 'DE App'
return render_template('dflt.html', title = title)
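# Hedged usage sketch (assumption, not part of the original file): the blueprint
# only becomes reachable once it is registered on a Flask application, e.g.:
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(main)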
| 29.153846
| 88
| 0.672823
| 0
| 0
| 0
| 0
| 243
| 0.641161
| 0
| 0
| 84
| 0.221636
|