Dataset columns (one row per source file; columns marked nullable may be empty):
hexsha (string, 40 chars) | size (int64, 3 to 1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, 3 to 972 chars) | max_stars_repo_name (string, 6 to 130 chars) | max_stars_repo_head_hexsha (string, 40 to 78 chars) | max_stars_repo_licenses (list, 1 to 10 items) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24 chars, nullable) | max_stars_repo_stars_event_max_datetime (string, 24 chars, nullable) | max_issues_repo_path (string, 3 to 972 chars) | max_issues_repo_name (string, 6 to 130 chars) | max_issues_repo_head_hexsha (string, 40 to 78 chars) | max_issues_repo_licenses (list, 1 to 10 items) | max_issues_count (int64, 1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24 chars, nullable) | max_issues_repo_issues_event_max_datetime (string, 24 chars, nullable) | max_forks_repo_path (string, 3 to 972 chars) | max_forks_repo_name (string, 6 to 130 chars) | max_forks_repo_head_hexsha (string, 40 to 78 chars) | max_forks_repo_licenses (list, 1 to 10 items) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24 chars, nullable) | max_forks_repo_forks_event_max_datetime (string, 24 chars, nullable) | content (string, 3 to 1.03M chars) | avg_line_length (float64, 1.13 to 941k) | max_line_length (int64, 2 to 941k) | alphanum_fraction (float64, 0 to 1)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
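Each row pairs the repository metadata above with the file contents that follow it. The sketch below is a hedged illustration, not part of the dataset: field names are taken from the schema, all values are invented, and it only shows how such a record might be filtered in plain Python.

# Illustrative only: a record shaped like the schema above, with made-up values.
record = {
    "hexsha": "0" * 40,
    "size": 13304,
    "ext": "py",
    "lang": "Python",
    "max_stars_repo_name": "example/repo",        # hypothetical repository
    "max_stars_repo_licenses": ["BSD-3-Clause"],
    "max_stars_count": 14,                        # nullable column, may be None
    "content": "print('hello')",
    "avg_line_length": 33.1,
    "max_line_length": 120,
    "alphanum_fraction": 0.56,
}

def keep(rec, min_stars=5, allowed=("MIT", "BSD-3-Clause", "Apache-2.0")):
    """Keep Python rows with enough stars and a permissive license."""
    stars = rec.get("max_stars_count") or 0
    licenses = rec.get("max_stars_repo_licenses") or []
    return rec.get("lang") == "Python" and stars >= min_stars and any(lic in allowed for lic in licenses)

print(keep(record))  # True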
482dddc4b95b4877167dc37f7f640ecf5a8978b8 | 13,304 | py | Python | zfs/dnode.py | hiliev/py-zfs-recovery | ec3bb3316b28b91c197993d8c0a0803d4ab39605 | ["BSD-3-Clause"] | 14 | 2017-07-09T19:25:14.000Z | 2020-07-18T11:58:36.000Z | zfs/dnode.py | hiliev/py-zfs-recovery | ec3bb3316b28b91c197993d8c0a0803d4ab39605 | ["BSD-3-Clause"] | 8 | 2018-03-24T08:58:47.000Z | 2021-01-20T17:18:37.000Z | zfs/dnode.py | hiliev/py-zfs-recovery | ec3bb3316b28b91c197993d8c0a0803d4ab39605 | ["BSD-3-Clause"] | 2 | 2018-03-17T23:16:35.000Z | 2018-04-14T10:06:04.000Z |
# Copyright (c) 2017 Hristo Iliev <github@hiliev.eu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct, datetime
from zfs.blockptr import BlockPtr
from zfs.obj_desc import DMU_TYPE_DESC
BLKPTR_OFFSET = 64
class BonusDataset:
def __init__(self, data):
(self.ds_dir_obj, self.ds_prev_snap_obj, self.ds_prev_snap_txg, self.ds_prev_next_obj, self.ds_snapnames_zapobj,
self.ds_num_children, self.ds_creation_time, self.ds_creation_txg, self.ds_deadlist_obj, self.ds_used_bytes,
self.ds_compressed_bytes, self.ds_uncompressed_bytes, self.ds_unique_bytes, self.ds_fsid_guid, self.ds_guid,
self.ds_restoring) = struct.unpack("=16Q", data[:16*8])
self.bptr = BlockPtr()
self.bptr.parse(data[16*8:16*8+128])
def __str__(self):
fields = [
'ds_dir_obj',
'ds_prev_snap_obj',
'ds_prev_snap_txg',
'ds_prev_next_obj',
'ds_snapnames_zapobj',
'ds_num_children',
'ds_creation_time',
'ds_creation_txg',
'ds_deadlist_obj',
'ds_used_bytes',
'ds_compressed_bytes',
'ds_uncompressed_bytes',
'ds_unique_bytes',
'ds_fsid_guid',
'ds_guid',
'ds_restoring',
'ds_bp'
]
fmt = ' '.join([f + '={}' for f in fields])
return fmt.format(
self.ds_dir_obj,
self.ds_prev_snap_obj,
self.ds_prev_snap_txg,
self.ds_prev_next_obj,
self.ds_snapnames_zapobj,
self.ds_num_children,
self.ds_creation_time,
self.ds_creation_txg,
self.ds_deadlist_obj,
self.ds_used_bytes,
self.ds_compressed_bytes,
self.ds_uncompressed_bytes,
self.ds_unique_bytes,
self.ds_fsid_guid,
self.ds_guid,
self.ds_restoring,
self.bptr
)
class BonusDirectory:
def __init__(self, data):
(
self.dd_creation_time,
self.dd_head_dataset_obj,
self.dd_parent_obj,
self.dd_clone_parent_obj,
self.dd_child_dir_zapobj,
self.dd_used_bytes,
self.dd_compressed_bytes,
self.dd_uncompressed_bytes,
self.dd_quota,
self.dd_reserved,
self.dd_props_zapobj
) = struct.unpack("=11Q", data[:11*8])
def __str__(self):
fields = [
'dd_creation_time',
'dd_head_dataset_obj',
'dd_parent_obj',
'dd_clone_parent_obj',
'dd_child_dir_zapobj',
'dd_used_bytes',
'dd_compressed_bytes',
'dd_uncompressed_bytes',
'dd_quota',
'dd_reserved',
'dd_props_zapobj',
]
fmt = ' '.join([f+'={}' for f in fields])
return fmt.format(
self.dd_creation_time,
self.dd_head_dataset_obj,
self.dd_parent_obj,
self.dd_clone_parent_obj,
self.dd_child_dir_zapobj,
self.dd_used_bytes,
self.dd_compressed_bytes,
self.dd_uncompressed_bytes,
self.dd_quota,
self.dd_reserved,
self.dd_props_zapobj
)
class BonusZnode:
def __init__(self, data):
(
self.zp_atime, self.zp_atime_ns,
self.zp_mtime, self.zp_mtime_ns,
self.zp_ctime, self.zp_ctime_ns,
self.zp_crtime, self.zp_crtime_ns,
self.zp_gen,
self.zp_mode,
self.zp_size,
self.zp_parent,
self.zp_links,
self.zp_xattr,
self.zp_rdev,
self.zp_flags,
self.zp_uid, self.zp_gid
) = struct.unpack("=18Q", data[:18*8])
self.zp_inline_content = data[264:]
def size(self):
return self.zp_size
def mtime(self):
return self.zp_mtime
def mode(self):
return self.zp_mode
def uid(self):
return self.zp_uid
def gid(self):
return self.zp_gid
def __str__(self):
fields = [
'zp_atime', 'zp_atime_ns',
'zp_mtime', 'zp_mtime_ns',
'zp_ctime', 'zp_ctime_ns',
'zp_crtime', 'zp_crtime_ns',
'zp_gen',
'zp_mode',
'zp_size',
'zp_parent',
'zp_links',
'zp_xattr',
'zp_rdev',
'zp_flags',
'zp_uid', 'zp_gid'
]
fmt = ' '.join([f+'={}' for f in fields])
return fmt.format(
self.zp_atime, self.zp_atime_ns,
self.zp_mtime, self.zp_mtime_ns,
self.zp_ctime, self.zp_ctime_ns,
self.zp_crtime, self.zp_crtime_ns,
self.zp_gen,
self.zp_mode,
self.zp_size,
self.zp_parent,
self.zp_links,
self.zp_xattr,
self.zp_rdev,
self.zp_flags,
self.zp_uid, self.zp_gid
)
class BonusSysAttr:
def __init__(self, objset, data):
if objset is None:
return;
try:
SA_MAGIC=0x2F505A
(magic,layoutid,hdrsz,l) = struct.unpack("=IBBH",data[0:8])
if not (magic == SA_MAGIC):
print("[-] Error: SA_MAGIC wrong")
hdrsz *= 2
if layoutid == 3:
print("Symlink")
lenidx = 0
if (hdrsz < 8):
hdrsz = 8
ptr = hdrsz
#ptr = 8 #skip sa_hdr_phys_t
for f in objset._sa._lay[str(layoutid)]:
l = f['len']
b = data[ptr:ptr+l]
v = None
if (l == 16):
(v0,v1) = struct.unpack("=QQ",b)
v = [v0,v1];
elif (l == 8):
v, = struct.unpack("=Q",b)
elif (l == 4):
v, = struct.unpack("=I",b)
elif (l == 0):
l, = struct.unpack("=H",data[6+lenidx*2:6+lenidx*2+2])
lenidx += 1
if (f['name'] == "zpl_dacl_aces"):
pass
elif (f['name'] == "zpl_symlink"):
v = data[ptr:ptr+l]
#ptr = len(data)
ptr += l
setattr(self,f['name'], v);
n = f['name'].replace("zpl_","zp_");
setattr(self,n, v);
self.zp_inline_content = None
#ZFS_OLD_ZNODE_PHYS_SIZE=0x108
#if (len(data) > ZFS_OLD_ZNODE_PHYS_SIZE):
self.zp_inline_content = data[ptr:]
except:
pass
def size(self):
return self.zpl_size
def mtime(self):
try:
return self.zpl_mtime[0]
except:
return datetime.datetime.now().timestamp()
def mode(self):
return self.zpl_mode
def uid(self):
return self.zpl_uid
def gid(self):
return self.zpl_gid
def __str__(self):
pass
DNODE_FLAG_USED_BYTES=(1 << 0)
class DNode:
def __init__(self, data=None, objset=None):
self._data = None
self._type = None # uint8_t 1
self._indblkshift = None # uint8_t 1
self._nlevels = None # uint8_t 1
self._nblkptr = None # uint8_t 1
self._bonustype = None # uint8_t 1
self._checksum = None # uint8_t 1
self._compress = None # uint8_t 1
self._flags = None # uint8_t 1
self._datablkszsec = None # uint16_t 2
self._bonuslen = None # uint16_t 2
self._extra_slots = None # uint8_t 1
self._pad2 = None # uint8_t[4] 4
self._maxblkid = None # uint64_t 8
self._used = None # uint64_t 8
self._pad3 = None # uint64_t[4] 32
self._blkptr = None # blkptr_t[N] @64
self._bonus = None # uint8_t[BONUSLEN]
self._datablksize = None
self._objset = objset
if data is not None:
self.parse(data)
def parse(self, data):
if len(data) < 512:
raise ValueError("Data is too small")
# Save data for dumping purposes
self._data = data[:]
(self._type, self._indblkshift, self._nlevels, self._nblkptr,
self._bonustype, self._checksum, self._compress, self._flags,
self._datablkszsec, self._bonuslen, self._extra_slots, self._maxblkid,
self._used) = struct.unpack("=8B2HB3xQQ32x", data[:BLKPTR_OFFSET])
if self._type == 0:
return
# Object type > 100 (or even 53) is probably due to data error
elif self._type > 100:
if self._type==196: # on linux 196 is "zap" with "bonustype dataset"
pass
else:
self._invalidate()
return
self._blkptr = []
if self._nblkptr > 3:
# More than three block pointers is a sign of data error
self._invalidate()
return
self._used = self._used << 9 if not self._flags & DNODE_FLAG_USED_BYTES else self._used;
self._datablksize = self._datablkszsec << 9
ptr = BLKPTR_OFFSET
for bn in range(self._nblkptr):
b = BlockPtr(data=data[ptr:ptr+128])
self._blkptr.append(b)
ptr += 128
bonus_data = data[ptr:ptr+self._bonuslen]
if self._bonuslen and self._bonustype == 12:
self._bonus = BonusDirectory(bonus_data)
elif self._bonuslen and self._bonustype == 16:
self._bonus = BonusDataset(bonus_data)
elif self._bonuslen and self._bonustype == 17:
self._bonus = BonusZnode(bonus_data)
elif self._bonuslen and self._bonustype == 0x2c:
self._bonus = BonusSysAttr(self._objset, bonus_data)
else:
self._bonus = bonus_data
@property
def blkptrs(self):
return self._blkptr
@property
def maxblkid(self):
return self._maxblkid
@property
def bonus(self):
return self._bonus
@property
def type(self):
return self._type
@property
def levels(self):
return self._nlevels
@property
def datablksize(self):
return self._datablksize
@property
def indblkshift(self):
return self._indblkshift
def dump_data(self, file_path):
with open(file_path, 'wb') as f:
f.write(self._data)
def _invalidate(self):
self._type = None
def __str__(self):
if self._type is None:
return "<invalid dnode>"
elif self._type == 0:
return "<unallocated dnode>"
try:
if self._type == 196:
dmu_type = "zap"
else:
dmu_type = DMU_TYPE_DESC[self._type]
except IndexError:
dmu_type = "unk_{}".format(self._type)
bptrs = " ".join(["blkptr[{}]={}".format(i, v) for i, v in enumerate(self._blkptr)])
bonus = " bonus[{}]".format(self._bonuslen) if self._bonuslen else ""
if self._bonustype in [12, 16]:
bonus += "=[{}]".format(self._bonus)
return "[{}] {}B {}L/{} {}{}".format(dmu_type, self._maxblkid+1,
self._nlevels, 1 << self._indblkshift, bptrs, bonus)
@staticmethod
def from_bptr(vdev, bptr, dvas=(0, 1), objset=None):
data = None
for dva in dvas:
data,c = vdev.read_block(bptr, dva=dva)
if data and c:
break
if data is None:
return None
dn = DNode(objset=objset)
dn.parse(data)
return dn
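# --- Hedged usage sketch (added for illustration; not part of the original module) ---
if __name__ == "__main__":
    # An all-zero 512-byte buffer unpacks with dmu type 0, which parse() treats as unallocated.
    dn = DNode(bytes(512))
    print(dn)  # "<unallocated dnode>"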
| 33.094527 | 120 | 0.555848 |
8bc8473479b2f60615ebf15d36671f935a2ea859 | 5,391 | py | Python | tests/components/test_class_RunTask.py | aimakerspace/Synergos | ce972f6b031535e82be6724f42118c33f90e9189 | ["Apache-2.0"] | null | null | null | tests/components/test_class_RunTask.py | aimakerspace/Synergos | ce972f6b031535e82be6724f42118c33f90e9189 | ["Apache-2.0"] | null | null | null | tests/components/test_class_RunTask.py | aimakerspace/Synergos | ce972f6b031535e82be6724f42118c33f90e9189 | ["Apache-2.0"] | 3 | 2021-11-25T03:26:52.000Z | 2022-01-24T09:48:37.000Z |
#!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
import logging
# Libs
# Custom
from synergos.endpoints import RUN_ENDPOINTS
from conftest import (
PROJECT_KEY,
EXPT_KEY_1, EXPT_KEY_2,
check_resp_structure,
check_availability_in_single_archive,
check_availability_in_bulk_archives
)
##################
# Configurations #
##################
###################
# Tests - RunTask #
###################
# def test_RunTask_generate_bulk_url(init_params, experiment_task):
# bulk_url = EXPERIMENT_ENDPOINTS.EXPERIMENTS.substitute(
# **PROJECT_KEY,
# **init_params
# )
# assert experiment_task._generate_bulk_url(**PROJECT_KEY) == bulk_url
# def test_RunTask_generate_single_url(init_params, experiment_task):
# single_url_1 = EXPERIMENT_ENDPOINTS.EXPERIMENT.substitute(
# **init_params,
# **EXPT_KEY_1
# )
# assert (experiment_task._generate_single_url(**EXPT_KEY_1) == single_url_1)
# single_url_2 = EXPERIMENT_ENDPOINTS.EXPERIMENT.substitute(
# **init_params,
# **EXPT_KEY_2
# )
# assert (experiment_task._generate_single_url(**EXPT_KEY_2) == single_url_2)
# def test_RunTask_create(experiment_task, payloads):
# expt_payloads = payloads['experiment']
# for payload in expt_payloads:
# create_resp = experiment_task.create(**payload)
# check_resp_structure(resp=create_resp)
# check_availability_in_single_archive(
# payload=payload,
# archive=create_resp['data']
# )
# def test_RunTask_read_all(experiment_task, payloads):
# read_all_resp = experiment_task.read_all(**PROJECT_KEY)
# check_resp_structure(read_all_resp)
# expt_payloads = payloads['experiment']
# check_availability_in_bulk_archives(
# payloads=expt_payloads,
# archives=read_all_resp['data']
# )
# def test_RunTask_read(experiment_task, payloads):
# read_resp_1 = experiment_task.read(**EXPT_KEY_1)
# check_resp_structure(read_resp_1)
# check_availability_in_single_archive(
# payload=payloads['experiment'][0],
# archive=read_resp_1['data']
# )
# read_resp_2 = experiment_task.read(**EXPT_KEY_2)
# check_resp_structure(read_resp_2)
# check_availability_in_single_archive(
# payload=payloads['experiment'][1],
# archive=read_resp_2['data']
# )
# def test_RunTask_update(experiment_task, payloads):
# modified_payload_1 = {
# 'model': [
# {
# "activation": "sigmoid",
# "is_input": True,
# "l_type": "Linear",
# "structure": {
# "bias": True,
# "in_features": 20,
# "out_features": 10
# }
# },
# {
# "activation": "sigmoid",
# "is_input": False,
# "l_type": "Linear",
# "structure": {
# "bias": True,
# "in_features": 10,
# "out_features": 1
# }
# }
# ]
# }
# update_resp_1 = experiment_task.update(**EXPT_KEY_1, **modified_payload_1)
# check_resp_structure(update_resp_1)
# check_availability_in_single_archive(
# payload=modified_payload_1,
# archive=update_resp_1['data']
# )
# reverse_resp_1 = experiment_task.update(**payloads['experiment'][0])
# check_resp_structure(reverse_resp_1)
# check_availability_in_single_archive(
# payload=payloads['experiment'][0],
# archive=reverse_resp_1['data']
# )
# modified_payload_2 = {
# 'model': [
# {
# "activation": "relu",
# "is_input": True,
# "l_type": "Linear",
# "structure": {
# "bias": False,
# "in_features": 15,
# "out_features": 1
# }
# }
# ]
# }
# update_resp_2 = experiment_task.update(**EXPT_KEY_2, **modified_payload_2)
# check_resp_structure(update_resp_2)
# check_availability_in_single_archive(
# payload=modified_payload_2,
# archive=update_resp_2['data']
# )
# reverse_resp_2 = experiment_task.update(**payloads['experiment'][1])
# check_resp_structure(reverse_resp_2)
# check_availability_in_single_archive(
# payload=payloads['experiment'][1],
# archive=reverse_resp_2['data']
# )
# def test_RunTask_delete(experiment_task, payloads):
# delete_resp_1 = experiment_task.delete(**EXPT_KEY_1)
# check_resp_structure(delete_resp_1)
# check_availability_in_single_archive(
# payload=payloads['experiment'][0],
# archive=delete_resp_1['data']
# )
# retrieved_expts = experiment_task.read_all(**PROJECT_KEY)['data']
# assert len(retrieved_expts) == 1
# delete_resp_2 = experiment_task.delete(**EXPT_KEY_2)
# check_resp_structure(delete_resp_2)
# check_availability_in_single_archive(
# payload=payloads['experiment'][1],
# archive=delete_resp_2['data']
# )
# retrieved_expts = experiment_task.read_all(**PROJECT_KEY)['data']
# assert len(retrieved_expts) == 0
| 31.16185 | 81 | 0.595993 |
44eb9fe7b537f9bee8a249581408d34ca23f575a | 904 | py | Python | venv/bin/rst2xetex.py | RyanHelgoth/CMPUT404-Lab5 | 82424bf5a9b80ff186bd69d224457c8b70a3bdf3 | ["Apache-2.0"] | null | null | null | venv/bin/rst2xetex.py | RyanHelgoth/CMPUT404-Lab5 | 82424bf5a9b80ff186bd69d224457c8b70a3bdf3 | ["Apache-2.0"] | null | null | null | venv/bin/rst2xetex.py | RyanHelgoth/CMPUT404-Lab5 | 82424bf5a9b80ff186bd69d224457c8b70a3bdf3 | ["Apache-2.0"] | null | null | null |
#!/home/student/Lab5/CMPUT404-Lab5/venv/bin/python3
# $Id: rst2xetex.py 7847 2015-03-17 17:30:47Z milde $
# Author: Guenter Milde
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Lua/XeLaTeX code.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources for compilation with the Unicode-aware TeX variants '
'XeLaTeX or LuaLaTeX. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='xetex', description=description)
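# Illustrative invocation (file names are hypothetical, not part of the script):
#   python rst2xetex.py README.rst README.tex
#   xelatex README.tex   # then compile the generated LaTeX with a Unicode-aware engine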
| 32.285714 | 77 | 0.675885 |
a247aaffc90e1800c4f0b1cccef3ee92807bd07e | 533 | py | Python | listparser/helpers/enums.py | riccardorestagno/BuzzFeed-Reddit-Bot | 8c8b3c9da3e56c26565aaab2058036f55adebb0d | ["MIT"] | 7 | 2017-09-27T14:17:39.000Z | 2019-09-23T05:52:03.000Z | listparser/helpers/enums.py | riccardorestagno/list-parser-bot | 8c8b3c9da3e56c26565aaab2058036f55adebb0d | ["MIT"] | null | null | null | listparser/helpers/enums.py | riccardorestagno/list-parser-bot | 8c8b3c9da3e56c26565aaab2058036f55adebb0d | ["MIT"] | 6 | 2017-09-06T17:54:42.000Z | 2019-09-13T20:35:49.000Z |
from enum import Enum
class ArticleType(Enum):
All = 1
Business_Insider = 2
BuzzFeed = 3
CollegeHumor = 4
Cracked = 5
Polygon = 6
Screen_Rant = 7
def convert_enum_to_string(enum):
return enum.name.replace("_", " ")
def convert_string_to_articletype_enum(string):
return ArticleType[string.replace(" ", "_")]
def string_in_enum_list(enum_list, string):
for enum in enum_list:
if enum.name.replace("_", " ") == string.replace("_", " "):
return True
return False
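# --- Hedged usage example (added for illustration; not part of the original module) ---
if __name__ == "__main__":
    print(convert_enum_to_string(ArticleType.Business_Insider))       # Business Insider
    print(convert_string_to_articletype_enum("Screen Rant"))          # ArticleType.Screen_Rant
    print(string_in_enum_list([ArticleType.BuzzFeed], "BuzzFeed"))    # True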
| 19.035714 | 67 | 0.641651 |
bcd8bc74dd751569eae9af852f336b9b06fec985 | 8,311 | py | Python | cotk/dataloader/sentence_classification.py | ZhihongShao/cotk | 252ebc20c9ce175327e3721a9ddbdbb0bffd2744 | ["Apache-2.0"] | null | null | null | cotk/dataloader/sentence_classification.py | ZhihongShao/cotk | 252ebc20c9ce175327e3721a9ddbdbb0bffd2744 | ["Apache-2.0"] | null | null | null | cotk/dataloader/sentence_classification.py | ZhihongShao/cotk | 252ebc20c9ce175327e3721a9ddbdbb0bffd2744 | ["Apache-2.0"] | null | null | null |
"""Dataloader for language generation"""
from collections import Counter
from itertools import chain
import numpy as np
# from .._utils.unordered_hash import UnorderedSha256
from .._utils.file_utils import get_resource_file_path
from .._utils import hooks
from .dataloader import LanguageProcessingBase
from ..metric import MetricChain, AccuracyMetric
# pylint: disable=W0223
class SentenceClassification(LanguageProcessingBase):
r"""Base class for sentence classification datasets. This is an abstract class.
Arguments:{ARGUMENTS}
Attributes:{ATTRIBUTES}
"""
ARGUMENTS = LanguageProcessingBase.ARGUMENTS
ATTRIBUTES = LanguageProcessingBase.ATTRIBUTES
def get_batch(self, key, indexes):
'''Get a batch of specified `indexes`.
Arguments:
key (str): must be contained in `key_name`
indexes (list): a list of specified indexes
Returns:
(dict): A dict at least contains:
* sent_length(:class:`numpy.array`): A 1-d array, the length of sentence in each batch.
Size: `[batch_size]`
* sent(:class:`numpy.array`): A 2-d padding array containing id of words.
Only provide valid words. `unk_id` will be used if a word is not valid.
Size: `[batch_size, max(sent_length)]`
* label(:class:`numpy.array`): A 1-d array, the label of sentence in each batch.
* sent_allvocabs(:class:`numpy.array`): A 2-d padding array containing id of words.
Provide both valid and invalid words.
Size: `[batch_size, max(sent_length)]`
Examples:
>>> # all_vocab_list = ["<pad>", "<unk>", "<go>", "<eos>", "how", "are", "you",
>>> # "hello", "i", "am", "fine"]
>>> # vocab_size = 9
>>> # vocab_list = ["<pad>", "<unk>", "<go>", "<eos>", "how", "are", "you", "hello", "i"]
>>> dataloader.get_batch('train', [0, 1, 2])
{
"sent": numpy.array([
[2, 4, 5, 6, 3, 0], # first sentence: <go> how are you <eos> <pad>
[2, 7, 3, 0, 0, 0], # second sentence: <go> hello <eos> <pad> <pad> <pad>
[2, 7, 8, 1, 1, 3] # third sentence: <go> hello i <unk> <unk> <eos>
]),
"label": numpy.array([1, 2, 0]) # label of sentences
"sent_length": numpy.array([5, 3, 6]), # length of sentences
"sent_allvocabs": numpy.array([
[2, 4, 5, 6, 3, 0], # first sentence: <go> how are you <eos> <pad>
[2, 7, 3, 0, 0, 0], # second sentence: <go> hello <eos> <pad> <pad> <pad>
[2, 7, 8, 9, 10, 3] # third sentence: <go> hello i am fine <eos>
]),
}
'''
if key not in self.key_name:
raise ValueError("No set named %s." % key)
res = {}
batch_size = len(indexes)
res["sent_length"] = np.array( \
list(map(lambda i: len(self.data[key]['sent'][i]), indexes)))
res_sent = res["sent"] = np.zeros( \
(batch_size, np.max(res["sent_length"])), dtype=int)
res["label"] = np.zeros(batch_size, dtype=int)
for i, j in enumerate(indexes):
sentence = self.data[key]['sent'][j]
res["sent"][i, :len(sentence)] = sentence
res["label"][i] = self.data[key]['label'][j]
res["sent_allvocabs"] = res_sent.copy()
res_sent[res_sent >= self.valid_vocab_len] = self.unk_id
return res
def get_metric(self, prediction_key="prediction"):
'''Get metrics for accuracy. In other words, this function
provides metrics for sentence classification task.
It contains:
* :class:`.metric.AccuracyMetric`
Arguments:
prediction_key (str): The key of prediction over sentences.
Refer to :class:`.metric.AccuracyMetric`. Default: ``prediction``.
Returns:
A :class:`.metric.MetricChain` object.
'''
metric = MetricChain()
metric.add_metric(AccuracyMetric(self, \
label_key='label', \
prediction_key=prediction_key))
return metric
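# Hedged usage sketch (added for illustration; the forward/close interface is assumed
# from cotk's metric classes and is not defined in this file):
#   metric = dataloader.get_metric(prediction_key="prediction")
#   for batch in batches:        # each dict carries 'label' and 'prediction' entries
#       metric.forward(batch)
#   print(metric.close())        # e.g. {"accuracy": ...}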
class SST(SentenceClassification):
'''A dataloader for preprocessed SST dataset.
Arguments:
file_id (str): a str indicates the source of SST dataset.
        min_vocab_times (int): A cut-off threshold of valid tokens. All tokens appearing
            no less than `min_vocab_times` times in the **training set** will be marked as valid words.
            Default: 10.
max_sent_length (int): All sentences longer than `max_sent_length` will be shortened
to first `max_sent_length` tokens. Default: 50.
invalid_vocab_times (int): A cut-off threshold of invalid tokens. All tokens appear
not less than `invalid_vocab_times` in the **whole dataset** (except valid words) will be
marked as invalid words. Otherwise, they are unknown words, both in training or
testing stages. Default: 0 (No unknown words).
Refer to :class:`.LanguageProcessingBase` for attributes and methods.
References:
        [1] https://nlp.stanford.edu/sentiment/
        [2] Socher R, Perelygin A, Wu J, et al. Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank. EMNLP 2013.
'''
@hooks.hook_dataloader
def __init__(self, file_id, min_vocab_times=10, \
max_sent_length=50, invalid_vocab_times=0):
self._file_id = file_id
self._file_path = get_resource_file_path(file_id)
self._min_vocab_times = min_vocab_times
self._max_sent_length = max_sent_length
self._invalid_vocab_times = invalid_vocab_times
super(SST, self).__init__()
def _load_data(self):
r'''Loading dataset, invoked by `LanguageProcessingBase.__init__`
'''
def parseline(line):
label = int(line[1])
line = line.split(')')
sent = [x.split(' ')[-1].lower() for x in line if x != '']
return (label, sent)
origin_data = {}
for key in self.key_name:
f_file = open("%s/%s.txt" % (self._file_path, key), 'r', encoding='utf-8')
origin_data[key] = {}
_origin_data = list( \
map(parseline, f_file.readlines()))
origin_data[key]['sent'] = list( \
map(lambda line: line[1], _origin_data))
origin_data[key]['label'] = list( \
map(lambda line: line[0], _origin_data))
raw_vocab_list = list(chain(*(origin_data['train']['sent'])))
# Important: Sort the words preventing the index changes between
# different runs
vocab = sorted(Counter(raw_vocab_list).most_common(), \
key=lambda pair: (-pair[1], pair[0]))
left_vocab = list( \
filter( \
lambda x: x[1] >= self._min_vocab_times, \
vocab))
vocab_list = self.ext_vocab + list(map(lambda x: x[0], left_vocab))
valid_vocab_len = len(vocab_list)
valid_vocab_set = set(vocab_list)
for key in self.key_name:
if key == 'train':
continue
raw_vocab_list.extend(list(chain(*(origin_data[key]['sent']))))
vocab = sorted(Counter(raw_vocab_list).most_common(), \
key=lambda pair: (-pair[1], pair[0]))
left_vocab = list( \
filter( \
lambda x: x[1] >= self._invalid_vocab_times and x[0] not in valid_vocab_set, \
vocab))
vocab_list.extend(list(map(lambda x: x[0], left_vocab)))
print("valid vocab list length = %d" % valid_vocab_len)
print("vocab list length = %d" % len(vocab_list))
word2id = {w: i for i, w in enumerate(vocab_list)}
def line2id(line):
return ([self.go_id] + \
list(map(lambda word: word2id[word] if word in word2id else self.unk_id, line)) \
+ [self.eos_id])[:self._max_sent_length]
data = {}
data_size = {}
for key in self.key_name:
data[key] = {}
data[key]['sent'] = list(map(line2id, origin_data[key]['sent']))
data[key]['label'] = origin_data[key]['label']
data_size[key] = len(data[key]['sent'])
vocab = list(chain(*(origin_data[key]['sent'])))
vocab_num = len(vocab)
oov_num = len( \
list( \
filter( \
lambda word: word not in word2id, \
vocab)))
invalid_num = len( \
list( \
filter( \
lambda word: word not in valid_vocab_set, \
vocab))) - oov_num
length = list( \
map(len, origin_data[key]['sent']))
cut_num = np.sum( \
np.maximum( \
np.array(length) - \
self._max_sent_length + \
1, \
0))
print( \
"%s set. invalid rate: %f, unknown rate: %f, max length before cut: %d, cut word rate: %f" % \
(key, invalid_num / vocab_num, oov_num / vocab_num, max(length), cut_num / vocab_num))
return vocab_list, valid_vocab_len, data, data_size
def tokenize(self, sentence):
r'''Convert sentence(str) to list of token(str)
Arguments:
sentence (str)
Returns:
sent (list): list of token(str)
'''
return [x.split(' ')[-1].lower() for x in sentence if x != '']
| 35.216102 | 98 | 0.658405 |
f7b1393de898449000c60dd88afaf04c6ad5bc11 | 11,648 | py | Python | config/settings/base.py | veglez/my-wallet | 80b3811d13a3aa8d211b50b0fe37f015ffd5393c | ["MIT"] | null | null | null | config/settings/base.py | veglez/my-wallet | 80b3811d13a3aa8d211b50b0fe37f015ffd5393c | ["MIT"] | null | null | null | config/settings/base.py | veglez/my-wallet | 80b3811d13a3aa8d211b50b0fe37f015ffd5393c | ["MIT"] | null | null | null |
"""
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# mywallet/
APPS_DIR = ROOT_DIR / "mywallet"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
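# Illustrative .env contents consumed by the block above when DJANGO_READ_DOT_ENV_FILE=True
# (values are examples only, not part of the project):
#   DJANGO_DEBUG=True
#   DATABASE_URL=postgres://user:password@localhost:5432/mywallet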
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "America/Mexico_City"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
]
LOCAL_APPS = [
"mywallet.users.apps.UsersConfig",
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "mywallet.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"mywallet.utils.context_processors.settings_context",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND",
default="django.core.mail.backends.smtp.EmailBackend",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""veglez""", "veglez94@gmail.com")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "mywallet.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "mywallet.users.adapters.SocialAccountAdapter"
# django-rest-framework
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
}
# django-cors-headers - https://github.com/adamchainz/django-cors-headers#setup
CORS_URLS_REGEX = r"^/api/.*$"
# Your stuff...
# ------------------------------------------------------------------------------
| 40.585366 | 93 | 0.633156 |
a486585e5392da2ae42464bacfcb2dfd11f22c90 | 159 | py | Python | libs/youzan/__init__.py | fovegage/python3-youzan-pay | 793cfad34e2d64b365c0732f30509d1676847b5b | ["MIT"] | 15 | 2019-01-19T15:11:59.000Z | 2019-10-22T04:23:24.000Z | libs/youzan/__init__.py | fovegage/python3-youzan-pay | 793cfad34e2d64b365c0732f30509d1676847b5b | ["MIT"] | null | null | null | libs/youzan/__init__.py | fovegage/python3-youzan-pay | 793cfad34e2d64b365c0732f30509d1676847b5b | ["MIT"] | 5 | 2019-03-29T17:05:49.000Z | 2019-07-27T16:01:58.000Z |
# -*- coding: utf-8 -*-
# @Time : 2019/1/18 9:20
# @Author : fovegage
# @Email : fovegage@gmail.com
# @File : __init__.py.py
# @Software: PyCharm
| 26.5 | 32 | 0.566038 |
0a5e97b7c380da694314b89ea1d07ceb8cf99ed0 | 553 | py | Python | ballometer/sht.py | wipfli/ballometer | db86abe4f9dd541c96c58110579ae9dec729d119 | ["MIT"] | null | null | null | ballometer/sht.py | wipfli/ballometer | db86abe4f9dd541c96c58110579ae9dec729d119 | ["MIT"] | 6 | 2020-09-26T06:42:30.000Z | 2021-02-17T17:12:47.000Z | ballometer/sht.py | wipfli/ballometer | db86abe4f9dd541c96c58110579ae9dec729d119 | ["MIT"] | null | null | null |
try:
import busio
import adafruit_sht31d
except ImportError:
pass
class SHT:
def __init__(self):
self._sensor = adafruit_sht31d.SHT31D(i2c_bus=busio.I2C(24, 23))
@property
def temperature(self):
'''returns the temperature in Kelvin'''
T = self._sensor.temperature # deg C
return round(T + 273.15, 2) # K
@property
def humidity(self):
'''returns the relative humidity in percent'''
RH = self._sensor.relative_humidity # percent
return round(RH, 1) # percent
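# --- Hedged usage example (added for illustration; requires an SHT31-D wired on the I2C bus
# and the busio / adafruit_sht31d libraries) ---
if __name__ == "__main__":
    sht = SHT()
    print(sht.temperature, "K")  # e.g. 293.15
    print(sht.humidity, "%")     # e.g. 45.3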
| 24.043478 | 72 | 0.627486 |
fe0db2fa8304a336b480b95e9e8c73ba6f47d1c9 | 12,119 | py | Python | baselines/ecbp/agents/buffer/kbps_process.py | MouseHu/emdqn | ba907e959f21dd0b5a17117accccae9c82a79a3b | ["MIT"] | null | null | null | baselines/ecbp/agents/buffer/kbps_process.py | MouseHu/emdqn | ba907e959f21dd0b5a17117accccae9c82a79a3b | ["MIT"] | null | null | null | baselines/ecbp/agents/buffer/kbps_process.py | MouseHu/emdqn | ba907e959f21dd0b5a17117accccae9c82a79a3b | ["MIT"] | 1 | 2021-04-26T13:55:47.000Z | 2021-04-26T13:55:47.000Z |
import numpy as np
from sklearn.neighbors import BallTree, KDTree
import os
from baselines.ecbp.agents.buffer.lru_knn_gpu_ps import LRU_KNN_GPU_PS
from baselines.ecbp.agents.buffer.lru_knn_ps import LRU_KNN_PS
import gc
from baselines.deepq.experiments.atari.knn_cuda_fixmem import knn as knn_cuda_fixmem
import copy
from heapq import *
import logging
from baselines.ecbp.agents.buffer.hash_pqueue import HashPQueue
import threading
from multiprocessing import Process
from multiprocessing import Lock, Event
from multiprocessing import Manager
class KernelBasedPriorSweepProcess(Process):
def __init__(self, num_actions, buffer_size, latent_dim, hash_dim, conn, gamma=0.99):
super(KernelBasedPriorSweepProcess, self).__init__()
self.num_actions = num_actions
self.gamma = gamma
self.rmax = 100000
self.logger = logging.getLogger("ecbp")
self.sa_explore = 10
self.max_iter = 1000000
self.run_sweep = True
self.num_iters = 0
self.conn = conn
self.buffer_size = buffer_size
self.latent_dim = latent_dim
self.hash_dim = hash_dim
# self.queue_lock = Lock()
self.pqueue = HashPQueue()
self.b = 0.0001
self.h = 0.0001
self.knn_dist = None
self.knn_ind = None
self.sequence = []
def log(self, *args, logtype='debug', sep=' '):
getattr(self.logger, logtype)(sep.join(str(a) for a in args))
def grow_model(self, sa_pair): # grow model
index_t, action_t, reward_t, z_tp1, done_t = sa_pair
index_tp1, _, _ = self.peek(z_tp1)
if index_tp1 < 0:
index_tp1, override = self.ec_buffer.add_node(z_tp1)
self.log("add node", index_tp1, logtype='debug')
if override:
self.pqueue.remove(index_tp1)
# if (index_t, action_t) not in self.ec_buffer.prev_id[index_tp1]:
self.log("add edge", index_t, action_t, index_tp1, logtype='debug')
sa_count = self.ec_buffer.add_edge(index_t, index_tp1, action_t, reward_t, done_t)
coeff = np.exp(-np.array(self.knn_dist).reshape(-1) / self.b)
self.log("coeff", coeff.shape, coeff)
self.ec_buffer.pseudo_count[index_t][action_t] = {}
self.ec_buffer.pseudo_reward[index_t, action_t] = 0
# self.ec_buffer.pseudo_prev[index_tp1] = {}
assert index_t in self.knn_ind, "self should be a neighbour of self"
for i, s in enumerate(self.knn_ind):
for sp in self.ec_buffer.next_id[s][action_t].keys():
dist = self.ec_buffer.distance(self.ec_buffer.states[sp],
self.ec_buffer.states[sp] + self.ec_buffer.states[index_t] -
self.ec_buffer.states[s])
reweight = np.exp(-np.array(dist).squeeze() / self.h)
weighted_count = reweight * coeff[i] * self.ec_buffer.next_id[s][action_t][sp]
try:
self.ec_buffer.pseudo_count[index_t][action_t][sp] += weighted_count
except KeyError:
self.ec_buffer.pseudo_count[index_t][action_t][sp] = weighted_count
self.ec_buffer.pseudo_prev[sp][(index_t, action_t)] = 1
self.ec_buffer.pseudo_reward[index_t, action_t] += weighted_count * self.ec_buffer.reward[
s, action_t]
if index_t == s:
continue
for sp in self.ec_buffer.next_id[index_t][action_t].keys():
dist = self.ec_buffer.distance(self.ec_buffer.states[sp],
self.ec_buffer.states[sp] + self.ec_buffer.states[s] -
self.ec_buffer.states[index_t])
reweight = np.exp(-np.array(dist).squeeze() / self.h)
weighted_count = reweight * coeff[i] * self.ec_buffer.next_id[index_t][action_t][sp]
try:
self.ec_buffer.pseudo_count[s][action_t][sp] += reweight * coeff[i]
except KeyError:
self.ec_buffer.pseudo_count[s][action_t][sp] = weighted_count
self.ec_buffer.pseudo_prev[sp][(s, action_t)] = 1
self.ec_buffer.pseudo_reward[s, action_t] += reweight * coeff[i] * self.ec_buffer.reward[
index_t, action_t]
if sa_count > self.sa_explore:
self.ec_buffer.internal_value[index_t, action_t] = 0
return index_tp1, sa_count
# def grow_model(self, sa_pair): # grow model
# index_t, action_t, reward_t, z_tp1, done_t = sa_pair
# index_tp1, _, _ = self.ec_buffer.peek(z_tp1)
# # self.log("finish peek")
# if index_tp1 < 0:
# index_tp1, override = self.ec_buffer.add_node(z_tp1)
#
# self.log("add node", index_tp1, logtype='debug')
# if override:
# self.pqueue.remove(index_tp1)
#
# # if (index_t, action_t) not in self.ec_buffer.prev_id[index_tp1]:
# self.log("add edge", index_t, action_t, index_tp1, logtype='debug')
# sa_count = self.ec_buffer.add_edge(index_t, index_tp1, action_t, reward_t, done_t)
# self.ec_buffer.pseudo_count[index_t][action_t] = self.ec_buffer.pseudo_count[index_t][action_t]
# self.ec_buffer.pseudo_count[index_t][action_t] = self.ec_buffer.next_id[index_t][action_t]
# # self.pseudo_count = [[{} for __ in range(num_actions)] for _ in range(capacity)]
# self.ec_buffer.pseudo_reward[index_t,action_t] = reward_t*sum(self.ec_buffer.pseudo_count[index_t][action_t].values())
# self.ec_buffer.pseudo_prev[index_tp1] = {x:1 for x in self.ec_buffer.prev_id[index_tp1]}
# # if sa_coun t > self.sa_explore:
# # self.ec_buffer.internal_value[index_t, action_t] = 0
# return index_tp1, sa_count
def observe(self, sa_pair):
# self.update_enough.wait(timeout=1000)
# self.log("ps pqueue len", len(self.pqueue))
# grow model
index_tp1, count_t = self.grow_model(sa_pair)
# update current value
index_t, action_t, reward_t, z_tp1, done_t = sa_pair
self.sequence.append(index_t)
self.log("self neighbour", index_t, self.knn_ind)
assert index_t in self.knn_ind, "self should be a neighbor of self"
for index in self.knn_ind:
# self.log("q before observe", self.ec_buffer.external_value[index, :],index,action_t)
self.update_q_value(index, action_t)
# self.log("q after observe", self.ec_buffer.external_value[index, :], index, action_t)
self.ec_buffer.state_value_v[index_t] = np.nanmax(self.ec_buffer.external_value[index_t, :])
priority = abs(
self.ec_buffer.state_value_v[index_t] - np.nan_to_num(self.ec_buffer.state_value_u[index_t], copy=True))
if priority > 1e-7:
self.pqueue.push(priority, index_t)
if done_t:
self.update_sequence()
# self.iters_per_step = 0
# self.update_enough.clear()
self.conn.send((2, index_tp1))
def backup(self):
# recursive backup
self.num_iters += 1
if len(self.pqueue) > 0:
priority, index = self.pqueue.pop()
delta_u = self.ec_buffer.state_value_v[index] - np.nan_to_num(self.ec_buffer.state_value_u[index],
copy=True)
self.ec_buffer.state_value_u[index] = self.ec_buffer.state_value_v[index]
self.log("backup node", index, "priority", priority, "new value",
self.ec_buffer.state_value_v[index],
"delta", delta_u)
for sa_pair in self.ec_buffer.pseudo_prev[index].keys():
state_tm1, action_tm1 = sa_pair
# self.log("update s,a,s',delta", state_tm1, action_tm1, index, delta_u)
# self.log("q before backup",self.ec_buffer.external_value[state_tm1,:],state_tm1,action_tm1)
self.update_q_value_backup(state_tm1, action_tm1, index, delta_u)
self.ec_buffer.state_value_v[state_tm1] = np.nanmax(self.ec_buffer.external_value[state_tm1, :])
# self.log("q after backup", self.ec_buffer.external_value[index, :], state_tm1,action_tm1)
priority = abs(
self.ec_buffer.state_value_v[state_tm1] - np.nan_to_num(
self.ec_buffer.state_value_u[state_tm1], copy=True))
if priority > 1e-7:
self.pqueue.push(priority, state_tm1)
if self.num_iters % 100000 == 0:
self.log("backup count", self.num_iters)
def update_sequence(self):
        # to make sure that the final reward signal propagates quickly through the states,
        # we need a sequence update similar to episodic control
for p, s in enumerate(self.sequence):
# self.pqueue.push(p + self.rmax, s)
self.ec_buffer.newly_added[s] = False
self.sequence = []
# self.ec_buffer.build_tree()
def update_q_value(self, state, action):
n_sa = sum(self.ec_buffer.pseudo_count[state][action].values())
if n_sa < 1e-7:
return
r_smooth = np.nan_to_num(self.ec_buffer.pseudo_reward[state, action] / n_sa)
# n_sasp = sum([coeff[i] * self.ec_buffer.next_id[s][action].get(state_tp1, 0) for i, s in enumerate(self.ind)])
self.ec_buffer.external_value[state, action] = r_smooth
for state_tp1 in self.ec_buffer.pseudo_count[state][action].keys():
value_tp1 = np.nan_to_num(self.ec_buffer.state_value_u[state_tp1])
trans_p = self.ec_buffer.pseudo_count[state][action][state_tp1] / n_sa
self.ec_buffer.external_value[state, action] += trans_p * self.gamma * value_tp1
def update_q_value_backup(self, state, action, state_tp1, delta_u):
n_sa = sum(self.ec_buffer.pseudo_count[state][action].values())
if n_sa < 1e-7:
return
n_sasp = self.ec_buffer.pseudo_count[state][action].get(state_tp1, 0)
trans_p = n_sasp / n_sa
assert 0 <= trans_p <= 1, "nsa{} nsap{} trans{}".format(n_sa, n_sasp, trans_p)
if np.isnan(self.ec_buffer.external_value[state, action]):
self.ec_buffer.external_value[state, action] = 0
self.ec_buffer.external_value[state, action] += self.gamma * trans_p * delta_u
def peek(self, state):
ind = self.ec_buffer.peek(state)
return ind
def run(self):
self.ec_buffer = LRU_KNN_GPU_PS(self.buffer_size, self.hash_dim, 'game', 0, self.num_actions)
while self.run_sweep:
self.backup()
self.recv_msg()
def retrieve_q_value(self, obj):
z, knn = obj
extrinsic_qs, intrinsic_qs, find = self.ec_buffer.act_value_ec(z, knn)
self.conn.send((0, (extrinsic_qs, intrinsic_qs, find)))
def peek_node(self, obj):
z = obj
ind, knn_dist, knn_ind = self.ec_buffer.peek(z)
knn_dist = np.array(knn_dist).reshape(-1).tolist()
knn_ind = np.array(knn_ind).reshape(-1).tolist()
if ind == -1:
ind, _ = self.ec_buffer.add_node(z)
knn_dist = [0] + knn_dist
knn_ind = [ind] + knn_ind
self.log("add node for first ob ", ind)
self.knn_dist = knn_dist
self.knn_ind = knn_ind
self.conn.send((1, ind))
def recv_msg(self):
# 0 —— retrieve q values
# 1 —— peek or add node
# 2 —— observe
# 3 —— kill
while self.conn.poll():
msg, obj = self.conn.recv()
if msg == 0:
self.retrieve_q_value(obj)
elif msg == 1:
self.peek_node(obj)
elif msg == 2:
self.observe(obj)
elif msg == 3:
self.run_sweep = False
self.conn.send((3, True))
else:
raise NotImplementedError
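# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The parent-process side of the message protocol handled in recv_msg above, assuming the
# process was created with one end of a multiprocessing.Pipe():
#   parent_conn, child_conn = multiprocessing.Pipe()
#   proc = KernelBasedPriorSweepProcess(num_actions, buffer_size, latent_dim, hash_dim, child_conn)
#   proc.start()
#   parent_conn.send((1, z))          # msg 1: peek or add a node for latent state z
#   msg, index = parent_conn.recv()   # replies with (1, ind)
#   parent_conn.send((3, None))       # msg 3: ask the sweep process to shut down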
| 48.09127 | 128 | 0.607063 |
0cfe22f9d15cdaff879f4d0346f39ad2ad365f7a | 1,828 | py | Python | zz-practice/learn.py | aloneZERO/Py-Party | d9f1daf0a4e35269159741b2dbbd905e8823c3bb | ["Apache-2.0"] | 3 | 2017-04-05T02:10:55.000Z | 2018-02-07T08:27:47.000Z | zz-practice/learn.py | aloneZERO/Py-Party | d9f1daf0a4e35269159741b2dbbd905e8823c3bb | ["Apache-2.0"] | null | null | null | zz-practice/learn.py | aloneZERO/Py-Party | d9f1daf0a4e35269159741b2dbbd905e8823c3bb | ["Apache-2.0"] | 3 | 2018-02-07T06:09:49.000Z | 2020-08-06T08:50:13.000Z |
#!python3
# coding: utf-8
import copy
import time
import utils
import zz_info
import zz_data

# Let's get going
def fk_zz(session, section):
    batchId = 1
    jid = section['jid']  # section number
    section_status = zz_info.getSectionStatus(session, jid)
    last_time = int(section_status['learned_time'])
    while True:
        learned_time = int(section_status['learned_time'])
        total_time = int(section_status['total_time'])
        if section_status['status']:
            print('This section is already finished: ' + section['title'], end='\n\n')
            return
        else:
            if last_time != learned_time:
                print('\rProgress of ' + section['title'] + ': {:.2f}%'.format(learned_time * 100 / total_time))
                last_time = learned_time
                batchId = batchId + 1
        # Wait 15 seconds (simulates studying; reporting too early causes an error)
        time.sleep(15)
        learn_header = copy.deepcopy(zz_data.learn_header)
        learn_payload = copy.deepcopy(zz_data.learn_payload)
        learn_header['Referer'] = learn_header['Referer'].format(jid)
        learn_payload['c0-e2'] = learn_payload['c0-e2'].format(jid)
        learn_payload['page'] = learn_payload['page'].format(jid)
        learn_payload['batchId'] = batchId
        learn_payload['scriptSessionId'] = utils.genSSIdBySession(session)
        # TODO debug point: learning request parameters
        # print( str(learn_payload) )
        r = session.post(
            url=zz_data.learn_url,
            data=learn_payload,
            headers=learn_header
        )
        r.encoding = 'UTF-8'
        # TODO debug point: learning request response
        # print(r.text)
        if r.text.find('flag:1') == -1:
            print(section['title'] + ' something went wrong ~~~')
        else:
            print(section['title'] + ': studied another 15 seconds!')
        section_status = zz_info.getSectionStatus(session, section['jid'])
        batchId += 1

# Start studying
def toBeImmortal(session):
    print("Study mode started:")
    all_chapter = zz_info.getAllChapter(session)
    for chapter in all_chapter:
        print(chapter['title'] + ' ' + '*' * 100)
        sections = chapter['sections']
        for section in sections:
            fk_zz(session, section)
    print('All sections finished!')
| 23.74026 | 84 | 0.696389 |
b6127f69d52ee25a88e79da8fabd9ade26267d3d | 2,218 | py | Python | runs/nodes/start_ansible_hosts.py | Ruilkyu/kubernetes_start | 9e88a7f1c64899454af8f9be1dd9653ba435e21f | ["Apache-2.0"] | 2 | 2020-07-24T14:19:57.000Z | 2020-08-10T18:30:08.000Z | runs/nodes/start_ansible_hosts.py | Ruilkyu/kubernetes_start | 9e88a7f1c64899454af8f9be1dd9653ba435e21f | ["Apache-2.0"] | null | null | null | runs/nodes/start_ansible_hosts.py | Ruilkyu/kubernetes_start | 9e88a7f1c64899454af8f9be1dd9653ba435e21f | ["Apache-2.0"] | 1 | 2021-07-09T10:29:11.000Z | 2021-07-09T10:29:11.000Z |
"""
时间:2020/6/12
作者:lurui
功能:根据提供的nodes模块列表生成nodes的ansible模块的nodes_hosts文件
时间:2020/6/17
作者:lurui
修改:基路径 basedir = os.path.dirname(os.path.dirname(os.getcwd())),改为调用者路径 basedir = os.path.abspath('.')
时间:2020/8/11
作者:lurui
修改:node名称由k8s-node-{0}-{1}改为三位k8s-node-{0}-{1}-{2}
"""
import os
import configparser
def start_ansible_hosts():
basedir = os.path.abspath('.')
config = configparser.ConfigParser()
# config.read(basedir + '/cfg/ssh.ini')
config.read(basedir + '/cfg/config.ini')
port = config['SSH']['port']
clusterdns = config['RELATED_IP']['cluster_dns']
clustercidr = config['RELATED_IP']['cluster_cidr']
nodes_list = basedir + '/cfg/nodes.txt'
try:
nodes_list_fh = open(nodes_list, mode="r", encoding='utf-8')
except FileNotFoundError:
os.mknod(nodes_list)
nodes_list_fh = open(nodes_list, mode="r", encoding='utf-8')
if os.path.exists(basedir + '/ansible/hosts/nodes_hosts'):
os.remove(basedir + '/ansible/hosts/nodes_hosts')
if not os.path.exists(basedir + '/ansible/hosts'):
os.makedirs(basedir + '/ansible/hosts')
nodes_ansible_hosts_data = ''
nodes_ansible_hosts_data = nodes_ansible_hosts_data + "[all:vars]" + "\n"
nodes_ansible_hosts_data = nodes_ansible_hosts_data + "ansible_ssh_port={0}".format(port) + "\n" + "\n"
nodes_ansible_hosts_data = nodes_ansible_hosts_data + "[nodes]" + "\n"
try:
for k in nodes_list_fh.readlines():
result = k.strip("\n").split(".")
first = result[1]
second = result[2]
third = result[3]
v = k.strip("\n")
nodes_ansible_hosts_data += v + " node_name=k8s-node-{0}-{1}-{2} ".format(first, second, third) + "node_ip={0} ".format(
v) + "cluster_dns={0} ".format(clusterdns) + "cluster_cidr={0}".format(clustercidr) + "\n"
except Exception as e:
print(e)
try:
location = basedir + '/ansible/hosts/nodes_hosts'
file = open(location, 'a')
resultdate = ""
resultdate = nodes_ansible_hosts_data
file.write(resultdate)
file.close()
except Exception as e:
print(e)
# start_ansible_hosts()
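# --- Hedged example (added for illustration; values are hypothetical) ---
# Given a cfg/nodes.txt line "192.168.1.10", the loop above emits a nodes_hosts entry like:
#   192.168.1.10 node_name=k8s-node-168-1-10 node_ip=192.168.1.10 cluster_dns=<cluster_dns> cluster_cidr=<cluster_cidr>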
| 31.239437 | 132 | 0.628494 |
e04fde7f4b9f1708638975d82aabe46b633fe549 | 3,691 | py | Python | speakInOut/autho/forms.py | pvgupta24/inout | 621309cf9a2ff83a0d5aa8c4dd490daa42ed8484 | ["MIT"] | null | null | null | speakInOut/autho/forms.py | pvgupta24/inout | 621309cf9a2ff83a0d5aa8c4dd490daa42ed8484 | ["MIT"] | 7 | 2020-06-06T00:01:29.000Z | 2022-02-10T11:07:34.000Z | speakInOut/autho/forms.py | pvgupta24/inout | 621309cf9a2ff83a0d5aa8c4dd490daa42ed8484 | ["MIT"] | 2 | 2020-02-11T14:44:32.000Z | 2020-02-21T17:39:04.000Z |
from datetime import date
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
def validate_username_available(username):
""" validator that throws an error if the given username already exists."""
if User.objects.filter(username__icontains=username).count():
raise forms.ValidationError("This email is already registered")
def validate_username_exists(username):
""" validator that throws an error if the given username doesn't exists."""
if not User.objects.filter(username__icontains=username).count():
raise forms.ValidationError("This email does not exist")
def setup_field(field, placeholder=None):
"""
This configures the given field to play nice with the bootstrap theme. Additionally, you can add
an additional argument to set a placeholder text on the field.
"""
field.widget.attrs['class'] = 'form-control'
if placeholder is not None:
field.widget.attrs['placeholder'] = placeholder
class BasicForm(forms.Form):
def disable_field(self, field):
"""
marks field as disabled
:param field: name of the field
"""
self.fields[field].widget.attrs['disabled'] = ""
def mark_error(self, field, description):
"""
        Marks the given field as erroneous. The given description is displayed when the form is generated
:param field: name of the field
:param description: The error description
"""
self._errors[field] = self.error_class([description])
del self.cleaned_data[field]
def clear_errors(self):
self._errors = {}
class LoginForm(BasicForm):
email = forms.EmailField(max_length=50,validators=[validate_username_exists])
setup_field(email,'Enter Email here')
password = forms.CharField(max_length=50,widget=forms.PasswordInput())
setup_field(password,'Enter password here')
def clean(self):
"""
This is to make sure the password is valid for the given email.
"""
cleaned_data = super(LoginForm,self).clean()
username = cleaned_data.get('email')
password = cleaned_data.get('password')
if username and password:
user = authenticate(username=username, password=password)
if user is None:
self.mark_error('password', 'Incorrect password')
return cleaned_data
class AccountRegisterForm(BasicForm):
firstname = forms.CharField(label='First Name',max_length=50)
setup_field(firstname,'Enter first name here')
lastname = forms.CharField(label='Last Name', max_length=50)
setup_field(lastname, 'Enter last name here')
email = forms.EmailField(max_length=50, validators=[validate_username_available])
setup_field(email, 'Enter email here')
password_first = forms.CharField(label='Password', min_length=1, max_length=50, widget=forms.PasswordInput())
setup_field(password_first, "Enter password here")
password_second = forms.CharField(label='', min_length=1, max_length=50, widget=forms.PasswordInput())
setup_field(password_second, "Enter password again")
def clean(self):
"""This is to make sure both passwords fields have the same values in them. If they don't mark
them as erroneous."""
cleaned_data = super(AccountRegisterForm, self).clean()
password_first = cleaned_data.get('password_first')
password_second = cleaned_data.get('password_second')
if password_first and password_second and password_first!=password_second:
self.mark_error('password_second','Passwords do not match')
return cleaned_data
| 39.688172
| 113
| 0.700352
|
af97b87f78d6be6188dd49ac209d7c977566241e
| 1,637
|
py
|
Python
|
src/microprobe/model/__init__.py
|
rbertran/microprobe
|
232b60aad88b3541de1a962d6da924b234cd521c
|
[
"Apache-2.0"
] | 2
|
2019-11-20T18:29:02.000Z
|
2019-11-20T18:29:05.000Z
|
src/microprobe/model/__init__.py
|
rbertran/microprobe
|
232b60aad88b3541de1a962d6da924b234cd521c
|
[
"Apache-2.0"
] | null | null | null |
src/microprobe/model/__init__.py
|
rbertran/microprobe
|
232b60aad88b3541de1a962d6da924b234cd521c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":mod:`microprobe.model` package
"""
# Futures
# Built-in modules
# Third party modules
# Own modules
from __future__ import absolute_import
from microprobe.utils.logger import get_logger
# Local modules
# Constants
LOG = get_logger(__name__)
__all__ = ["GenericModel"]
# Functions
# Classes
class GenericModel(object):
"""GenericModel Class
Base class to represent different types of models.
"""
def __init__(self, name, description):
"""
:param name:
:param description:
"""
super(GenericModel, self).__init__()
self._name = name
self._description = description
@property
def name(self):
"""Name of the model (:class:`str`)."""
return self._name
@property
def description(self):
"""Description of the model (:class:`str`)."""
return self._description
def __str__(self):
"""x.__str__() <==> str(x)"""
return "%s(%s, %s)" % (
self.__class__.__name__, self.name, self.description
)
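# Minimal usage sketch (illustrative only; the instance below is hypothetical):
# subclasses pass their name/description up to this base class and inherit the
# read-only properties and the __str__ formatting.
#
#   model = GenericModel("example", "An example model")
#   assert model.name == "example"
#   print(model)  # -> GenericModel(example, An example model)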
| 22.736111
| 74
| 0.66402
|
7b86db4dbd3572486f9a0516ee817d64e96f086d
| 900
|
py
|
Python
|
examples/new_theme.py
|
jojoelfe/napari
|
b52a136dad392c091b0008c0b8d7fcc5ef460f66
|
[
"BSD-3-Clause"
] | 7
|
2018-07-03T17:35:46.000Z
|
2018-11-07T15:48:58.000Z
|
examples/new_theme.py
|
maweigert/napari
|
48cdf4d1c4bcf6f76603e90b1c0c7498e2aba6c0
|
[
"BSD-3-Clause"
] | 120
|
2018-09-04T22:05:13.000Z
|
2019-03-02T01:13:57.000Z
|
examples/new_theme.py
|
maweigert/napari
|
48cdf4d1c4bcf6f76603e90b1c0c7498e2aba6c0
|
[
"BSD-3-Clause"
] | 8
|
2018-09-04T21:48:26.000Z
|
2019-01-29T04:48:30.000Z
|
"""
New theme
=========
Displays an image and sets the theme to a new custom theme.
"""
from skimage import data
import napari
from napari.utils.theme import available_themes, get_theme, register_theme
# create the viewer with an image
viewer = napari.view_image(data.astronaut(), rgb=True, name='astronaut')
# List themes
print('Original themes', available_themes())
blue_theme = get_theme('dark', False)
blue_theme.name = "blue"
blue_theme.icon = (
'rgb(0, 255, 255)' # you can provide colors as rgb(XXX, YYY, ZZZ)
)
blue_theme.background = 28, 31, 48 # or as tuples
blue_theme.foreground = [45, 52, 71] # or as list
blue_theme.primary = '#50586c' # or as hexes
blue_theme.current = 'orange' # or as color name
register_theme('blue', blue_theme)
# List themes
print('New themes', available_themes())
# Set theme
viewer.theme = 'blue'
if __name__ == '__main__':
napari.run()
| 22.5
| 74
| 0.71
|
ec78a08a317a2190aff7c791d5928b6fab18eb7e
| 3,367
|
py
|
Python
|
tests/test_sonnendach_reference.py
|
timtroendle/possibility-for-electricity-autarky
|
a3d3c99ef90bbccd7232e2170317a259a77661d3
|
[
"MIT"
] | 11
|
2018-11-12T14:00:19.000Z
|
2021-11-16T19:00:34.000Z
|
tests/test_sonnendach_reference.py
|
timtroendle/possibility-for-electricity-autarky
|
a3d3c99ef90bbccd7232e2170317a259a77661d3
|
[
"MIT"
] | 8
|
2020-04-14T15:54:58.000Z
|
2020-10-23T12:59:59.000Z
|
tests/test_sonnendach_reference.py
|
timtroendle/possibility-for-electricity-autarky
|
a3d3c99ef90bbccd7232e2170317a259a77661d3
|
[
"MIT"
] | 4
|
2019-03-21T01:44:01.000Z
|
2021-06-20T03:16:51.000Z
|
"""Test whether our estimations are close to the ones from sonnendach.ch"""
import os
from pathlib import Path
import pytest
import rasterio
import rasterio.mask
from rasterstats import zonal_stats
import fiona
from src.technical_eligibility import Eligibility
ROOT_DIR = Path(os.path.abspath(__file__)).parent.parent
PATH_TO_CATEGORIES = ROOT_DIR / "build" / "technically-eligible-land.tif"
PATH_TO_AREAS = ROOT_DIR / "build" / "technically-eligible-area-km2.tif"
PATH_TO_ENERGY_YIELD = ROOT_DIR / "build" / "technically-eligible-electricity-yield-pv-prio-twh.tif"
PATH_TO_NUTS = ROOT_DIR / "build" / "administrative-borders-nuts.gpkg"
PATH_TO_SONNENDACH_AREA_ESTIMATE = ROOT_DIR / "data" / "automatic" / "sonnendach" /\
"total-rooftop-area-km2.txt"
PATH_TO_SONNENDACH_YIELD_ESTIMATE = ROOT_DIR / "data" / "automatic" / "sonnendach" /\
"total-yield-twh.txt"
@pytest.mark.skipif(not PATH_TO_AREAS.exists(), reason="Eligible area raster data not available.")
@pytest.mark.skipif(not PATH_TO_NUTS.exists(), reason="Switzerland shape not available.")
@pytest.mark.skipif(not PATH_TO_SONNENDACH_AREA_ESTIMATE.exists(), reason="Sonnendach area estimation not available.")
def test_switzerland_rooftop_area():
with open(PATH_TO_SONNENDACH_AREA_ESTIMATE, "r") as f_sonnendach_estimate:
sonnendach_estimate = float(f_sonnendach_estimate.readline())
with fiona.open(PATH_TO_NUTS.as_posix(), "r", layer="nuts0") as shapefile:
switzerland = [feature["geometry"] for feature in shapefile if feature["properties"]["country_code"] == "CHE"]
assert len(switzerland) == 1
with rasterio.open(PATH_TO_AREAS.as_posix()) as src:
transform = src.transform
areas = src.read(1)
with rasterio.open(PATH_TO_CATEGORIES.as_posix()) as src:
categories = src.read(1)
areas[categories != Eligibility.ROOFTOP_PV] = 0
zs = zonal_stats(switzerland, areas, affine=transform, stats="sum", nodata=-999)
our_estimate = zs[0]["sum"]
assert our_estimate == pytest.approx(sonnendach_estimate, 0.02) # 2% tolerance
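# Note (an illustrative summary of the steps above): pixels whose category is not
# ROOFTOP_PV are zeroed before zonal_stats, so the "sum" statistic over the
# Switzerland geometry aggregates only rooftop-eligible pixels, which is what
# makes the comparison against the sonnendach.ch estimate meaningful.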
@pytest.mark.skipif(not PATH_TO_ENERGY_YIELD.exists(), reason="Eligible energy yield raster data not available.")
@pytest.mark.skipif(not PATH_TO_NUTS.exists(), reason="Switzerland shape not available.")
@pytest.mark.skipif(
not PATH_TO_SONNENDACH_YIELD_ESTIMATE.exists(),
reason="Sonnendach yield estimation not available.")
def test_switzerland_energy_yield():
with open(PATH_TO_SONNENDACH_YIELD_ESTIMATE, "r") as f_sonnendach_estimate:
sonnendach_estimate = float(f_sonnendach_estimate.readline())
with fiona.open(PATH_TO_NUTS.as_posix(), "r", layer="nuts0") as shapefile:
switzerland = [feature["geometry"] for feature in shapefile if feature["properties"]["country_code"] == "CHE"]
assert len(switzerland) == 1
with rasterio.open(PATH_TO_ENERGY_YIELD.as_posix()) as src:
transform = src.transform
energy_yield = src.read(1)
with rasterio.open(PATH_TO_CATEGORIES.as_posix()) as src:
categories = src.read(1)
energy_yield[categories != Eligibility.ROOFTOP_PV] = 0
zs = zonal_stats(switzerland, energy_yield, affine=transform, stats="sum", nodata=-999)
our_estimate = zs[0]["sum"]
assert our_estimate <= sonnendach_estimate
assert our_estimate == pytest.approx(sonnendach_estimate, 0.10) # 10% tolerance
| 51.8
| 118
| 0.743689
|
48eea26ba1f209704c1d838bee10e952b77ebf98
| 2,577
|
py
|
Python
|
dims.py
|
vkopey/Thread-turning-simulator
|
9622d6294ddec56bb5e48db2a7b2ff52f2399467
|
[
"MIT"
] | null | null | null |
dims.py
|
vkopey/Thread-turning-simulator
|
9622d6294ddec56bb5e48db2a7b2ff52f2399467
|
[
"MIT"
] | 1
|
2018-06-28T10:46:44.000Z
|
2019-05-20T11:17:14.000Z
|
dims.py
|
vkopey/Thread-turning-simulator
|
9622d6294ddec56bb5e48db2a7b2ff52f2399467
|
[
"MIT"
] | 1
|
2019-07-16T18:32:51.000Z
|
2019-07-16T18:32:51.000Z
|
# -*- coding: utf-8 -*-
from math import atan, degrees, tan
class Dim:
    "Class describing the concept of a dimension"
    n=0.0 # nominal size
    ei=0.0 # lower deviation
    es=0.0 # upper deviation
    v=0.0 # actual value
    def __init__(self,n,ei,es,doc):
        "constructor"
        self.n=n
        self.ei=ei
        self.es=es
        self.__doc__=doc.decode('utf-8')
    def min(self):
        "returns the minimum size"
        return self.n+self.ei
    def max(self):
        "returns the maximum size"
        return self.n+self.es
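# Worked example (illustrative): a dimension declared as Dim(80, -0.5, 0.5, "...")
# has min() = 80 + (-0.5) = 79.5 and max() = 80 + 0.5 = 80.5, i.e. the limits are
# simply the nominal size plus the lower/upper deviation.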
zn80={'D':Dim(80,-0.5,0.5,"outer diameter of the nipple pipe"),
      'D1':Dim(76.5,-0.5,0.5,"outer diameter of the shoulder face"),
      'd3':Dim(25.0,-0.6,0.6,"inner diameter of the nipple"),
      'd4':Dim(36.0,-0.6,0.6,"inner diameter of the coupling"),
      'L2':Dim(240.0,0.0,0.0,"length of the coupling*"),
      'dsr':Dim(60.080,0.0,0.0,"mean thread diameter in the basic plane"),
      'd5':Dim(66.674,0.0,0.0,"diameter of the larger base of the nipple cone*"),
      'd6':Dim(47.674,0.0,0.0,"diameter of the smaller base of the nipple cone*"),
      'l3':Dim(76.0,-2.0,0,"length of the nipple cone"),
      'd7':Dim(68.3,-0.6,0.6,"diameter of the conical recess in the plane of the coupling face"),
      'd8':Dim(61.422,0.0,0.0,"inner thread diameter in the plane of the coupling face*"),
      'l4':Dim(82.0,0.0,0.0,"distance from the face to the end of the full-profile thread of the coupling (not less than)"),
      'P':Dim(5.080,0.0,0.0,"thread pitch parallel to the thread axis"),
      'fi':Dim(atan(0.25/2),0.0,0.0,"inclination angle (rad.)"),
      'H':Dim(4.376,0.0,0.0,"height of the sharp-pointed profile"),
      'h1':Dim(2.993,0.0,0.0,"thread profile height"),
      'h':Dim(2.626,0.0,0.0,"working height of the profile"),
      'l':Dim(0.875,0.0,0.0,"height of crest truncation"),
      'f':Dim(0.508,0.0,0.0,"root truncation"),
      'a':Dim(1.016,0.0,0.0,"flat*"),
      'r':Dim(0.508,0.0,0.0,"root fillet radius*"),
      'r_':Dim(0.38,0.0,0.0,"blend radius (not more than)"),
      'lsr':Dim(15.875,0.0,0.0,"distance from the coupling/nipple face to the basic plane")}
class ZN: pass
d=ZN()
for key,value in zn80.iteritems():
setattr(d,key,value.n)
# auxiliary parameters:
h=tan(d.fi)*(d.l4-d.lsr)
d._r=d.dsr/2-h # radius at the mean diameter in the l4 plane (the smaller radius of the coupling cone)
x1,y1 = -d.H/2, 0 # displacement vector to 0,0 of the mean-diameter point of the nipple cutter
x2,y2 = -d.H/2+tan(d.fi)*d.P/2, d.P/2 # the same for the coupling cutter
d._v1 = x1+d._r, y1+d.l3-d.l4, 0 # initial position of the nipple cutter
d._v2 = x2+d._r, y2+d.l3-d.l4, 0 # initial position of the coupling cutter
if __name__=='__main__':
print d.d3
#print d.d3.__doc__
| 39.646154
| 100
| 0.611176
|
ddec13a88098a0f7c5f31472189a870dfa84115a
| 32,262
|
py
|
Python
|
safe_relay_service/relay/tasks.py
|
CirclesUBI/safe-relay-service
|
e6844e2b92316ddc099d5b39711487a6e46d5a93
|
[
"MIT"
] | 2
|
2020-10-19T09:59:11.000Z
|
2021-02-04T12:26:12.000Z
|
safe_relay_service/relay/tasks.py
|
CirclesUBI/safe-relay-service
|
e6844e2b92316ddc099d5b39711487a6e46d5a93
|
[
"MIT"
] | 24
|
2019-12-11T14:43:38.000Z
|
2022-03-01T12:37:24.000Z
|
safe_relay_service/relay/tasks.py
|
CirclesUBI/safe-relay-service
|
e6844e2b92316ddc099d5b39711487a6e46d5a93
|
[
"MIT"
] | null | null | null |
from datetime import timedelta
from typing import List
from django.conf import settings
from django.utils import timezone
from celery import app
from celery.utils.log import get_task_logger
from ethereum.utils import check_checksum, checksum_encode, mk_contract_address
from redis.exceptions import LockError
from gnosis.eth import EthereumClientProvider, TransactionAlreadyImported
from gnosis.eth.constants import NULL_ADDRESS
from safe_relay_service.gas_station.gas_station import GasStationProvider
from .models import (SafeContract, SafeCreation, SafeCreation2, SafeFunding,
SafeMultisigTx)
from .repositories.redis_repository import RedisRepository
from .services import (Erc20EventsServiceProvider, FundingServiceProvider,
NotificationServiceProvider,
SafeCreationServiceProvider, TransactionServiceProvider,
CirclesService, GraphQLService)
from .services.safe_creation_service import NotEnoughFundingForCreation
logger = get_task_logger(__name__)
# Lock timeout of 2 minutes (just in the case that the application hangs to avoid a redis deadlock)
LOCK_TIMEOUT = 60 * 2
@app.shared_task(bind=True, max_retries=3, soft_time_limit=LOCK_TIMEOUT)
def fund_deployer_task(self, safe_address: str, retry: bool = True) -> None:
"""
    Check if the user has sent enough ether or tokens to the safe account.
    If every condition is met, ether is sent to the deployer address and `check_deployer_funded_task`
    is called to check that the tx is mined.
    If everything goes well, `safe_funded=True` and `deployer_funded_tx_hash=tx_hash` are set in SafeFunding
:param safe_address: safe account
:param retry: if True, retries are allowed, otherwise don't retry
"""
safe_contract = SafeContract.objects.get(address=safe_address)
try:
safe_creation = SafeCreation.objects.get(safe=safe_address)
except SafeCreation.DoesNotExist:
deploy_create2_safe_task.delay(safe_address)
return
deployer_address = safe_creation.deployer
payment = safe_creation.payment
# These asserts just to make sure we are not wasting money
assert check_checksum(safe_address)
assert check_checksum(deployer_address)
assert checksum_encode(mk_contract_address(sender=deployer_address, nonce=0)) == safe_address
assert payment > 0
redis = RedisRepository().redis
with redis.lock('locks:fund_deployer_task', timeout=LOCK_TIMEOUT):
ethereum_client = EthereumClientProvider()
safe_funding, _ = SafeFunding.objects.get_or_create(safe=safe_contract)
# Nothing to do if everything is funded and mined
if safe_funding.is_all_funded():
logger.debug('Nothing to do here for safe %s. Is all funded', safe_address)
return
# If receipt exists already, let's check
if safe_funding.deployer_funded_tx_hash and not safe_funding.deployer_funded:
logger.debug('Safe %s deployer has already been funded. Checking tx_hash %s',
safe_address,
safe_funding.deployer_funded_tx_hash)
check_deployer_funded_task.delay(safe_address)
elif not safe_funding.deployer_funded:
confirmations = settings.SAFE_FUNDING_CONFIRMATIONS
last_block_number = ethereum_client.current_block_number
assert (last_block_number - confirmations) > 0
if safe_creation.payment_token and safe_creation.payment_token != NULL_ADDRESS:
safe_balance = ethereum_client.erc20.get_balance(safe_address, safe_creation.payment_token)
else:
safe_balance = ethereum_client.get_balance(safe_address, last_block_number - confirmations)
if safe_balance >= payment:
logger.info('Found %d balance for safe=%s', safe_balance, safe_address)
safe_funding.safe_funded = True
safe_funding.save()
# Check deployer has no eth. This should never happen
balance = ethereum_client.get_balance(deployer_address)
if balance:
logger.error('Deployer=%s for safe=%s has eth already (%d wei)',
deployer_address, safe_address, balance)
else:
logger.info('Safe=%s. Transferring deployment-cost=%d to deployer=%s',
safe_address, safe_creation.wei_deploy_cost(), deployer_address)
tx_hash = FundingServiceProvider().send_eth_to(deployer_address,
safe_creation.wei_deploy_cost(),
retry=True)
if tx_hash:
tx_hash = tx_hash.hex()
logger.info('Safe=%s. Transferred deployment-cost=%d to deployer=%s with tx-hash=%s',
safe_address, safe_creation.wei_deploy_cost(), deployer_address, tx_hash)
safe_funding.deployer_funded_tx_hash = tx_hash
safe_funding.save()
logger.debug('Safe=%s deployer has just been funded. tx_hash=%s', safe_address, tx_hash)
check_deployer_funded_task.apply_async((safe_address,), countdown=20)
else:
logger.error('Cannot send payment=%d to deployer safe=%s', payment, deployer_address)
if retry:
raise self.retry(countdown=30)
else:
logger.info('Not found required balance=%d for safe=%s', payment, safe_address)
if retry:
raise self.retry(countdown=30)
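# Rough sketch of the legacy (pre-create2) creation pipeline implemented by the
# tasks in this module (an assumed reading of the code, not an authoritative spec;
# deploy_safes_task is assumed to be triggered periodically by the scheduler):
#
#   fund_deployer_task(safe)              # wait for the user's payment, fund the deployer
#       -> check_deployer_funded_task     # poll until the funding tx is mined
#           -> deploy_safes_task          # broadcast the signed creation tx
#               -> send_create_notification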
@app.shared_task(bind=True,
soft_time_limit=LOCK_TIMEOUT,
max_retries=settings.SAFE_CHECK_DEPLOYER_FUNDED_RETRIES,
default_retry_delay=settings.SAFE_CHECK_DEPLOYER_FUNDED_DELAY)
def check_deployer_funded_task(self, safe_address: str, retry: bool = True) -> None:
"""
    Check the `deployer_funded_tx_hash`. If the receipt can be retrieved, `deployer_funded=True` is set in SafeFunding.
    If not, `deployer_funded_tx_hash` is set back to `None` after the configured number of retries
:param safe_address: safe account
:param retry: if True, retries are allowed, otherwise don't retry
"""
try:
redis = RedisRepository().redis
with redis.lock(f"tasks:check_deployer_funded_task:{safe_address}", blocking_timeout=1, timeout=LOCK_TIMEOUT):
ethereum_client = EthereumClientProvider()
logger.debug('Starting check deployer funded task for safe=%s', safe_address)
safe_funding = SafeFunding.objects.get(safe=safe_address)
deployer_funded_tx_hash = safe_funding.deployer_funded_tx_hash
if safe_funding.deployer_funded:
logger.warning('Tx-hash=%s for safe %s is already checked', deployer_funded_tx_hash, safe_address)
return
elif not deployer_funded_tx_hash:
logger.error('No deployer_funded_tx_hash for safe=%s', safe_address)
return
logger.debug('Checking safe=%s deployer tx-hash=%s', safe_address, deployer_funded_tx_hash)
if ethereum_client.get_transaction_receipt(deployer_funded_tx_hash):
logger.info('Found transaction to deployer of safe=%s with receipt=%s', safe_address,
deployer_funded_tx_hash)
safe_funding.deployer_funded = True
safe_funding.save()
else:
logger.debug('Not found transaction receipt for tx-hash=%s', deployer_funded_tx_hash)
# If no more retries
if not retry or (self.request.retries == self.max_retries):
safe_creation = SafeCreation.objects.get(safe=safe_address)
balance = ethereum_client.get_balance(safe_creation.deployer)
if balance >= safe_creation.wei_deploy_cost():
logger.warning('Safe=%s. Deployer=%s. Cannot find transaction receipt with tx-hash=%s, '
'but balance is there. This should never happen',
safe_address, safe_creation.deployer, deployer_funded_tx_hash)
safe_funding.deployer_funded = True
safe_funding.save()
else:
logger.error('Safe=%s. Deployer=%s. Transaction receipt with tx-hash=%s not mined after %d '
'retries. Setting `deployer_funded_tx_hash` back to `None`',
safe_address,
safe_creation.deployer,
deployer_funded_tx_hash,
self.request.retries)
safe_funding.deployer_funded_tx_hash = None
safe_funding.save()
else:
logger.debug('Retry finding transaction receipt %s', deployer_funded_tx_hash)
if retry:
raise self.retry(countdown=self.request.retries * 10 + 15) # More countdown every retry
except LockError:
logger.info('check_deployer_funded_task is locked for safe=%s', safe_address)
@app.shared_task(soft_time_limit=LOCK_TIMEOUT)
def deploy_safes_task(retry: bool = True) -> None:
"""
    Deploy pending safes (deployer funded and tx-hash checked). The raw creation tx is then sent to the ethereum network.
    If something goes wrong (maybe a reorg), `deployer_funded` will be set to False again and `check_deployer_funded_task`
    is called again.
:param retry: if True, retries are allowed, otherwise don't retry
"""
try:
redis = RedisRepository().redis
with redis.lock("tasks:deploy_safes_task", blocking_timeout=1, timeout=LOCK_TIMEOUT):
ethereum_client = EthereumClientProvider()
logger.debug('Starting deploy safes task')
pending_to_deploy = SafeFunding.objects.pending_just_to_deploy()
logger.debug('%d safes pending to deploy', len(pending_to_deploy))
for safe_funding in pending_to_deploy:
safe_contract = safe_funding.safe
safe_address = safe_contract.address
safe_creation = SafeCreation.objects.get(safe=safe_contract)
safe_deployed_tx_hash = safe_funding.safe_deployed_tx_hash
if not safe_deployed_tx_hash:
# Deploy the Safe
try:
creation_tx_hash = ethereum_client.send_raw_transaction(safe_creation.signed_tx)
if creation_tx_hash:
creation_tx_hash = creation_tx_hash.hex()
logger.info('Safe=%s creation tx has just been sent to the network with tx-hash=%s',
safe_address, creation_tx_hash)
safe_funding.safe_deployed_tx_hash = creation_tx_hash
safe_funding.save()
except TransactionAlreadyImported:
logger.warning("Safe=%s transaction was already imported by the node", safe_address)
safe_funding.safe_deployed_tx_hash = safe_creation.tx_hash
safe_funding.save()
except ValueError:
# Usually "ValueError: {'code': -32000, 'message': 'insufficient funds for gas*price+value'}"
# A reorg happened
logger.warning("Safe=%s was affected by reorg, let's check again receipt for tx-hash=%s",
safe_address, safe_funding.deployer_funded_tx_hash, exc_info=True)
safe_funding.deployer_funded = False
safe_funding.save()
check_deployer_funded_task.apply_async((safe_address,), {'retry': retry}, countdown=20)
else:
# Check if safe proxy deploy transaction has already been sent to the network
logger.debug('Safe=%s creation tx has already been sent to the network with tx-hash=%s',
safe_address, safe_deployed_tx_hash)
if ethereum_client.check_tx_with_confirmations(safe_deployed_tx_hash,
settings.SAFE_FUNDING_CONFIRMATIONS):
logger.info('Safe=%s was deployed', safe_funding.safe.address)
safe_funding.safe_deployed = True
safe_funding.save()
# Send creation notification
send_create_notification.delay(safe_address, safe_creation.owners)
elif (safe_funding.modified + timedelta(minutes=10) < timezone.now()
and not ethereum_client.get_transaction_receipt(safe_deployed_tx_hash)):
# A reorg happened
logger.warning('Safe=%s deploy tx=%s was not found after 10 minutes. Trying deploying again...',
safe_funding.safe.address, safe_deployed_tx_hash)
safe_funding.safe_deployed_tx_hash = None
safe_funding.save()
except LockError:
pass
@app.shared_task(bind=True, soft_time_limit=LOCK_TIMEOUT, max_retries=3)
def deploy_create2_safe_task(self, safe_address: str, retry: bool = True) -> None:
"""
    Check if the user has sent enough ether or tokens to the safe account.
    If every condition is met, the safe is deployed
:param safe_address: safe account
:param retry: if True, retries are allowed, otherwise don't retry
"""
assert check_checksum(safe_address)
redis = RedisRepository().redis
lock_name = f'locks:deploy_create2_safe:{safe_address}'
try:
with redis.lock(lock_name, blocking_timeout=1, timeout=LOCK_TIMEOUT):
try:
SafeCreationServiceProvider().deploy_create2_safe_tx(safe_address)
except NotEnoughFundingForCreation:
if retry:
raise self.retry(countdown=30)
except LockError:
logger.warning('Cannot get lock={} for deploying safe={}'.format(lock_name, safe_address))
@app.shared_task(soft_time_limit=LOCK_TIMEOUT)
def check_create2_deployed_safes_task() -> None:
"""
Check if create2 safes were deployed and store the `blockNumber` if there are enough confirmations
"""
try:
redis = RedisRepository().redis
with redis.lock('tasks:check_create2_deployed_safes_task', blocking_timeout=1, timeout=LOCK_TIMEOUT):
ethereum_client = EthereumClientProvider()
confirmations = 6
current_block_number = ethereum_client.current_block_number
for safe_creation2 in SafeCreation2.objects.pending_to_check():
safe_address = safe_creation2.safe_id
ethereum_tx = TransactionServiceProvider().create_or_update_ethereum_tx(safe_creation2.tx_hash)
if ethereum_tx and ethereum_tx.block_id is not None:
block_number = ethereum_tx.block_id
if (current_block_number - block_number) >= confirmations:
logger.info('Safe=%s with tx-hash=%s was confirmed in block-number=%d',
safe_address, safe_creation2.tx_hash, block_number)
safe_creation2.block_number = block_number
safe_creation2.save(update_fields=['block_number'])
else:
# If safe was not included in any block after 30 minutes (mempool limit is 30 minutes)
# try to increase a little the gas price
if safe_creation2.modified + timedelta(minutes=30) < timezone.now():
logger.warning('Safe=%s with tx-hash=%s was not deployed after 30 minutes. '
'Increasing the gas price', safe_address, safe_creation2.tx_hash)
safe_creation2 = SafeCreationServiceProvider().deploy_again_create2_safe_tx(safe_address)
logger.warning('Safe=%s has a new tx-hash=%s with increased gas price.', safe_address,
safe_creation2.tx_hash)
for safe_creation2 in SafeCreation2.objects.not_deployed().filter(
created__gte=timezone.now() - timedelta(days=10)):
deploy_create2_safe_task.delay(safe_creation2.safe.address, retry=False)
except LockError:
pass
@app.shared_task(soft_time_limit=300)
def send_create_notification(safe_address: str, owners: List[str]) -> None:
"""
Send create notification to owner
:param safe_address: Address of the safe created
:param owners: List of owners of the safe
"""
logger.info('Safe=%s creation ended, sending notification to %s', safe_address, owners)
return NotificationServiceProvider().send_create_notification(safe_address, owners)
@app.shared_task(soft_time_limit=300)
def check_balance_of_accounts_task() -> bool:
"""
    Checks if the balances of the relayer accounts (tx sender, safe funder) are below the configured threshold
    :return: True if every account has enough ether, False otherwise
"""
balance_warning_wei = settings.SAFE_ACCOUNTS_BALANCE_WARNING
addresses = FundingServiceProvider().funder_account.address, TransactionServiceProvider().tx_sender_account.address
ethereum_client = EthereumClientProvider()
result = True
for address in addresses:
balance_wei = ethereum_client.get_balance(address)
if balance_wei <= balance_warning_wei:
logger.error('Relayer account=%s current balance=%d . Balance must be greater than %d',
address, balance_wei, balance_warning_wei)
result = False
return result
@app.shared_task(soft_time_limit=60 * 30)
def find_erc_20_721_transfers_task() -> int:
"""
    Find and process ERC20/721 transfer events for existing safes
:return: Number of safes processed
"""
number_safes = 0
try:
redis = RedisRepository().redis
with redis.lock('tasks:find_internal_txs_task', blocking_timeout=1, timeout=60 * 30):
number_safes = Erc20EventsServiceProvider().process_all()
logger.info('Find ERC20/721 task processed %d safes', number_safes)
except LockError:
pass
return number_safes
@app.shared_task(soft_time_limit=60)
def check_pending_transactions() -> int:
"""
    Find txs that have not been mined after a while and resend them
:return: Number of pending transactions
"""
number_txs = 0
try:
redis = RedisRepository().redis
with redis.lock('tasks:check_pending_transactions', blocking_timeout=1, timeout=60):
tx_not_mined_alert = settings.SAFE_TX_NOT_MINED_ALERT_MINUTES
multisig_txs = SafeMultisigTx.objects.pending(
older_than=tx_not_mined_alert * 60
).select_related(
'ethereum_tx'
)
for multisig_tx in multisig_txs:
gas_price = GasStationProvider().get_gas_prices().fast
old_fee = multisig_tx.ethereum_tx.fee
ethereum_tx = TransactionServiceProvider().resend(gas_price, multisig_tx)
if ethereum_tx:
logger.error('Safe=%s - Tx with tx-hash=%s and safe-tx-hash=%s has not been mined after '
'a while, created=%s. Sent again with tx-hash=%s. Old fee=%d and new fee=%d',
multisig_tx.safe_id, multisig_tx.ethereum_tx_id,
multisig_tx.safe_tx_hash, multisig_tx.created, ethereum_tx.tx_hash,
old_fee, ethereum_tx.fee)
else:
logger.error('Safe=%s - Tx with tx-hash=%s and safe-tx-hash=%s has not been mined after '
'a while, created=%s',
multisig_tx.safe_id, multisig_tx.ethereum_tx_id,
multisig_tx.safe_tx_hash, multisig_tx.created)
number_txs += 1
except LockError:
pass
return number_txs
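# Illustrative note: the resend above is a plain "bump the fee and retry" pattern.
# A standalone sketch of the same idea (resend_with_fast_gas is a hypothetical
# helper, not part of this module):
#
#   def resend_with_fast_gas(multisig_tx):
#       fast_price = GasStationProvider().get_gas_prices().fast
#       return TransactionServiceProvider().resend(fast_price, multisig_tx)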
@app.shared_task(soft_time_limit=60)
def check_and_update_pending_transactions() -> int:
"""
Check if pending txs have been mined and update them
:return: Number of pending transactions
"""
number_txs = 0
try:
redis = RedisRepository().redis
with redis.lock('tasks:check_and_update_pending_transactions', blocking_timeout=1, timeout=60):
transaction_service = TransactionServiceProvider()
multisig_txs = SafeMultisigTx.objects.pending(older_than=150).select_related('ethereum_tx')
for multisig_tx in multisig_txs:
ethereum_tx = transaction_service.create_or_update_ethereum_tx(multisig_tx.ethereum_tx_id)
if ethereum_tx and ethereum_tx.block_id:
if ethereum_tx.success:
logger.info('Safe=%s - Tx with tx-hash=%s was mined on block=%d ',
multisig_tx.safe_id, ethereum_tx.tx_hash, ethereum_tx.block_id)
else:
logger.error('Safe=%s - Tx with tx-hash=%s was mined on block=%d and failed',
multisig_tx.safe_id, ethereum_tx.tx_hash, ethereum_tx.block_id)
number_txs += 1
except LockError:
pass
return number_txs
@app.shared_task(bind=True, soft_time_limit=LOCK_TIMEOUT, max_retries=6)
def begin_circles_onboarding_task(self, safe_address: str) -> None:
"""
    Starts a multi-step onboarding task for Circles users which 1. funds and
    deploys a Gnosis Safe for them 2. funds the deployment of their Token.
:param safe_address: Address of the safe to-be-created
"""
assert check_checksum(safe_address)
redis = RedisRepository().redis
lock_name = f'locks:begin_circles_onboarding_task:{safe_address}'
try:
with redis.lock(lock_name, blocking_timeout=1, timeout=LOCK_TIMEOUT):
ethereum_client = EthereumClientProvider()
# Do nothing if Token is already deployed
if CirclesService(ethereum_client).is_token_deployed(safe_address):
logger.info('Token is already deployed for {}'.format(safe_address))
return
logger.info('No token found, start onboarding for Circles Safe {}'.format(safe_address))
# Deploy Safe when it does not exist yet
safe_creation2 = SafeCreation2.objects.get(safe=safe_address)
if not safe_creation2.tx_hash:
logger.info('Safe does not exist yet, start deploying it {}'.format(safe_address))
circles_onboarding_safe_task.delay(safe_address)
else:
logger.info('Safe exists, we are done with safe {}'.format(safe_address))
except LockError:
pass
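# Rough sketch of the Circles user onboarding chain implemented by this task and
# the one below (an assumed summary, not an authoritative spec):
#
#   begin_circles_onboarding_task(safe)        # token missing and Safe not deployed?
#       -> circles_onboarding_safe_task(safe)  # fund and deploy the Safe once enough
#                                              # incoming trust connections exist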
@app.shared_task(bind=True, soft_time_limit=LOCK_TIMEOUT, max_retries=3)
def circles_onboarding_safe_task(self, safe_address: str) -> None:
"""
Check if create2 Safe has enough incoming trust connections to fund and
deploy it
:param safe_address: Address of the safe to-be-created
"""
assert check_checksum(safe_address)
try:
redis = RedisRepository().redis
lock_name = f'locks:circles_onboarding_safe_task:{safe_address}'
with redis.lock(lock_name, blocking_timeout=1, timeout=LOCK_TIMEOUT):
logger.info('Check deploying Safe .. {}'.format(safe_address))
try:
SafeCreationServiceProvider().deploy_create2_safe_tx(safe_address)
except SafeCreation2.DoesNotExist:
pass
except NotEnoughFundingForCreation:
logger.info('Safe does not have enough fund for deployment, '
'check trust connections {}'.format(safe_address))
# If we have enough trust connections, fund safe
if GraphQLService().check_trust_connections(safe_address):
logger.info('Fund Safe deployment for {}'.format(safe_address))
ethereum_client = EthereumClientProvider()
safe_creation = SafeCreation2.objects.get(safe=safe_address)
# Estimate costs of safe creation
safe_deploy_cost = safe_creation.wei_estimated_deploy_cost()
logger.info('Estimating %d for safe creation', safe_deploy_cost)
# Estimate costs of token creation
transaction_service = TransactionServiceProvider()
token_deploy_cost = transaction_service.estimate_circles_signup_tx(safe_address)
logger.info('Estimating %d for token deployment', token_deploy_cost)
# Find total onboarding costs
payment = safe_deploy_cost + token_deploy_cost
# Get current safe balance
safe_balance = ethereum_client.get_balance(safe_address)
logger.info('Found %d balance for token deployment of safe=%s. Required=%d',
safe_balance, safe_address, payment)
if safe_balance >= payment:
logger.info('Onboarding is already funded {}'.format(safe_address))
return
FundingServiceProvider().send_eth_to(safe_address,
payment,
gas=24000)
# Retry later to check for enough funding and successful deployment
raise self.retry(countdown=30)
else:
logger.info('Not enough trust connections for funding deployment {}'.format(safe_address))
except LockError:
pass
@app.shared_task(bind=True, soft_time_limit=LOCK_TIMEOUT, max_retries=6)
def begin_circles_onboarding_organization_task(self, safe_address: str, owner_address: str) -> None:
"""
    Starts a multi-step onboarding task for Circles organizations which 1. funds and
    deploys a Gnosis Safe for them 2. funds the deployment of their Organization.
:param safe_address: Address of the safe to-be-created
:param owner_address: Address of the first safe owner
"""
assert check_checksum(safe_address)
assert check_checksum(owner_address)
redis = RedisRepository().redis
lock_name = f'locks:begin_circles_onboarding_organization_task:{safe_address}'
try:
with redis.lock(lock_name, blocking_timeout=1, timeout=LOCK_TIMEOUT):
logger.info('Start onboarding for Circles Organization Safe {}'.format(safe_address))
# Deploy Safe when it does not exist yet
safe_creation2 = SafeCreation2.objects.get(safe=safe_address)
if not safe_creation2.tx_hash:
logger.info('Safe does not exist yet, start deploying it {}'.format(safe_address))
circles_onboarding_organization_safe_task.delay(safe_address, owner_address)
# Retry later to check for signup funding
raise self.retry(countdown=30)
else:
logger.info('Safe exists, start funding organizationSignup for {}'.format(safe_address))
# Fund deployment when Organization does not exist yet
circles_onboarding_organization_signup_task.delay(safe_address)
except LockError:
pass
@app.shared_task(soft_time_limit=LOCK_TIMEOUT, max_retries=3)
def circles_onboarding_organization_safe_task(safe_address: str, owner_address: str) -> None:
"""
Check if create2 Safe is being created by a trusted user
:param safe_address: Address of the safe to-be-created
:param owner_address: Address of the first safe owner
"""
assert check_checksum(safe_address)
assert check_checksum(owner_address)
try:
redis = RedisRepository().redis
lock_name = f'locks:circles_onboarding_organization_safe_task:{safe_address}'
with redis.lock(lock_name, blocking_timeout=1, timeout=LOCK_TIMEOUT):
logger.info('Check deploying Safe for organization .. {}'.format(safe_address))
try:
SafeCreationServiceProvider().deploy_create2_safe_tx(safe_address)
except SafeCreation2.DoesNotExist:
pass
except NotEnoughFundingForCreation:
logger.info('Safe does not have enough fund for deployment, '
'check owner {}'.format(owner_address))
# If we have enough trust connections, fund safe
if GraphQLService().check_trust_connections_by_user(owner_address):
logger.info('Fund Safe deployment for {}'.format(safe_address))
safe_creation = SafeCreation2.objects.get(safe=safe_address)
safe_deploy_cost = safe_creation.wei_estimated_deploy_cost()
FundingServiceProvider().send_eth_to(safe_address,
safe_deploy_cost,
gas=24000)
else:
logger.info('Owner {} does not have a deployed safe'.format(owner_address))
except LockError:
pass
@app.shared_task(soft_time_limit=LOCK_TIMEOUT)
def circles_onboarding_organization_signup_task(safe_address: str) -> None:
"""
    Check if the Organization Safe is already registered in the Hub; if not, fund it
:param safe_address: Address of the created safe
"""
assert check_checksum(safe_address)
# Additional funds for organization deployments (it should at least cover
# one `trust` method call) next to the `organizationSignup` method
ADDITIONAL_START_FUNDS = 100000000000000
try:
redis = RedisRepository().redis
lock_name = f'locks:circles_onboarding_organization_signup_task:{safe_address}'
with redis.lock(lock_name, blocking_timeout=1, timeout=LOCK_TIMEOUT):
logger.info('Fund organizationSignup task for {}'.format(safe_address))
ethereum_client = EthereumClientProvider()
# Do nothing if account already exists in Hub
if CirclesService(ethereum_client).is_organization_deployed(safe_address):
logger.info('Organization is already deployed for {}'.format(safe_address))
return
# Do nothing if the signup is already funded
transaction_service = TransactionServiceProvider()
# Sum `organizationSignup` and additional `trust` transactions
# costs as the organization needs to trust at least one user in the
# beginning to receive more funds
payment = transaction_service.estimate_circles_organization_signup_tx(safe_address) + ADDITIONAL_START_FUNDS
safe_balance = ethereum_client.get_balance(safe_address)
logger.info('Found %d balance for organization deployment of safe=%s. Required=%d',
safe_balance, safe_address, payment)
if safe_balance >= payment:
logger.info('Organization is already funded {}'.format(safe_address))
return
# Otherwise fund deployment
logger.info('Fund Organization {}'.format(safe_address))
FundingServiceProvider().send_eth_to(
safe_address,
payment - safe_balance,
gas=30000,
retry=True
)
except LockError:
pass
| 51.209524
| 120
| 0.632044
|
7ff83a28c7570b23d121fa730b8ac7d7d99015bd
| 5,291
|
py
|
Python
|
tests/test_django_models.py
|
rubickcz/django-choice-enumfields
|
1b11115eb0631c156a788ce9b1b207f672b9a0e9
|
[
"MIT"
] | null | null | null |
tests/test_django_models.py
|
rubickcz/django-choice-enumfields
|
1b11115eb0631c156a788ce9b1b207f672b9a0e9
|
[
"MIT"
] | null | null | null |
tests/test_django_models.py
|
rubickcz/django-choice-enumfields
|
1b11115eb0631c156a788ce9b1b207f672b9a0e9
|
[
"MIT"
] | null | null | null |
# -- encoding: UTF-8 --
from django.core.exceptions import ValidationError
from django.db import connection
import pytest
from .enums import Color, IntegerEnum, LabeledEnum, StateFlow, StateFlowAnyFirst, SubIntegerEnum, Taste, ZeroEnum
from .models import MyModel
@pytest.mark.django_db
def test_field_value():
m = MyModel(color=Color.RED)
m.save()
assert m.color == Color.RED
m = MyModel.objects.filter(color=Color.RED)[0]
assert m.color == Color.RED
# Passing the value should work the same way as passing the enum
assert Color.RED.value == 'r'
m = MyModel.objects.filter(color='r')[0]
assert m.color == Color.RED
with pytest.raises(ValueError):
MyModel.objects.filter(color='xx')[0]
@pytest.mark.django_db
def test_db_value():
m = MyModel(color=Color.RED)
m.save()
cursor = connection.cursor()
cursor.execute('SELECT color FROM %s WHERE id = %%s' % MyModel._meta.db_table, [m.pk])
assert cursor.fetchone()[0] == Color.RED.value
@pytest.mark.django_db
def test_enum_int_field_validators():
if not hasattr(connection.ops, 'integer_field_range'):
return pytest.skip('Needs connection.ops.integer_field_range')
# Make sure that integer_field_range returns a range.
# This is needed to make SQLite emulate a "real" db
orig_method = connection.ops.integer_field_range
connection.ops.integer_field_range = (lambda *args: (-100, 100))
m = MyModel(color=Color.RED)
# Uncache validators property of taste_int
for f in m._meta.fields:
if f.name == 'taste_int':
if 'validators' in f.__dict__:
del f.__dict__['validators']
# Run the validators
m.full_clean()
# Revert integer_field_range method
connection.ops.integer_field_range = orig_method
@pytest.mark.django_db
def test_zero_enum_loads():
# Verifies that we can save and load enums with the value of 0 (zero).
m = MyModel(zero_field=ZeroEnum.ZERO,
color=Color.GREEN)
m.save()
assert m.zero_field == ZeroEnum.ZERO
m = MyModel.objects.get(id=m.id)
assert m.zero_field == ZeroEnum.ZERO
@pytest.mark.django_db
def test_int_enum():
m = MyModel(int_enum=IntegerEnum.A, color=Color.RED)
m.save()
m = MyModel.objects.get(id=m.id)
assert m.int_enum == IntegerEnum.A
assert isinstance(m.int_enum, IntegerEnum)
def test_serialization():
from django.core.serializers.python import Serializer as PythonSerializer
m = MyModel(color=Color.RED, taste=Taste.SALTY)
ser = PythonSerializer()
ser.serialize([m])
fields = ser.getvalue()[0]["fields"]
assert fields["color"] == m.color.value
assert fields["taste"] == m.taste.value
@pytest.mark.django_db
def test_nonunique_label():
obj = MyModel.objects.create(
color=Color.BLUE,
labeled_enum=LabeledEnum.FOOBAR
)
assert obj.labeled_enum is LabeledEnum.FOOBAR
obj = MyModel.objects.get(pk=obj.pk)
assert obj.labeled_enum is LabeledEnum.FOOBAR
def test_sub_enum_field():
with pytest.raises(ValidationError):
MyModel(color=Color.RED, int_enum=IntegerEnum.A, sub_int_enum=SubIntegerEnum.D).full_clean()
MyModel(color=Color.RED, int_enum=IntegerEnum.C).full_clean()
MyModel(color=Color.RED, int_enum=IntegerEnum.A, sub_int_enum=SubIntegerEnum.C).full_clean()
MyModel(color=Color.RED, int_enum=IntegerEnum.B, sub_int_enum=SubIntegerEnum.C).full_clean()
MyModel(color=Color.RED, int_enum=IntegerEnum.B, sub_int_enum=SubIntegerEnum.D).full_clean()
MyModel(color=Color.RED).full_clean()
@pytest.mark.django_db
def test_next_states_enum_field():
model = MyModel.objects.create(color=Color.RED)
with pytest.raises(ValidationError):
# invalid transition from START to END
model.any_first_state = StateFlowAnyFirst.END
model.full_clean()
model.any_first_state = StateFlowAnyFirst.PROCESSING
model.full_clean()
# does not update initial value of any_first_state field
model.save(update_fields=['color'])
with pytest.raises(ValidationError):
# invalid transition from START to END
model.any_first_state = StateFlowAnyFirst.END
model.full_clean()
# initial values of fields during save are updated
model.any_first_state = StateFlowAnyFirst.PROCESSING
model.save()
model.any_first_state = StateFlowAnyFirst.END
model.full_clean()
model.state = StateFlow.PROCESSING
model.save(update_fields=['state'])
assert model.state is StateFlow.PROCESSING
# field values are updated correctly from model loaded from db
model_from_db = MyModel.objects.get(pk=model.pk)
model_from_db.any_first_state = StateFlowAnyFirst.END
model_from_db.full_clean()
with pytest.raises(ValidationError):
# invalid transition from PROCESSING to START
model_from_db.any_first_state = StateFlowAnyFirst.START
model_from_db.full_clean()
MyModel(color=Color.RED, any_first_state=StateFlowAnyFirst.END).full_clean()
def test_initial_enum_field():
MyModel(color=Color.RED, state=StateFlow.START).full_clean()
with pytest.raises(ValidationError):
# END is not initial state
MyModel(color=Color.RED, state=StateFlow.END).full_clean()
| 32.066667
| 113
| 0.715933
|
ad88045039f95387b537725e9f8512ac9d311d45
| 14,542
|
py
|
Python
|
homeassistant/helpers/entity_component.py
|
wanman/home-assistant
|
633aaed22b0de0129d1e72e23bcd974b9ce13656
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/helpers/entity_component.py
|
wanman/home-assistant
|
633aaed22b0de0129d1e72e23bcd974b9ce13656
|
[
"Apache-2.0"
] | 1
|
2017-03-10T22:17:06.000Z
|
2017-03-10T22:17:06.000Z
|
homeassistant/helpers/entity_component.py
|
wanman/home-assistant
|
633aaed22b0de0129d1e72e23bcd974b9ce13656
|
[
"Apache-2.0"
] | null | null | null |
"""Helpers for components that manage entities."""
import asyncio
from datetime import timedelta
from homeassistant import config as conf_util
from homeassistant.bootstrap import (
async_prepare_setup_platform, async_prepare_setup_component)
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_SCAN_INTERVAL, CONF_ENTITY_NAMESPACE,
DEVICE_DEFAULT_NAME)
from homeassistant.core import callback, valid_entity_id
from homeassistant.exceptions import HomeAssistantError
from homeassistant.loader import get_component
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.service import extract_entity_ids
from homeassistant.util.async import (
run_callback_threadsafe, run_coroutine_threadsafe)
DEFAULT_SCAN_INTERVAL = timedelta(seconds=15)
class EntityComponent(object):
"""Helper class that will help a component manage its entities."""
def __init__(self, logger, domain, hass,
scan_interval=DEFAULT_SCAN_INTERVAL, group_name=None):
"""Initialize an entity component."""
self.logger = logger
self.hass = hass
self.domain = domain
self.entity_id_format = domain + '.{}'
self.scan_interval = scan_interval
self.group_name = group_name
self.entities = {}
self.group = None
self.config = None
self._platforms = {
'core': EntityPlatform(self, domain, self.scan_interval, None),
}
self.async_add_entities = self._platforms['core'].async_add_entities
self.add_entities = self._platforms['core'].add_entities
def setup(self, config):
"""Set up a full entity component.
Loads the platforms from the config and will listen for supported
discovered platforms.
"""
run_coroutine_threadsafe(
self.async_setup(config), self.hass.loop
).result()
@asyncio.coroutine
def async_setup(self, config):
"""Set up a full entity component.
Loads the platforms from the config and will listen for supported
discovered platforms.
This method must be run in the event loop.
"""
self.config = config
# Look in config for Domain, Domain 2, Domain 3 etc and load them
tasks = []
for p_type, p_config in config_per_platform(config, self.domain):
tasks.append(self._async_setup_platform(p_type, p_config))
if tasks:
yield from asyncio.wait(tasks, loop=self.hass.loop)
# Generic discovery listener for loading platform dynamically
# Refer to: homeassistant.components.discovery.load_platform()
@callback
def component_platform_discovered(platform, info):
"""Callback to load a platform."""
self.hass.async_add_job(
self._async_setup_platform(platform, {}, info))
discovery.async_listen_platform(
self.hass, self.domain, component_platform_discovered)
def extract_from_service(self, service, expand_group=True):
"""Extract all known entities from a service call.
Will return all entities if no entities specified in call.
Will return an empty list if entities specified but unknown.
"""
return run_callback_threadsafe(
self.hass.loop, self.async_extract_from_service, service,
expand_group
).result()
def async_extract_from_service(self, service, expand_group=True):
"""Extract all known entities from a service call.
Will return all entities if no entities specified in call.
Will return an empty list if entities specified but unknown.
This method must be run in the event loop.
"""
if ATTR_ENTITY_ID not in service.data:
return list(self.entities.values())
return [self.entities[entity_id] for entity_id
in extract_entity_ids(self.hass, service, expand_group)
if entity_id in self.entities]
@asyncio.coroutine
def _async_setup_platform(self, platform_type, platform_config,
discovery_info=None):
"""Setup a platform for this component.
This method must be run in the event loop.
"""
platform = yield from async_prepare_setup_platform(
self.hass, self.config, self.domain, platform_type)
if platform is None:
return
# Config > Platform > Component
scan_interval = (platform_config.get(CONF_SCAN_INTERVAL) or
getattr(platform, 'SCAN_INTERVAL', None) or
self.scan_interval)
entity_namespace = platform_config.get(CONF_ENTITY_NAMESPACE)
key = (platform_type, scan_interval, entity_namespace)
if key not in self._platforms:
self._platforms[key] = EntityPlatform(
self, platform_type, scan_interval, entity_namespace)
entity_platform = self._platforms[key]
try:
self.logger.info("Setting up %s.%s", self.domain, platform_type)
if getattr(platform, 'async_setup_platform', None):
yield from platform.async_setup_platform(
self.hass, platform_config,
entity_platform.async_add_entities, discovery_info
)
else:
yield from self.hass.loop.run_in_executor(
None, platform.setup_platform, self.hass, platform_config,
entity_platform.add_entities, discovery_info
)
self.hass.config.components.add(
'{}.{}'.format(self.domain, platform_type))
except Exception: # pylint: disable=broad-except
self.logger.exception(
'Error while setting up platform %s', platform_type)
def add_entity(self, entity, platform=None, update_before_add=False):
"""Add entity to component."""
return run_coroutine_threadsafe(
self.async_add_entity(entity, platform, update_before_add),
self.hass.loop
).result()
@asyncio.coroutine
def async_add_entity(self, entity, platform=None, update_before_add=False):
"""Add entity to component.
This method must be run in the event loop.
"""
if entity is None or entity in self.entities.values():
return False
entity.hass = self.hass
# update/init entity data
if update_before_add:
if hasattr(entity, 'async_update'):
yield from entity.async_update()
else:
yield from self.hass.loop.run_in_executor(None, entity.update)
if getattr(entity, 'entity_id', None) is None:
object_id = entity.name or DEVICE_DEFAULT_NAME
if platform is not None and platform.entity_namespace is not None:
object_id = '{} {}'.format(platform.entity_namespace,
object_id)
entity.entity_id = async_generate_entity_id(
self.entity_id_format, object_id,
self.entities.keys())
        # Make sure it is valid in case an entity set the value itself
if entity.entity_id in self.entities:
raise HomeAssistantError(
'Entity id already exists: {}'.format(entity.entity_id))
elif not valid_entity_id(entity.entity_id):
raise HomeAssistantError(
'Invalid entity id: {}'.format(entity.entity_id))
self.entities[entity.entity_id] = entity
if hasattr(entity, 'async_added_to_hass'):
yield from entity.async_added_to_hass()
yield from entity.async_update_ha_state()
return True
def update_group(self):
"""Set up and/or update component group."""
run_callback_threadsafe(
self.hass.loop, self.async_update_group).result()
@asyncio.coroutine
def async_update_group(self):
"""Set up and/or update component group.
This method must be run in the event loop.
"""
if self.group is None and self.group_name is not None:
group = get_component('group')
self.group = yield from group.Group.async_create_group(
self.hass, self.group_name, self.entities.keys(),
user_defined=False
)
elif self.group is not None:
yield from self.group.async_update_tracked_entity_ids(
self.entities.keys())
def reset(self):
"""Remove entities and reset the entity component to initial values."""
run_coroutine_threadsafe(self.async_reset(), self.hass.loop).result()
@asyncio.coroutine
def async_reset(self):
"""Remove entities and reset the entity component to initial values.
This method must be run in the event loop.
"""
tasks = [platform.async_reset() for platform
in self._platforms.values()]
if tasks:
yield from asyncio.wait(tasks, loop=self.hass.loop)
self._platforms = {
'core': self._platforms['core']
}
self.entities = {}
self.config = None
if self.group is not None:
yield from self.group.async_stop()
self.group = None
def prepare_reload(self):
"""Prepare reloading this entity component."""
return run_coroutine_threadsafe(
self.async_prepare_reload(), loop=self.hass.loop).result()
@asyncio.coroutine
def async_prepare_reload(self):
"""Prepare reloading this entity component.
This method must be run in the event loop.
"""
try:
conf = yield from \
conf_util.async_hass_config_yaml(self.hass)
except HomeAssistantError as err:
self.logger.error(err)
return None
conf = yield from async_prepare_setup_component(
self.hass, conf, self.domain)
if conf is None:
return None
yield from self.async_reset()
return conf
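# Typical usage sketch for EntityComponent inside a component's setup() (a
# non-authoritative illustration; 'light', _LOGGER and SCAN_INTERVAL are
# placeholders):
#
#   component = EntityComponent(_LOGGER, 'light', hass, SCAN_INTERVAL)
#   component.setup(config)           # loads platforms from config + discovery
#   component.add_entities([entity])  # delegates to the 'core' EntityPlatform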
class EntityPlatform(object):
"""Keep track of entities for a single platform and stay in loop."""
def __init__(self, component, platform, scan_interval, entity_namespace):
"""Initalize the entity platform."""
self.component = component
self.platform = platform
self.scan_interval = scan_interval
self.entity_namespace = entity_namespace
self.platform_entities = []
self._async_unsub_polling = None
self._process_updates = asyncio.Lock(loop=component.hass.loop)
def add_entities(self, new_entities, update_before_add=False):
"""Add entities for a single platform."""
if update_before_add:
for entity in new_entities:
entity.update()
run_coroutine_threadsafe(
self.async_add_entities(list(new_entities), False),
self.component.hass.loop
).result()
@asyncio.coroutine
def async_add_entities(self, new_entities, update_before_add=False):
"""Add entities for a single platform async.
This method must be run in the event loop.
"""
# handle empty list from component/platform
if not new_entities:
return
tasks = [self._async_process_entity(entity, update_before_add)
for entity in new_entities]
yield from asyncio.wait(tasks, loop=self.component.hass.loop)
yield from self.component.async_update_group()
if self._async_unsub_polling is not None or \
not any(entity.should_poll for entity
in self.platform_entities):
return
self._async_unsub_polling = async_track_time_interval(
self.component.hass, self._update_entity_states, self.scan_interval
)
@asyncio.coroutine
def _async_process_entity(self, new_entity, update_before_add):
"""Add entities to StateMachine."""
ret = yield from self.component.async_add_entity(
new_entity, self, update_before_add=update_before_add
)
if ret:
self.platform_entities.append(new_entity)
@asyncio.coroutine
def async_reset(self):
"""Remove all entities and reset data.
This method must be run in the event loop.
"""
if not self.platform_entities:
return
tasks = [entity.async_remove() for entity in self.platform_entities]
yield from asyncio.wait(tasks, loop=self.component.hass.loop)
if self._async_unsub_polling is not None:
self._async_unsub_polling()
self._async_unsub_polling = None
@asyncio.coroutine
def _update_entity_states(self, now):
"""Update the states of all the polling entities.
        To protect from flooding the executor, we will update async entities
        in parallel and other entities sequentially.
This method must be run in the event loop.
"""
if self._process_updates.locked():
self.component.logger.warning(
"Updating %s %s took longer than the scheduled update "
"interval %s", self.platform, self.component.domain,
self.scan_interval)
return
with (yield from self._process_updates):
tasks = []
to_update = []
for entity in self.platform_entities:
if not entity.should_poll:
continue
update_coro = entity.async_update_ha_state(True)
if hasattr(entity, 'async_update'):
tasks.append(
self.component.hass.loop.create_task(update_coro))
else:
to_update.append(update_coro)
for update_coro in to_update:
try:
yield from update_coro
except Exception: # pylint: disable=broad-except
self.component.logger.exception(
                        'Error while updating entity from %s in %s',
self.platform, self.component.domain)
if tasks:
yield from asyncio.wait(tasks, loop=self.component.hass.loop)
| 35.99505
| 79
| 0.630587
|
ae69c11f416f244d870439628fed39cdddb017f3
| 11,010
|
py
|
Python
|
frappe/core/doctype/user/test_user.py
|
ektai/frappe3
|
44aa948b4d5a0d729eacfb3dabdc9c8894ae1799
|
[
"MIT"
] | null | null | null |
frappe/core/doctype/user/test_user.py
|
ektai/frappe3
|
44aa948b4d5a0d729eacfb3dabdc9c8894ae1799
|
[
"MIT"
] | null | null | null |
frappe/core/doctype/user/test_user.py
|
ektai/frappe3
|
44aa948b4d5a0d729eacfb3dabdc9c8894ae1799
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, unittest
from frappe.model.delete_doc import delete_doc
from frappe.utils.data import today, add_to_date
from frappe import _dict
from frappe.limits import update_limits, clear_limit
from frappe.utils import get_url
from frappe.core.doctype.user.user import get_total_users
from frappe.core.doctype.user.user import MaxUsersReachedError, test_password_strength
from frappe.core.doctype.user.user import extract_mentions
import requests
test_records = frappe.get_test_records('User')
class TestUser(unittest.TestCase):
def tearDown(self):
# disable password strength test
frappe.db.set_value("System Settings", "System Settings", "enable_password_policy", 0)
frappe.db.set_value("System Settings", "System Settings", "minimum_password_score", "")
def test_user_type(self):
new_user = frappe.get_doc(dict(doctype='User', email='test-for-type@ektai.mail',
first_name='Tester')).insert()
self.assertEqual(new_user.user_type, 'Website User')
# social login userid for frappe
self.assertTrue(new_user.social_logins[0].userid)
self.assertEqual(new_user.social_logins[0].provider, "frappe")
# role with desk access
new_user.add_roles('_Test Role 2')
new_user.save()
self.assertEqual(new_user.user_type, 'System User')
# clear role
new_user.roles = []
new_user.save()
self.assertEqual(new_user.user_type, 'Website User')
# role without desk access
new_user.add_roles('_Test Role 4')
new_user.save()
self.assertEqual(new_user.user_type, 'Website User')
delete_contact(new_user.name)
frappe.delete_doc('User', new_user.name)
def test_delete(self):
frappe.get_doc("User", "test@ektai.mail").add_roles("_Test Role 2")
self.assertRaises(frappe.LinkExistsError, delete_doc, "Role", "_Test Role 2")
frappe.db.sql("""delete from `tabHas Role` where role='_Test Role 2'""")
delete_doc("Role","_Test Role 2")
if frappe.db.exists("User", "_test@ektai.mail"):
delete_contact("_test@ektai.mail")
delete_doc("User", "_test@ektai.mail")
user = frappe.copy_doc(test_records[1])
user.email = "_test@ektai.mail"
user.insert()
frappe.get_doc({"doctype": "ToDo", "description": "_Test"}).insert()
delete_contact("_test@ektai.mail")
delete_doc("User", "_test@ektai.mail")
self.assertTrue(not frappe.db.sql("""select * from `tabToDo` where owner=%s""",
("_test@ektai.mail",)))
from frappe.core.doctype.role.test_role import test_records as role_records
frappe.copy_doc(role_records[1]).insert()
def test_get_value(self):
self.assertEqual(frappe.db.get_value("User", "test@ektai.mail"), "test@ektai.mail")
self.assertEqual(frappe.db.get_value("User", {"email":"test@ektai.mail"}), "test@ektai.mail")
self.assertEqual(frappe.db.get_value("User", {"email":"test@ektai.mail"}, "email"), "test@ektai.mail")
self.assertEqual(frappe.db.get_value("User", {"email":"test@ektai.mail"}, ["first_name", "email"]),
("_Test", "test@ektai.mail"))
self.assertEqual(frappe.db.get_value("User",
{"email":"test@ektai.mail", "first_name": "_Test"},
["first_name", "email"]),
("_Test", "test@ektai.mail"))
test_user = frappe.db.sql("select * from tabUser where name='test@ektai.mail'",
as_dict=True)[0]
self.assertEqual(frappe.db.get_value("User", {"email":"test@ektai.mail"}, "*", as_dict=True),
test_user)
self.assertEqual(frappe.db.get_value("User", "xxxtest@ektai.mail"), None)
frappe.db.set_value("Website Settings", "Website Settings", "_test", "_test_val")
self.assertEqual(frappe.db.get_value("Website Settings", None, "_test"), "_test_val")
self.assertEqual(frappe.db.get_value("Website Settings", "Website Settings", "_test"), "_test_val")
def test_high_permlevel_validations(self):
user = frappe.get_meta("User")
self.assertTrue("roles" in [d.fieldname for d in user.get_high_permlevel_fields()])
me = frappe.get_doc("User", "testperm@ektai.mail")
me.remove_roles("System Manager")
frappe.set_user("testperm@ektai.mail")
me = frappe.get_doc("User", "testperm@ektai.mail")
self.assertRaises(frappe.PermissionError, me.add_roles, "System Manager")
frappe.set_user("Administrator")
me = frappe.get_doc("User", "testperm@ektai.mail")
me.add_roles("System Manager")
self.assertTrue("System Manager" in [d.role for d in me.get("roles")])
def test_user_limit_for_site(self):
update_limits({'users': get_total_users()})
# reload site config
from frappe import _dict
frappe.local.conf = _dict(frappe.get_site_config())
# Create a new user
user = frappe.new_doc('User')
user.email = 'test_max_users@ektai.mail'
user.first_name = 'Test_max_user'
self.assertRaises(MaxUsersReachedError, user.add_roles, 'System Manager')
if frappe.db.exists('User', 'test_max_users@ektai.mail'):
delete_contact('test_max_users@ektai.mail')
frappe.delete_doc('User', 'test_max_users@ektai.mail')
# Clear the user limit
clear_limit('users')
def test_user_limit_for_site_with_simultaneous_sessions(self):
clear_limit('users')
# make sure this user counts
user = frappe.get_doc('User', 'test@ektai.mail')
user.add_roles('Website Manager')
user.save()
update_limits({'users': get_total_users()})
user.simultaneous_sessions = user.simultaneous_sessions + 1
self.assertRaises(MaxUsersReachedError, user.save)
# Clear the user limit
clear_limit('users')
# def test_deny_multiple_sessions(self):
# from frappe.installer import update_site_config
# clear_limit('users')
#
# # allow one session
# user = frappe.get_doc('User', 'test@ektai.mail')
# user.simultaneous_sessions = 1
# user.new_password = 'Eastern_43A1W'
# user.save()
#
# def test_request(conn):
# value = conn.get_value('User', 'first_name', {'name': 'test@ektai.mail'})
# self.assertTrue('first_name' in value)
#
# from frappe.frappeclient import FrappeClient
# update_site_config('deny_multiple_sessions', 0)
#
# conn1 = FrappeClient(get_url(), "test@ektai.mail", "Eastern_43A1W", verify=False)
# test_request(conn1)
#
# conn2 = FrappeClient(get_url(), "test@ektai.mail", "Eastern_43A1W", verify=False)
# test_request(conn2)
#
# update_site_config('deny_multiple_sessions', 1)
# conn3 = FrappeClient(get_url(), "test@ektai.mail", "Eastern_43A1W", verify=False)
# test_request(conn3)
#
# # first connection should fail
# test_request(conn1)
def test_site_expiry(self):
user = frappe.get_doc('User', 'test@ektai.mail')
user.enabled = 1
user.new_password = 'Eastern_43A1W'
user.save()
update_limits({'expiry': add_to_date(today(), days=-1), 'support_email': 'support@ektai.mail'})
frappe.local.conf = _dict(frappe.get_site_config())
frappe.db.commit()
res = requests.post(get_url(), params={'cmd': 'login', 'usr':
'test@ektai.mail', 'pwd': 'Eastern_43A1W', 'device': 'desktop'})
		# While the site is expired, the status code returned is 417 Expectation Failed
self.assertEqual(res.status_code, 417)
clear_limit("expiry")
frappe.local.conf = _dict(frappe.get_site_config())
def test_delete_user(self):
new_user = frappe.get_doc(dict(doctype='User', email='test-for-delete@ektai.mail',
first_name='Tester Delete User')).insert()
self.assertEqual(new_user.user_type, 'Website User')
# role with desk access
new_user.add_roles('_Test Role 2')
new_user.save()
self.assertEqual(new_user.user_type, 'System User')
comm = frappe.get_doc({
"doctype":"Communication",
"subject": "To check user able to delete even if linked with communication",
"content": "To check user able to delete even if linked with communication",
"sent_or_received": "Sent",
"user": new_user.name
})
comm.insert(ignore_permissions=True)
delete_contact(new_user.name)
frappe.delete_doc('User', new_user.name)
self.assertFalse(frappe.db.exists('User', new_user.name))
def test_deactivate_additional_users(self):
update_limits({'users': get_total_users()+1})
if not frappe.db.exists("User", "test_deactivate_additional_users@ektai.mail"):
user = frappe.new_doc('User')
user.email = 'test_deactivate_additional_users@ektai.mail'
user.first_name = 'Test Deactivate Additional Users'
user.add_roles("System Manager")
#update limits
update_limits({"users": get_total_users()-1})
self.assertEqual(frappe.db.get_value("User", "test_deactivate_additional_users@ektai.mail", "enabled"), 0)
if frappe.db.exists("User", "test_deactivate_additional_users@ektai.mail"):
delete_contact('test_deactivate_additional_users@ektai.mail')
frappe.delete_doc('User', 'test_deactivate_additional_users@ektai.mail')
# Clear the user limit
clear_limit('users')
def test_password_strength(self):
		# Test Password without Password Strength Policy
frappe.db.set_value("System Settings", "System Settings", "enable_password_policy", 0)
# password policy is disabled, test_password_strength should be ignored
result = test_password_strength("test_password")
self.assertFalse(result.get("feedback", None))
		# Test Password with Password Strength Policy Set
frappe.db.set_value("System Settings", "System Settings", "enable_password_policy", 1)
frappe.db.set_value("System Settings", "System Settings", "minimum_password_score", 2)
# Score 1; should now fail
result = test_password_strength("bee2ve")
self.assertEqual(result['feedback']['password_policy_validation_passed'], False)
# Score 4; should pass
result = test_password_strength("Eastern_43A1W")
self.assertEqual(result['feedback']['password_policy_validation_passed'], True)
def test_comment_mentions(self):
comment = '''
<span class="mention" data-id="test.comment@ektai.mail" data-value="Test" data-denotation-char="@">
<span><span class="ql-mention-denotation-char">@</span>Test</span>
</span>
'''
self.assertEqual(extract_mentions(comment)[0], "test.comment@ektai.mail")
comment = '''
<div>
Testing comment,
<span class="mention" data-id="test.comment@ektai.mail" data-value="Test" data-denotation-char="@">
<span><span class="ql-mention-denotation-char">@</span>Test</span>
</span>
please check
</div>
'''
self.assertEqual(extract_mentions(comment)[0], "test.comment@ektai.mail")
comment = '''
<div>
Testing comment for
<span class="mention" data-id="test_user@ektai.mail" data-value="Test" data-denotation-char="@">
<span><span class="ql-mention-denotation-char">@</span>Test</span>
</span>
and
<span class="mention" data-id="test.again@example1.com" data-value="Test" data-denotation-char="@">
<span><span class="ql-mention-denotation-char">@</span>Test</span>
</span>
please check
</div>
'''
self.assertEqual(extract_mentions(comment)[0], "test_user@ektai.mail")
self.assertEqual(extract_mentions(comment)[1], "test.again@example1.com")
def delete_contact(user):
frappe.db.sql("DELETE FROM `tabContact` WHERE `email_id`= %s", user)
| 36.098361
| 108
| 0.726703
|
4308dec371ea1e409bbedd65c18c9ae2a20e36e1
| 4,239
|
py
|
Python
|
toppra/constraint/canonical_conic.py
|
shintarokkk/toppra
|
1a7be8feb68fec91459d6dc625f0114692dac885
|
[
"MIT"
] | null | null | null |
toppra/constraint/canonical_conic.py
|
shintarokkk/toppra
|
1a7be8feb68fec91459d6dc625f0114692dac885
|
[
"MIT"
] | 1
|
2020-06-01T21:27:23.000Z
|
2020-06-01T21:27:23.000Z
|
toppra/constraint/canonical_conic.py
|
shintarokkk/toppra
|
1a7be8feb68fec91459d6dc625f0114692dac885
|
[
"MIT"
] | 2
|
2020-04-06T16:22:25.000Z
|
2020-06-12T00:45:10.000Z
|
from .constraint import Constraint
from .constraint import ConstraintType, DiscretizationType
import numpy as np
class CanonicalConicConstraint(Constraint):
"""Base class for all canonical conic constraints.
A canonical conic constraint is one with the following form
.. math::
        (a[i, j] + \Delta a[i, j]) u + (b[i, j] + \Delta b[i, j]) x + (c[i, j] + \Delta c[i, j]) \leq 0, \\\\
        [\Delta a[i, j], \Delta b[i, j], \Delta c[i, j]]^\top = P[i, j] \mathbf e, \quad \|\mathbf e\|_2 \leq 1,
    where P[i, j] is a 3x3 matrix. Notice that by setting P[i, j] to
    the zero matrix, the constraint reduces to an ordinary canonical linear constraint.
Constraints of this form can be translated to conic-quadratic
constraints. This transformation can be found in [1]. The
resulting conic-quadratic constraint is given below
.. math::
a[i, j]u + b[i, j]x + c[i, j] + \|P[i, j]^T [u, x, 1]^T \|_2 \leq 0,
where i is the stage index, and j is the constraint index.
Refs:
----
[1] Ben-Tal, A., & Nemirovski, A. (2001). Lectures on modern convex
optimization: analysis, algorithms, and engineering applications
(Vol. 2). Siam.
"""
def __init__(self):
self.constraint_type = ConstraintType.CanonicalConic
self.discretization_type = DiscretizationType.Collocation
self.n_extra_vars = 0
self.dof = -1
self._format_string = ""
def compute_constraint_params(self, path, gridpoints):
raise NotImplementedError
class RobustCanonicalLinearConstraint(CanonicalConicConstraint):
"""The simple canonical conic constraint.
This constraint can be seen as a more robust version of a
    CanonicalLinear constraint. In particular, the perturbation term,
[\Delta a[i, j], \Delta b[i, j], \Delta c[i, j]] is assumed to lie
in a centered ellipsoid:
.. math::
[\Delta a[i, j], \Delta b[i, j], \Delta c[i, j]]^\\top = diag(ru, rx, rc) \mathbf e,
where \|\mathbf e\|_2 \leq 1.
Parameters
----------
cnst: :class:`~toppra.constraint.CanonicalLinearConstraint`
The base constraint to robustify.
ellipsoid_axes_lengths: (3,)array
Lengths of the axes of the perturbation ellipsoid. Must all be
non-negative.
discretization_scheme: :class:`~.constraint.DiscretizationType`
Constraint discretization scheme to use.
"""
def __init__(self, cnst, ellipsoid_axes_lengths, discretization_scheme=DiscretizationType.Collocation):
super(RobustCanonicalLinearConstraint, self).__init__()
self.dof = cnst.get_dof()
assert cnst.get_constraint_type() == ConstraintType.CanonicalLinear
self.set_discretization_type(discretization_scheme)
if np.any(np.r_[ellipsoid_axes_lengths] < 0):
raise ValueError("Perturbation must be non-negative. Input {:}".format(ellipsoid_axes_lengths))
self.base_constraint = cnst
self.ellipsoid_axes_lengths = ellipsoid_axes_lengths
self._format_string += " Robust constraint generated from a canonical linear constraint\n"
def compute_constraint_params(self, path, gridpoints):
self.base_constraint.set_discretization_type(self.discretization_type)
a_, b_, c_, F_, g_, u_, _ = self.base_constraint.compute_constraint_params(path, gridpoints)
N = len(gridpoints) - 1
if self.base_constraint.identical:
d = F_.shape[0] # number of rows
else:
d = F_.shape[1]
a = np.zeros((N + 1, d + 2))
b = np.zeros((N + 1, d + 2))
c = np.zeros((N + 1, d + 2))
if self.base_constraint.identical:
for i in range(len(gridpoints)):
a[i, :d] = F_.dot(a_[i])
b[i, :d] = F_.dot(b_[i])
c[i, :d] = F_.dot(c_[i]) - g_
a[i, d:] = [1, -1]
c[i, d:] = [- u_[i, 1], u_[i, 0]]
else:
for i in range(len(gridpoints)):
a[i, :d] = F_[i].dot(a_[i])
b[i, :d] = F_[i].dot(b_[i])
c[i, :d] = F_[i].dot(c_[i]) - g_[i]
a[i, d:] = [1, -1]
c[i, d:] = [- u_[i, 1], u_[i, 0]]
P = np.zeros((N + 1, d + 2, 3, 3))
diag_ = np.diag(self.ellipsoid_axes_lengths)
P[:] = diag_
return a, b, c, P
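# --- Hedged usage sketch (not part of the original module): how a robust
# constraint could be built from an existing canonical linear constraint and
# evaluated on a parametrized path. The base constraint, path and gridpoints
# are assumed to be supplied by the caller; the axis lengths below are
# arbitrary illustration values for the perturbation radii (ru, rx, rc).
def _example_build_robust_constraint(base_linear_constraint, path, gridpoints):
    robust = RobustCanonicalLinearConstraint(
        base_linear_constraint,
        ellipsoid_axes_lengths=[0.1, 0.1, 0.0],
        discretization_scheme=DiscretizationType.Collocation)
    # a, b, c have shape (N + 1, d + 2); P has shape (N + 1, d + 2, 3, 3).
    a, b, c, P = robust.compute_constraint_params(path, gridpoints)
    return a, b, c, P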
| 37.513274
| 107
| 0.601085
|
5c31d0d6747a3adf2a53b70769120350e9d3eaea
| 28,920
|
py
|
Python
|
source/file_utils.py
|
ohadrozen/inferbert
|
2e450aba894937e5769dcf028e4a8a597991fe43
|
[
"Apache-2.0"
] | null | null | null |
source/file_utils.py
|
ohadrozen/inferbert
|
2e450aba894937e5769dcf028e4a8a597991fe43
|
[
"Apache-2.0"
] | 1
|
2021-08-22T08:10:10.000Z
|
2021-08-23T02:38:23.000Z
|
source/file_utils.py
|
ohadrozen/inferbert
|
2e450aba894937e5769dcf028e4a8a597991fe43
|
[
"Apache-2.0"
] | 2
|
2021-08-22T08:13:31.000Z
|
2021-08-22T08:19:29.000Z
|
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import fnmatch
import json
import logging
import os
import shutil
import sys
import tarfile
import tempfile
from contextlib import contextmanager
from functools import partial, wraps
from hashlib import sha256
from pathlib import Path
from typing import Dict, Optional, Union
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import requests
from filelock import FileLock
from tqdm import tqdm
# from transformers import __version__
__version__ = '3.0.1'
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
try:
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
if USE_TORCH in ("1", "ON", "YES", "AUTO") and USE_TF not in ("1", "ON", "YES"):
import torch
_torch_available = True # pylint: disable=invalid-name
logger.info("PyTorch version {} available.".format(torch.__version__))
else:
logger.info("Disabling PyTorch because USE_TF is set")
_torch_available = False
except ImportError:
_torch_available = False # pylint: disable=invalid-name
try:
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
if USE_TF in ("1", "ON", "YES", "AUTO") and USE_TORCH not in ("1", "ON", "YES"):
import tensorflow as tf
assert hasattr(tf, "__version__") and int(tf.__version__[0]) >= 2
_tf_available = True # pylint: disable=invalid-name
logger.info("TensorFlow version {} available.".format(tf.__version__))
else:
logger.info("Disabling Tensorflow because USE_TORCH is set")
_tf_available = False
except (ImportError, AssertionError):
_tf_available = False # pylint: disable=invalid-name
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
try:
import torch_xla.core.xla_model as xm # noqa: F401
if _torch_available:
        _torch_tpu_available = True  # pylint: disable=invalid-name
else:
_torch_tpu_available = False
except ImportError:
_torch_tpu_available = False
try:
import psutil # noqa: F401
_psutil_available = True
except ImportError:
_psutil_available = False
try:
import py3nvml # noqa: F401
_py3nvml_available = True
except ImportError:
_py3nvml_available = False
try:
from apex import amp # noqa: F401
_has_apex = True
except ImportError:
_has_apex = False
default_cache_path = os.path.join(torch_cache_home, "transformers")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF_WEIGHTS_NAME = "model.ckpt"
CONFIG_NAME = "config.json"
MODEL_CARD_NAME = "modelcard.json"
MULTIPLE_CHOICE_DUMMY_INPUTS = [[[0], [1]], [[0], [1]]]
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
def is_torch_available():
return _torch_available
def is_tf_available():
return _tf_available
def is_torch_tpu_available():
return _torch_tpu_available
def is_psutil_available():
return _psutil_available
def is_py3nvml_available():
return _py3nvml_available
def is_apex_available():
return _has_apex
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
def add_start_docstrings_to_callable(*docstr):
def docstring_decorator(fn):
class_name = ":class:`~transformers.{}`".format(fn.__qualname__.split(".")[0])
intro = " The {} forward method, overrides the :func:`__call__` special method.".format(class_name)
note = r"""
.. note::
Although the recipe for forward pass needs to be defined within
this function, one should call the :class:`Module` instance afterwards
instead of this since the former takes care of running the
pre and post processing steps while the latter silently ignores them.
"""
fn.__doc__ = intro + note + "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = fn.__doc__ + "".join(docstr)
return fn
return docstring_decorator
PT_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1
>>> outputs = model(**inputs, labels=labels)
>>> loss, scores = outputs[:2]
"""
PT_QUESTION_ANSWERING_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions)
>>> loss, start_scores, end_scores = outputs[:3]
"""
PT_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
>>> outputs = model(**inputs, labels=labels)
>>> loss, logits = outputs[:2]
"""
PT_MASKED_LM_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> input_ids = tokenizer("Hello, my dog is cute", return_tensors="pt")["input_ids"]
>>> outputs = model(input_ids, labels=input_ids)
>>> loss, prediction_scores = outputs[:2]
"""
PT_BASE_MODEL_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
PT_MULTIPLE_CHOICE_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import torch
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
>>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='pt', padding=True)
>>> outputs = model(**{{k: v.unsqueeze(0) for k,v in encoding.items()}}, labels=labels) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> loss, logits = outputs[:2]
"""
PT_CAUSAL_LM_SAMPLE = r"""
Example::
>>> import torch
>>> from transformers import {tokenizer_class}, {model_class}
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs, labels=inputs["input_ids"])
>>> loss, logits = outputs[:2]
"""
TF_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> input_ids = inputs["input_ids"]
>>> inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1
>>> outputs = model(inputs)
>>> loss, scores = outputs[:2]
"""
TF_QUESTION_ANSWERING_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> input_dict = tokenizer(question, text, return_tensors='tf')
>>> start_scores, end_scores = model(input_dict)
>>> all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0])
>>> answer = ' '.join(all_tokens[tf.math.argmax(start_scores, 1)[0] : tf.math.argmax(end_scores, 1)[0]+1])
"""
TF_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1
>>> outputs = model(inputs)
>>> loss, logits = outputs[:2]
"""
TF_MASKED_LM_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_scores = outputs[0]
"""
TF_BASE_MODEL_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)
>>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
TF_MULTIPLE_CHOICE_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='tf', padding=True)
>>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}}
>>> outputs = model(inputs) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> logits = outputs[0]
"""
TF_CAUSAL_LM_SAMPLE = r"""
Example::
>>> from transformers import {tokenizer_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}')
>>> model = {model_class}.from_pretrained('{checkpoint}')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)
>>> logits = outputs[0]
"""
def add_code_sample_docstrings(*docstr, tokenizer_class=None, checkpoint=None):
def docstring_decorator(fn):
model_class = fn.__qualname__.split(".")[0]
is_tf_class = model_class[:2] == "TF"
if "SequenceClassification" in model_class:
code_sample = TF_SEQUENCE_CLASSIFICATION_SAMPLE if is_tf_class else PT_SEQUENCE_CLASSIFICATION_SAMPLE
elif "QuestionAnswering" in model_class:
code_sample = TF_QUESTION_ANSWERING_SAMPLE if is_tf_class else PT_QUESTION_ANSWERING_SAMPLE
elif "TokenClassification" in model_class:
code_sample = TF_TOKEN_CLASSIFICATION_SAMPLE if is_tf_class else PT_TOKEN_CLASSIFICATION_SAMPLE
elif "MultipleChoice" in model_class:
code_sample = TF_MULTIPLE_CHOICE_SAMPLE if is_tf_class else PT_MULTIPLE_CHOICE_SAMPLE
elif "MaskedLM" in model_class:
code_sample = TF_MASKED_LM_SAMPLE if is_tf_class else PT_MASKED_LM_SAMPLE
elif "LMHead" in model_class:
code_sample = TF_CAUSAL_LM_SAMPLE if is_tf_class else PT_CAUSAL_LM_SAMPLE
elif "Model" in model_class:
code_sample = TF_BASE_MODEL_SAMPLE if is_tf_class else PT_BASE_MODEL_SAMPLE
else:
raise ValueError(f"Docstring can't be built for model {model_class}")
built_doc = code_sample.format(model_class=model_class, tokenizer_class=tokenizer_class, checkpoint=checkpoint)
fn.__doc__ = (fn.__doc__ or "") + "".join(docstr) + built_doc
return fn
return docstring_decorator
def is_remote_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
"""
Resolve a model identifier, and a file name, to a HF-hosted url
on either S3 or Cloudfront (a Content Delivery Network, or CDN).
Cloudfront is replicated over the globe so downloads are way faster
for the end user (and it also lowers our bandwidth costs). However, it
is more aggressively cached by default, so may not always reflect the
latest changes to the underlying file (default TTL is 24 hours).
In terms of client-side caching from this library, even though
Cloudfront relays the ETags from S3, using one or the other
(or switching from one to the other) will affect caching: cached files
are not shared between the two because the cached file's name contains
a hash of the url.
"""
endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
legacy_format = "/" not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name
so that TF 2.0 can identify it as a HDF5 file
(see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380)
"""
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5"):
filename += ".h5"
return filename
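# --- Hedged illustration (not part of the original module): the cache filename
# is sha256(url), optionally followed by "." and sha256(etag). The url and etag
# below are made-up example values.
def _example_url_to_filename():
    url = "https://example.com/model.bin"
    etag = '"abc123"'
    name = url_to_filename(url, etag)
    hashed_url, hashed_etag = name.split(".")
    assert hashed_url == sha256(url.encode("utf-8")).hexdigest()
    assert hashed_etag == sha256(etag.encode("utf-8")).hexdigest()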
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag
def cached_path(
url_or_filename,
cache_dir=None,
force_download=False,
proxies=None,
resume_download=False,
user_agent: Union[Dict, str, None] = None,
extract_compressed_file=False,
force_extract=False,
local_files_only=False,
) -> Optional[str]:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
Args:
cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).
        force_download: if True, re-download the file even if it's already cached in the cache dir.
        resume_download: if True, resume the download if an incompletely received file is found.
user_agent: Optional string or dict that will be appended to the user-agent on remote requests.
extract_compressed_file: if True and the path point to a zip or tar file, extract the compressed
file in a folder along the archive.
force_extract: if True when extract_compressed_file is True and the archive was already extracted,
            re-extract the archive and override the folder where it was extracted.
Return:
None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
Local path (string) otherwise
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if is_remote_url(url_or_filename):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
url_or_filename,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
user_agent=user_agent,
local_files_only=local_files_only,
)
elif os.path.exists(url_or_filename):
# File, and it exists.
output_path = url_or_filename
elif urlparse(url_or_filename).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
if extract_compressed_file:
if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
output_dir, output_file = os.path.split(output_path)
output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lock_path = output_path + ".lock"
with FileLock(lock_path):
shutil.rmtree(output_path_extracted, ignore_errors=True)
os.makedirs(output_path_extracted)
if is_zipfile(output_path):
with ZipFile(output_path, "r") as zip_file:
zip_file.extractall(output_path_extracted)
zip_file.close()
elif tarfile.is_tarfile(output_path):
tar_file = tarfile.open(output_path)
tar_file.extractall(output_path_extracted)
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
return output_path_extracted
return output_path
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent: Union[Dict, str, None] = None):
ua = "transformers/{}; python/{}".format(__version__, sys.version.split()[0])
if is_torch_available():
ua += "; torch/{}".format(torch.__version__)
if is_tf_available():
ua += "; tensorflow/{}".format(tf.__version__)
if isinstance(user_agent, dict):
ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
elif isinstance(user_agent, str):
ua += "; " + user_agent
headers = {"user-agent": ua}
if resume_size > 0:
headers["Range"] = "bytes=%d-" % (resume_size,)
response = requests.get(url, stream=True, proxies=proxies, headers=headers)
if response.status_code == 416: # Range not satisfiable
return
content_length = response.headers.get("Content-Length")
total = resume_size + int(content_length) if content_length is not None else None
progress = tqdm(
unit="B",
unit_scale=True,
total=total,
initial=resume_size,
desc="Downloading",
disable=bool(logger.getEffectiveLevel() == logging.NOTSET),
)
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(
url,
cache_dir=None,
force_download=False,
proxies=None,
etag_timeout=10,
resume_download=False,
user_agent: Union[Dict, str, None] = None,
local_files_only=False,
) -> Optional[str]:
"""
Given a URL, look for the corresponding file in the local cache.
If it's not there, download it. Then return the path to the cached file.
Return:
None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
Local path (string) otherwise
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
etag = None
if not local_files_only:
try:
response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
if response.status_code == 200:
etag = response.headers.get("ETag")
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(cache_path):
return cache_path
else:
matching_files = [
file
for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
if not file.endswith(".json") and not file.endswith(".lock")
]
if len(matching_files) > 0:
return os.path.join(cache_dir, matching_files[-1])
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False."
)
return None
# From now on, etag is not None.
if os.path.exists(cache_path) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lock_path = cache_path + ".lock"
with FileLock(lock_path):
# If the download just completed while the lock was activated.
if os.path.exists(cache_path) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
incomplete_path = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(incomplete_path, "a+b") as f:
yield f
temp_file_manager = _resumable_file_manager
if os.path.exists(incomplete_path):
resume_size = os.stat(incomplete_path).st_size
else:
resume_size = 0
else:
temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
resume_size = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
logger.info("storing %s in cache at %s", url, cache_path)
os.replace(temp_file.name, cache_path)
logger.info("creating metadata file for %s", cache_path)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
json.dump(meta, meta_file)
return cache_path
class cached_property(property):
"""
Descriptor that mimics @property but caches output in member variable.
From tensorflow_datasets
Built-in in functools from Python 3.8.
"""
def __get__(self, obj, objtype=None):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute")
attr = "__cached_" + self.fget.__name__
cached = getattr(obj, attr, None)
if cached is None:
cached = self.fget(obj)
setattr(obj, attr, cached)
return cached
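# --- Hedged usage sketch (not part of the original module): demonstrates that
# `cached_property` evaluates the getter once per instance and then serves the
# cached value. The `_Circle` class is hypothetical, used only for illustration.
def _example_cached_property():
    class _Circle:
        def __init__(self, radius):
            self.radius = radius
            self.computations = 0
        @cached_property
        def area(self):
            self.computations += 1  # counts how often the getter actually runs
            return 3.14159 * self.radius ** 2
    circle = _Circle(2.0)
    assert circle.area == circle.area  # second access hits the cache
    assert circle.computations == 1    # the getter ran exactly once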
def torch_required(func):
# Chose a different decorator name than in tests so it's clear they are not the same.
@wraps(func)
def wrapper(*args, **kwargs):
if is_torch_available():
return func(*args, **kwargs)
else:
raise ImportError(f"Method `{func.__name__}` requires PyTorch.")
return wrapper
def tf_required(func):
# Chose a different decorator name than in tests so it's clear they are not the same.
@wraps(func)
def wrapper(*args, **kwargs):
if is_tf_available():
return func(*args, **kwargs)
else:
raise ImportError(f"Method `{func.__name__}` requires TF.")
return wrapper
| 35.747837
| 144
| 0.651418
|
13dd948606273f5c9bd67c58e2c9a1062f4d2422
| 2,078
|
py
|
Python
|
main.py
|
Noel-jediknight/full-stackwebapp
|
0628b9aac90bde9fdacd94e81fc64e7fd4a905cf
|
[
"MIT"
] | null | null | null |
main.py
|
Noel-jediknight/full-stackwebapp
|
0628b9aac90bde9fdacd94e81fc64e7fd4a905cf
|
[
"MIT"
] | null | null | null |
main.py
|
Noel-jediknight/full-stackwebapp
|
0628b9aac90bde9fdacd94e81fc64e7fd4a905cf
|
[
"MIT"
] | null | null | null |
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.encoders import jsonable_encoder
from fastapi.middleware.cors import CORSMiddleware
app=FastAPI()
origin=["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origin,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class todolist(BaseModel):
item:int
chore:str
class delitem(BaseModel):
item:int
class Name(BaseModel):
person_name:str
age:int
@app.get("/")
def basic():
return "Hello world"
@app.get("/info")
def info():
#info={"Name":"Noel","srn":"PES2UG20CS446","fun fact":"You are alive(for now)"}
#return info
name="Noel"
SRN="PES2UG20CS446"
funfact="You are alive(for now)"
return(name,SRN,funfact)
@app.get("/date")
def return_date():
res={"date":"today"}
return "today is 4th"
@app.post("/name")
def name(name_var: Name):
name_encoded=jsonable_encoder(name_var)
pname= name_encoded['person_name']
with open("names.txt","a") as f:
f.write('{}\n'.format(pname))
f.write("\n")
age= name_encoded['age']
print(age)
print(type(age))
return "Hello "+pname
todo={}
@app.post("/todolist")
def add_item(item_var: todolist):
item_encoded=jsonable_encoder(item_var)
itemno= item_encoded['item']
choreno= item_encoded['chore']
#with open("todo.txt","a") as file:
# file.write('{}{}\n'.format(itemno,choreno))
todo[itemno]=choreno
print (todo)
return(todo)
@app.put("/todolist")
def update_item(item_var: todolist):
item_encoded=jsonable_encoder(item_var)
itemno= item_encoded['item']
choreno= item_encoded['chore']
todo.update({itemno:choreno})
return(todo)
@app.delete("/todolist")
def delete_item(item_var: delitem):
item_encoded=jsonable_encoder(item_var)
itemno= item_encoded['item']
del todo[itemno]
return(todo)
@app.get("/todolist")
def mystuff():
return(todo)
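# --- Hedged client sketch (not part of the app): assumes the API is being
# served locally, e.g. with `uvicorn main:app --reload`, at the base URL below.
def _example_client_usage(base="http://127.0.0.1:8000"):
    import requests  # third-party HTTP client, used only for this sketch
    requests.post(f"{base}/todolist", json={"item": 1, "chore": "laundry"})
    requests.put(f"{base}/todolist", json={"item": 1, "chore": "dishes"})
    print(requests.get(f"{base}/todolist").json())  # -> {"1": "dishes"}
    requests.delete(f"{base}/todolist", json={"item": 1})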
| 21.645833
| 84
| 0.621752
|
01424a92b3efca6a1e14ffbcfb50d4a140fd3beb
| 2,683
|
py
|
Python
|
python/dnd-character/dnd_character_test.py
|
ropable/exercism
|
9dde2a7952badec03428b5f9dfb8499a7ce55458
|
[
"MIT"
] | 9
|
2020-12-12T03:29:33.000Z
|
2021-08-11T13:08:06.000Z
|
python/dnd-character/dnd_character_test.py
|
ropable/exercism
|
9dde2a7952badec03428b5f9dfb8499a7ce55458
|
[
"MIT"
] | null | null | null |
python/dnd-character/dnd_character_test.py
|
ropable/exercism
|
9dde2a7952badec03428b5f9dfb8499a7ce55458
|
[
"MIT"
] | 1
|
2020-11-02T10:40:06.000Z
|
2020-11-02T10:40:06.000Z
|
import unittest
from dnd_character import Character, modifier
# Tests adapted from `problem-specifications//canonical-data.json`
class DndCharacterTest(unittest.TestCase):
def test_ability_modifier_for_score_3_is_n4(self):
self.assertEqual(modifier(3), -4)
def test_ability_modifier_for_score_4_is_n3(self):
self.assertEqual(modifier(4), -3)
def test_ability_modifier_for_score_5_is_n3(self):
self.assertEqual(modifier(5), -3)
def test_ability_modifier_for_score_6_is_n2(self):
self.assertEqual(modifier(6), -2)
def test_ability_modifier_for_score_7_is_n2(self):
self.assertEqual(modifier(7), -2)
def test_ability_modifier_for_score_8_is_n1(self):
self.assertEqual(modifier(8), -1)
def test_ability_modifier_for_score_9_is_n1(self):
self.assertEqual(modifier(9), -1)
def test_ability_modifier_for_score_10_is_0(self):
self.assertEqual(modifier(10), 0)
def test_ability_modifier_for_score_11_is_0(self):
self.assertEqual(modifier(11), 0)
def test_ability_modifier_for_score_12_is_1(self):
self.assertEqual(modifier(12), 1)
def test_ability_modifier_for_score_13_is_1(self):
self.assertEqual(modifier(13), 1)
def test_ability_modifier_for_score_14_is_2(self):
self.assertEqual(modifier(14), 2)
def test_ability_modifier_for_score_15_is_2(self):
self.assertEqual(modifier(15), 2)
def test_ability_modifier_for_score_16_is_3(self):
self.assertEqual(modifier(16), 3)
def test_ability_modifier_for_score_17_is_3(self):
self.assertEqual(modifier(17), 3)
def test_ability_modifier_for_score_18_is_4(self):
self.assertEqual(modifier(18), 4)
def test_random_ability_is_within_range(self):
score = Character().ability()
self.assertIs(score >= 3 and score <= 18, True)
def test_random_character_is_valid(self):
Char = Character()
self.assertIs(Char.strength >= 3 and Char.strength <= 18, True)
self.assertIs(Char.dexterity >= 3 and Char.dexterity <= 18, True)
self.assertIs(Char.constitution >= 3 and Char.constitution <= 18, True)
self.assertIs(Char.intelligence >= 3 and Char.intelligence <= 18, True)
self.assertIs(Char.wisdom >= 3 and Char.wisdom <= 18, True)
self.assertIs(Char.charisma >= 3 and Char.charisma <= 18, True)
self.assertIs(Char.hitpoints == 10 + modifier(Char.constitution), True)
def test_each_ability_is_only_calculated_once(self):
Char = Character()
self.assertIs(Char.strength == Char.strength, True)
if __name__ == "__main__":
unittest.main()
| 34.397436
| 79
| 0.712262
|
718c42d009346e9ead6e3efcc8e562b69d7bfeb5
| 5,824
|
py
|
Python
|
intersight/models/hyperflex_feature_limit_entry_ref.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 21
|
2018-03-29T14:20:35.000Z
|
2021-10-13T05:11:41.000Z
|
intersight/models/hyperflex_feature_limit_entry_ref.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 14
|
2018-01-30T15:45:46.000Z
|
2022-02-23T14:23:21.000Z
|
intersight/models/hyperflex_feature_limit_entry_ref.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 18
|
2018-01-03T15:09:56.000Z
|
2021-07-16T02:21:54.000Z
|
# coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class HyperflexFeatureLimitEntryRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'object_type': 'str',
'moid': 'str',
'selector': 'str'
}
attribute_map = {
'object_type': 'ObjectType',
'moid': 'Moid',
'selector': 'Selector'
}
def __init__(self, object_type=None, moid=None, selector=None):
"""
HyperflexFeatureLimitEntryRef - a model defined in Swagger
"""
self._object_type = None
self._moid = None
self._selector = None
if object_type is not None:
self.object_type = object_type
if moid is not None:
self.moid = moid
if selector is not None:
self.selector = selector
@property
def object_type(self):
"""
Gets the object_type of this HyperflexFeatureLimitEntryRef.
The Object Type of the referenced REST resource.
:return: The object_type of this HyperflexFeatureLimitEntryRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this HyperflexFeatureLimitEntryRef.
The Object Type of the referenced REST resource.
:param object_type: The object_type of this HyperflexFeatureLimitEntryRef.
:type: str
"""
self._object_type = object_type
@property
def moid(self):
"""
Gets the moid of this HyperflexFeatureLimitEntryRef.
The Moid of the referenced REST resource.
:return: The moid of this HyperflexFeatureLimitEntryRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this HyperflexFeatureLimitEntryRef.
The Moid of the referenced REST resource.
:param moid: The moid of this HyperflexFeatureLimitEntryRef.
:type: str
"""
self._moid = moid
@property
def selector(self):
"""
Gets the selector of this HyperflexFeatureLimitEntryRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:return: The selector of this HyperflexFeatureLimitEntryRef.
:rtype: str
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this HyperflexFeatureLimitEntryRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:param selector: The selector of this HyperflexFeatureLimitEntryRef.
:type: str
"""
self._selector = selector
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, HyperflexFeatureLimitEntryRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
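# --- Hedged usage sketch (not part of the generated model): building a
# reference by selector and serializing it. The object type string and the
# OData filter below are illustrative values, not verified against the API.
def _example_feature_limit_entry_ref():
    ref = HyperflexFeatureLimitEntryRef(
        object_type="hyperflex.FeatureLimitEntry",
        selector="Moid eq '000000000000000000000000'")
    # Returns {'object_type': ..., 'moid': None, 'selector': ...}
    return ref.to_dict()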
| 32
| 576
| 0.613839
|
33193c222f5dc20ab18e2323fba2f18d48e98f1e
| 1,031
|
py
|
Python
|
examples/sharepoint/connect_with_client_certificate_adal.py
|
theodoriss/Office365-REST-Python-Client
|
3bd7a62dadcd3f0a0aceeaff7584fff3fd44886e
|
[
"MIT"
] | 544
|
2016-08-04T17:10:16.000Z
|
2022-03-31T07:17:20.000Z
|
examples/sharepoint/connect_with_client_certificate_adal.py
|
theodoriss/Office365-REST-Python-Client
|
3bd7a62dadcd3f0a0aceeaff7584fff3fd44886e
|
[
"MIT"
] | 438
|
2016-10-11T12:24:22.000Z
|
2022-03-31T19:30:35.000Z
|
examples/sharepoint/connect_with_client_certificate_adal.py
|
theodoriss/Office365-REST-Python-Client
|
3bd7a62dadcd3f0a0aceeaff7584fff3fd44886e
|
[
"MIT"
] | 202
|
2016-08-22T19:29:40.000Z
|
2022-03-30T20:26:15.000Z
|
import os
from office365.runtime.auth.token_response import TokenResponse
from office365.sharepoint.client_context import ClientContext
from tests import test_site_url, test_tenant
cert_settings = {
'client_id': '51d03106-4726-442c-86db-70b32fa7547f',
'thumbprint': "6B36FBFC86FB1C019EB6496494B9195E6D179DDB",
'certificate_path': '{0}/selfsigncert.pem'.format(os.path.dirname(__file__))
}
def acquire_token():
import adal
authority_url = 'https://login.microsoftonline.com/{0}'.format(test_tenant)
auth_ctx = adal.AuthenticationContext(authority_url)
with open(cert_settings['certificate_path'], 'r') as file:
key = file.read()
json_token = auth_ctx.acquire_token_with_client_certificate(
test_site_url,
cert_settings['client_id'],
key,
cert_settings['thumbprint'])
return TokenResponse(**json_token)
ctx = ClientContext(test_site_url).with_access_token(acquire_token)
current_web = ctx.web.get().execute_query()
print("{0}".format(current_web.url))
| 34.366667
| 80
| 0.748788
|
163e86af2291e497d8ba576bef3d6ff2a3505314
| 3,364
|
py
|
Python
|
tests/test_visitors/test_ast/test_naming/conftest.py
|
sourya/wemake-python-styleguide
|
313a11a62fac2fb2067252db4e6a6530e070e382
|
[
"MIT"
] | null | null | null |
tests/test_visitors/test_ast/test_naming/conftest.py
|
sourya/wemake-python-styleguide
|
313a11a62fac2fb2067252db4e6a6530e070e382
|
[
"MIT"
] | null | null | null |
tests/test_visitors/test_ast/test_naming/conftest.py
|
sourya/wemake-python-styleguide
|
313a11a62fac2fb2067252db4e6a6530e070e382
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
# Imports:
import_alias = """
import os as {0}
"""
from_import_alias = """
from os import path as {0}
"""
# Function names:
function_name = 'def {0}(): ...'
method_name = """
class Input(object):
def {0}(self): ...
"""
# Function arguments:
function_argument = 'def test(arg, {0}): ...'
method_argument = """
class Input(object):
def validate(self, {0}): ...
"""
function_keyword_argument = 'def test(arg, {0}=None): ...'
method_keyword_argument = """
class Input(object):
def validate(self, {0}=None): ...
"""
function_args_argument = 'def test(arg, *{0}): ...'
function_kwargs_argument = 'def test(arg, **{0}): ...'
method_args_argument = """
class Input(object):
def validate(self, *{0}): ...
"""
method_kwargs_argument = """
class Input(object):
def validate(self, **{0}): ...
"""
function_kwonly_argument = """
def test(*, {0}): ...
"""
function_kwonly_default_argument = """
def test(*, {0}=True): ...
"""
method_kwonly_argument = """
class Input(object):
def test(self, *, {0}=True): ...
"""
lambda_argument = 'lambda {0}: ...'
# Class attributes:
static_attribute = """
class Test:
{0} = None
"""
static_typed_attribute = """
class Test:
{0}: int = None
"""
static_typed_annotation = """
class Test:
{0}: int
"""
instance_attribute = """
class Test(object):
def __init__(self):
self.{0} = 123
"""
instance_typed_attribute = """
class Test(object):
def __init__(self):
self.{0}: int = 123
"""
# Variables:
variable_def = """
{0} = 'test'
"""
variable_typed_def = """
{0}: str = 'test'
"""
variable_typed = """
{0}: str
"""
# See: https://github.com/wemake-services/wemake-python-styleguide/issues/405
unpacking_variables = """
first.attr, {0} = range(2)
"""
unpacking_star_variables = """
first, *{0} = range(2)
"""
for_variable = """
def container():
for {0} in []:
...
"""
for_star_variable = """
def container():
for index, *{0} in []:
...
"""
with_variable = """
def container():
with open('test.py') as {0}:
...
"""
with_star_variable = """
def container():
with open('test.py') as (first, *{0}):
...
"""
exception = """
try:
1 / 0
except Exception as {0}:
raise
"""
# Fixtures:
@pytest.fixture(params=[
# Imports:
import_alias,
from_import_alias,
# Function names, we don't use async function because we generate them:
function_name,
method_name,
# Function arguments:
function_argument,
method_argument,
function_keyword_argument,
method_keyword_argument,
function_args_argument,
function_kwargs_argument,
method_args_argument,
method_kwargs_argument,
function_kwonly_argument,
function_kwonly_default_argument,
method_kwonly_argument,
lambda_argument,
# Class attributes:
static_attribute,
static_typed_attribute,
static_typed_annotation,
instance_attribute,
instance_typed_attribute,
# Variables:
variable_def,
variable_typed_def,
variable_typed,
unpacking_variables,
unpacking_star_variables,
for_variable,
for_star_variable,
with_variable,
with_star_variable,
exception,
])
def naming_template(request):
"""Parametrized fixture that contains all possible naming templates."""
return request.param
| 16.904523
| 77
| 0.629905
|
160a4f7fca5245668abdd7575e192c7737797fdb
| 1,497
|
py
|
Python
|
src/cltl/backend/api/backend.py
|
leolani/cltl-backend
|
4ecc6227f9d48e40b9f59e6d78e0fcee9cdadbd4
|
[
"MIT"
] | null | null | null |
src/cltl/backend/api/backend.py
|
leolani/cltl-backend
|
4ecc6227f9d48e40b9f59e6d78e0fcee9cdadbd4
|
[
"MIT"
] | null | null | null |
src/cltl/backend/api/backend.py
|
leolani/cltl-backend
|
4ecc6227f9d48e40b9f59e6d78e0fcee9cdadbd4
|
[
"MIT"
] | null | null | null |
import logging
from cltl.backend.api.microphone import Microphone
from cltl.backend.api.text_to_speech import TextToSpeech
logger = logging.getLogger(__name__)
class Backend:
"""
Abstract Backend on which all Backends are based
Exposes
:class:`~cltl.backend.api.microphone.Microphone`
Parameters
----------
    microphone: Microphone
        Backend :class:`~cltl.backend.api.microphone.Microphone`
    text_to_speech: TextToSpeech
        Backend :class:`~cltl.backend.api.text_to_speech.TextToSpeech`; optional, may be None
    """
    def __init__(self, microphone: Microphone, text_to_speech: TextToSpeech = None):
        self._microphone = microphone
        self._text_to_speech = text_to_speech
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def start(self):
if self._microphone:
self._microphone.start()
def stop(self):
self._stop_safe(self._microphone)
def _stop_safe(self, component):
if component:
try:
component.stop()
except:
logger.exception("Failed to stop " + str(component))
@property
def microphone(self) -> Microphone:
"""
Reference to :class:`~cltl.backend.api.microphone.Microphone`
Returns
-------
Microphone
"""
return self._microphone
@property
def text_to_speech(self) -> TextToSpeech:
"""
Reference to :class:`~cltl.backend.api.text_to_speech.TextToSpeech`
Returns
-------
TextToSpeech
"""
return self._text_to_speech
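# --- Hedged usage sketch (not part of the original module): the backend is a
# context manager that starts the microphone on entry and stops it on exit.
# The microphone instance is assumed to be provided by the caller.
def _example_backend_usage(microphone: Microphone):
    with Backend(microphone) as backend:
        return backend.microphone  # started on entry, stopped again on exit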
| 22.343284
| 75
| 0.602538
|
27354d423aba4087b35fa36658a151f96ee42882
| 1,395
|
py
|
Python
|
tests/compas/datastructures/test_mesh_operations.py
|
mpopescu/compas
|
55f259607deea501f862cbaea79bd97d7e56ead6
|
[
"MIT"
] | null | null | null |
tests/compas/datastructures/test_mesh_operations.py
|
mpopescu/compas
|
55f259607deea501f862cbaea79bd97d7e56ead6
|
[
"MIT"
] | 9
|
2019-09-11T08:53:19.000Z
|
2019-09-16T08:35:39.000Z
|
tests/compas/datastructures/test_mesh_operations.py
|
Licini/compas
|
34f65adb3d0abc3f403312ffba62aa76f3376292
|
[
"MIT"
] | null | null | null |
import pytest
from compas.datastructures import Mesh
from compas.datastructures import mesh_insert_vertex_on_edge
from compas.datastructures import mesh_substitute_vertex_in_faces
@pytest.fixture
def mesh_0():
vertices = [
[1.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 1.0, 0.0],
[2.0, 1.0, 0.0],
[0.0, 0.0, 0.0]
]
faces = [
[0, 1, 2],
[0, 3, 1]
]
return Mesh.from_vertices_and_faces(vertices, faces)
def test_insert_vertex_on_edge(mesh_0):
mesh_insert_vertex_on_edge(mesh_0, 0, 1)
assert len(mesh_0.face_vertices(0)) == 4
assert len(mesh_0.face_vertices(1)) == 4
assert mesh_0.face_vertex_descendant(0, 0) == 5
assert mesh_0.face_vertex_descendant(1, 1) == 5
mesh_insert_vertex_on_edge(mesh_0, 0, 2, 4)
assert len(mesh_0.face_vertices(0)) == 5
assert mesh_0.face_vertex_descendant(0, 2) == 4
def test_mesh_substitute_vertex_in_faces(mesh_0):
mesh_substitute_vertex_in_faces(mesh_0, 0, 4)
assert 4 in mesh_0.face_vertices(0)
assert 0 not in mesh_0.face_vertices(0)
assert 4 in mesh_0.face_vertices(1)
assert 0 not in mesh_0.face_vertices(1)
mesh_substitute_vertex_in_faces(mesh_0, 4, 0, [1])
assert 4 in mesh_0.face_vertices(0)
assert 0 not in mesh_0.face_vertices(0)
assert 0 in mesh_0.face_vertices(1)
assert 4 not in mesh_0.face_vertices(1)
| 29.0625
| 65
| 0.683154
|
e224e6f09fa3c1b650494125a75b61b08462eaf3
| 76
|
py
|
Python
|
src/__tests__/integration/failures/setup_test.py
|
jest-community/jest-pytest
|
b197b0b31e3ca5c411202d97583cbd2d2b0b92e9
|
[
"MIT"
] | 37
|
2018-05-22T07:17:26.000Z
|
2022-03-03T13:14:46.000Z
|
src/__tests__/integration/failures/setup_test.py
|
jondot/jest-pytest
|
b197b0b31e3ca5c411202d97583cbd2d2b0b92e9
|
[
"MIT"
] | 34
|
2018-05-22T07:19:40.000Z
|
2022-03-11T23:21:03.000Z
|
src/__tests__/integration/failures/setup_test.py
|
jondot/jest-pytest
|
b197b0b31e3ca5c411202d97583cbd2d2b0b92e9
|
[
"MIT"
] | 8
|
2018-05-30T20:05:26.000Z
|
2021-02-19T14:17:05.000Z
|
def setup_module(module):
wtf
def test_something():
assert 1 == 1
| 10.857143
| 25
| 0.644737
|
a6a784da76db769a97ee3f032c89939167918c1a
| 7,898
|
py
|
Python
|
sdk/python/pulumi_azure_native/costmanagement/v20190401preview/get_budget.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/costmanagement/v20190401preview/get_budget.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/costmanagement/v20190401preview/get_budget.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetBudgetResult',
'AwaitableGetBudgetResult',
'get_budget',
]
@pulumi.output_type
class GetBudgetResult:
"""
A budget resource.
"""
def __init__(__self__, amount=None, category=None, current_spend=None, e_tag=None, filter=None, id=None, name=None, notifications=None, time_grain=None, time_period=None, type=None):
if amount and not isinstance(amount, float):
raise TypeError("Expected argument 'amount' to be a float")
pulumi.set(__self__, "amount", amount)
if category and not isinstance(category, str):
raise TypeError("Expected argument 'category' to be a str")
pulumi.set(__self__, "category", category)
if current_spend and not isinstance(current_spend, dict):
raise TypeError("Expected argument 'current_spend' to be a dict")
pulumi.set(__self__, "current_spend", current_spend)
if e_tag and not isinstance(e_tag, str):
raise TypeError("Expected argument 'e_tag' to be a str")
pulumi.set(__self__, "e_tag", e_tag)
if filter and not isinstance(filter, dict):
raise TypeError("Expected argument 'filter' to be a dict")
pulumi.set(__self__, "filter", filter)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if notifications and not isinstance(notifications, dict):
raise TypeError("Expected argument 'notifications' to be a dict")
pulumi.set(__self__, "notifications", notifications)
if time_grain and not isinstance(time_grain, str):
raise TypeError("Expected argument 'time_grain' to be a str")
pulumi.set(__self__, "time_grain", time_grain)
if time_period and not isinstance(time_period, dict):
raise TypeError("Expected argument 'time_period' to be a dict")
pulumi.set(__self__, "time_period", time_period)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def amount(self) -> float:
"""
The total amount of cost to track with the budget
"""
return pulumi.get(self, "amount")
@property
@pulumi.getter
def category(self) -> str:
"""
The category of the budget, whether the budget tracks cost or usage.
"""
return pulumi.get(self, "category")
@property
@pulumi.getter(name="currentSpend")
def current_spend(self) -> 'outputs.CurrentSpendResponse':
"""
The current amount of cost which is being tracked for a budget.
"""
return pulumi.get(self, "current_spend")
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[str]:
"""
eTag of the resource. To handle concurrent update scenario, this field will be used to determine whether the user is updating the latest version or not.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def filter(self) -> Optional['outputs.ReportConfigFilterResponse']:
"""
May be used to filter budgets.
"""
return pulumi.get(self, "filter")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def notifications(self) -> Optional[Mapping[str, 'outputs.NotificationResponse']]:
"""
Dictionary of notifications associated with the budget. Budget can have up to five notifications.
"""
return pulumi.get(self, "notifications")
@property
@pulumi.getter(name="timeGrain")
def time_grain(self) -> str:
"""
The time covered by a budget. Tracking of the amount will be reset based on the time grain.
"""
return pulumi.get(self, "time_grain")
@property
@pulumi.getter(name="timePeriod")
def time_period(self) -> 'outputs.BudgetTimePeriodResponse':
"""
Has start and end date of the budget. The start date must be first of the month and should be less than the end date. Budget start date must be on or after June 1, 2017. Future start date should not be more than three months. Past start date should be selected within the timegrain period. There are no restrictions on the end date.
"""
return pulumi.get(self, "time_period")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetBudgetResult(GetBudgetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBudgetResult(
amount=self.amount,
category=self.category,
current_spend=self.current_spend,
e_tag=self.e_tag,
filter=self.filter,
id=self.id,
name=self.name,
notifications=self.notifications,
time_grain=self.time_grain,
time_period=self.time_period,
type=self.type)
def get_budget(budget_name: Optional[str] = None,
scope: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBudgetResult:
"""
A budget resource.
:param str budget_name: Budget Name.
:param str scope: The scope associated with budget operations. This includes '/subscriptions/{subscriptionId}/' for subscription scope, '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope, '/providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope, '/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for Department scope, '/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}' for EnrollmentAccount scope, '/providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management Group scope, '/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for billingProfile scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}' for invoiceSection scope.
"""
__args__ = dict()
__args__['budgetName'] = budget_name
__args__['scope'] = scope
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:costmanagement/v20190401preview:getBudget', __args__, opts=opts, typ=GetBudgetResult).value
return AwaitableGetBudgetResult(
amount=__ret__.amount,
category=__ret__.category,
current_spend=__ret__.current_spend,
e_tag=__ret__.e_tag,
filter=__ret__.filter,
id=__ret__.id,
name=__ret__.name,
notifications=__ret__.notifications,
time_grain=__ret__.time_grain,
time_period=__ret__.time_period,
type=__ret__.type)
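# Hedged usage sketch (not generated code; the budget name and scope below are placeholders):
# inside a Pulumi program the invoke above is typically used as
#     budget = get_budget(
#         budget_name="monthly-budget",
#         scope="/subscriptions/00000000-0000-0000-0000-000000000000")
#     # budget.amount, budget.time_grain, budget.time_period are then plain attributes.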
| 40.091371
| 929
| 0.660167
|
db1bbfc702a77182b67dfc3bd80978d3f5c3cb38
| 5,653
|
py
|
Python
|
flask_monitoringdashboard/controllers/endpoints.py
|
mcknz-gy/Flask-MonitoringDashboard
|
c3126971ce4af6abb3bbf763f042bc0a6dfb48b2
|
[
"MIT"
] | 630
|
2018-03-03T23:52:07.000Z
|
2022-03-30T10:55:46.000Z
|
flask_monitoringdashboard/controllers/endpoints.py
|
mcknz-gy/Flask-MonitoringDashboard
|
c3126971ce4af6abb3bbf763f042bc0a6dfb48b2
|
[
"MIT"
] | 292
|
2018-03-05T11:27:57.000Z
|
2022-03-28T23:05:48.000Z
|
flask_monitoringdashboard/controllers/endpoints.py
|
mcknz-gy/Flask-MonitoringDashboard
|
c3126971ce4af6abb3bbf763f042bc0a6dfb48b2
|
[
"MIT"
] | 146
|
2018-03-22T09:53:36.000Z
|
2022-02-03T08:13:50.000Z
|
import datetime
from numpy import median
from sqlalchemy import and_
from flask_monitoringdashboard import config
from flask_monitoringdashboard.core import cache
from flask_monitoringdashboard.core.colors import get_color
from flask_monitoringdashboard.core.measurement import add_decorator
from flask_monitoringdashboard.core.timezone import to_local_datetime, to_utc_datetime
from flask_monitoringdashboard.core.utils import simplify
from flask_monitoringdashboard.database import Request
from flask_monitoringdashboard.database.count_group import count_requests_group, get_value
from flask_monitoringdashboard.database.data_grouped import (
get_endpoint_data_grouped,
get_user_data_grouped,
get_version_data_grouped,
)
from flask_monitoringdashboard.database.endpoint import (
get_last_requested,
get_endpoints,
get_endpoint_by_name,
update_endpoint,
)
from flask_monitoringdashboard.database.versions import get_first_requests
def get_endpoint_overview(session):
"""
:param session: session for the database
:return: A list of properties for each endpoint that is found in the database
"""
week_ago = datetime.datetime.utcnow() - datetime.timedelta(days=7)
now_local = to_local_datetime(datetime.datetime.utcnow())
today_local = now_local.replace(hour=0, minute=0, second=0, microsecond=0)
today_utc = to_utc_datetime(today_local)
# First flush last requested info to db
cache.flush_cache()
error_hits_criterion = and_(Request.status_code >= 400, Request.status_code < 600)
hits_today = count_requests_group(session, Request.time_requested > today_utc)
hits_today_errors = count_requests_group(
session, and_(Request.time_requested > today_utc, error_hits_criterion)
)
hits_week = count_requests_group(session, Request.time_requested > week_ago)
hits_week_errors = count_requests_group(
session, and_(Request.time_requested > week_ago, error_hits_criterion)
)
hits = count_requests_group(session)
median_today = get_endpoint_data_grouped(session, median, Request.time_requested > today_utc)
median_week = get_endpoint_data_grouped(session, median, Request.time_requested > week_ago)
median_overall = get_endpoint_data_grouped(session, median)
access_times = get_last_requested(session)
return [
{
'id': endpoint.id,
'name': endpoint.name,
'monitor': endpoint.monitor_level,
'color': get_color(endpoint.name),
'hits-today': get_value(hits_today, endpoint.id),
'hits-today-errors': get_value(hits_today_errors, endpoint.id),
'hits-week': get_value(hits_week, endpoint.id),
'hits-week-errors': get_value(hits_week_errors, endpoint.id),
'hits-overall': get_value(hits, endpoint.id),
'median-today': get_value(median_today, endpoint.id),
'median-week': get_value(median_week, endpoint.id),
'median-overall': get_value(median_overall, endpoint.id),
'last-accessed': get_value(access_times, endpoint.name, default=None),
}
for endpoint in get_endpoints(session)
]
def get_endpoint_users(session, endpoint_id, users):
"""
:param session: session for the database
:param endpoint_id: id for the endpoint
:param users: a list of users to be filtered on
:return: a list of dicts with the performance of each user
"""
times = get_user_data_grouped(
session, lambda x: simplify(x, 100), Request.endpoint_id == endpoint_id
)
first_requests = get_first_requests(session, endpoint_id)
return [
{
'user': u,
'date': get_value(first_requests, u),
'values': get_value(times, u),
'color': get_color(u),
}
for u in users
]
def get_endpoint_versions(session, endpoint_id, versions):
"""
:param session: session for the database
:param endpoint_id: id for the endpoint
:param versions: a list of version to be filtered on
:return: a list of dicts with the performance of each version
"""
times = get_version_data_grouped(
session, lambda x: simplify(x, 100), Request.endpoint_id == endpoint_id
)
first_requests = get_first_requests(session, endpoint_id)
return [
{
'version': v,
'date': get_value(first_requests, v),
'values': get_value(times, v),
'color': get_color(v),
}
for v in versions
]
def get_api_performance(session, endpoints):
"""
:param session: session for the database
:param endpoints: a list of endpoints, encoded by their name
:return: for every endpoint in endpoints, a list with the performance
"""
db_endpoints = [get_endpoint_by_name(session, end) for end in endpoints]
data = get_endpoint_data_grouped(session, lambda x: simplify(x, 10))
return [
{'name': end.name, 'values': get_value(data, end.id, default=[])}
for end in db_endpoints
]
def set_endpoint_rule(session, endpoint_name, monitor_level):
"""
:param session: session for the database
:param endpoint_name: name of the endpoint
:param monitor_level: integer, representing the monitoring-level
"""
update_endpoint(session, endpoint_name, value=monitor_level)
# Remove wrapper
original = getattr(config.app.view_functions[endpoint_name], 'original', None)
if original:
config.app.view_functions[endpoint_name] = original
session.commit()
add_decorator(get_endpoint_by_name(session, endpoint_name))
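# Hedged usage sketch (illustration only; `session_scope` is a placeholder for however the
# caller obtains a SQLAlchemy session in this project):
#     with session_scope() as session:
#         overview = get_endpoint_overview(session)
#         slowest = max(overview, key=lambda row: row['median-week'] or 0)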
| 37.190789
| 97
| 0.708473
|
769795123dd5ccc1d63a649d5fb3be723f16222f
| 721
|
py
|
Python
|
lib/config.py
|
PaulMndn/VRMLbot
|
c0e688d6f3458e1298b1ee613238a96a98a38e4f
|
[
"MIT"
] | null | null | null |
lib/config.py
|
PaulMndn/VRMLbot
|
c0e688d6f3458e1298b1ee613238a96a98a38e4f
|
[
"MIT"
] | null | null | null |
lib/config.py
|
PaulMndn/VRMLbot
|
c0e688d6f3458e1298b1ee613238a96a98a38e4f
|
[
"MIT"
] | null | null | null |
import json
from pathlib import Path
import logging
__all__ = [
"get_config"
]
log = logging.getLogger(__name__)
class Config:
def __init__(self):
self._path = Path("config.json")
if not self._path.exists():
log.critical("No config file found. Exiting.")
raise FileNotFoundError("Config file not found in root folder")
with open(str(self._path), "r") as f:
self._data = json.load(f)
self.token = self._data.get("token", None)
self.dev = self._data.get("dev", None)
self.debug_guilds = self._data.get("debug_guilds", None)
self.admin_id = self._data.get("admin_id", None)
def get_config():
return Config()
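# Hedged usage sketch (added for illustration): get_config() re-reads config.json on every
# call, so callers typically fetch it once at startup, e.g.
#     config = get_config()
#     if config.token is None:
#         raise SystemExit("Add a 'token' entry to config.json before starting the bot.")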
| 27.730769
| 75
| 0.619972
|
2f591bbccdc11f47fdcdfb2e0df4ac161f9f7092
| 644
|
py
|
Python
|
cms/test_utils/project/sampleapp/cms_app.py
|
tonatos/django-cms
|
96003df57c2dc0215bf109dc74a85aa0c798d1b4
|
[
"BSD-3-Clause"
] | 1
|
2016-08-23T16:20:29.000Z
|
2016-08-23T16:20:29.000Z
|
cms/test_utils/project/sampleapp/cms_app.py
|
tonatos/django-cms
|
96003df57c2dc0215bf109dc74a85aa0c798d1b4
|
[
"BSD-3-Clause"
] | null | null | null |
cms/test_utils/project/sampleapp/cms_app.py
|
tonatos/django-cms
|
96003df57c2dc0215bf109dc74a85aa0c798d1b4
|
[
"BSD-3-Clause"
] | null | null | null |
from cms.app_base import CMSApp
from cms.test_utils.project.sampleapp.menu import SampleAppMenu
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class SampleApp(CMSApp):
name = _("Sample App")
urls = ["cms.test_utils.project.sampleapp.urls"]
menus = [SampleAppMenu]
apphook_pool.register(SampleApp)
class NamespacedApp(CMSApp):
name = _("Namespaced App")
urls = [
"cms.test_utils.project.sampleapp.ns_urls",
"cms.test_utils.project.sampleapp.urls"
]
menus = [SampleAppMenu]
app_name = 'namespaced_app_ns'
apphook_pool.register(NamespacedApp)
| 26.833333
| 63
| 0.740683
|
baa62633a661cb44923880e25c832e5a0b84c950
| 1,405
|
py
|
Python
|
tests/test_response.py
|
MoonMoon1919/peyton
|
950f426332496de75ef26d196e67d7e469f805bc
|
[
"MIT"
] | 1
|
2020-09-20T21:16:32.000Z
|
2020-09-20T21:16:32.000Z
|
tests/test_response.py
|
MoonMoon1919/peyton
|
950f426332496de75ef26d196e67d7e469f805bc
|
[
"MIT"
] | 2
|
2021-05-04T14:43:13.000Z
|
2021-06-02T14:12:23.000Z
|
tests/test_response.py
|
MoonMoon1919/peyton
|
950f426332496de75ef26d196e67d7e469f805bc
|
[
"MIT"
] | null | null | null |
"""."""
import base64
import json
import sys
from os import path
import pytest
from peyton.response import Response
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
def test_response_obj():
resp = Response(status_code=200, headers={}, body={"message": "received GET"})
j = resp.to_json()
assert resp.statusCode == 200
assert type(resp.statusCode) == int
assert resp.headers == {}
assert type(resp.headers) == dict
assert resp.isBase64Encoded == False
assert type(resp.isBase64Encoded) == bool
assert resp.body["message"] == "received GET"
assert type(resp.body) == dict
# Test output of to_json()
assert type(j["body"]) == str
def test_body_type():
"""Tests that response object balks on improper type for body."""
with pytest.raises(TypeError):
resp = Response(status_code=200, headers={}, body="hello world")
def test_status_code_type():
"""Tests that response object balks on improper type for status_code."""
with pytest.raises(TypeError):
resp = Response(status_code="foo", headers={}, body={"message": "received GET"})
def test_base64_encoding():
resp = Response(
status_code=200,
headers={},
body={"message": "received GET"},
base64_encode=True,
)
resp = resp.to_json()
assert resp["body"] == b"eyJtZXNzYWdlIjogInJlY2VpdmVkIEdFVCJ9"
| 24.649123
| 88
| 0.666904
|
35d53641baad4fa7fac3f826f2666679065384ec
| 460
|
py
|
Python
|
25.py
|
fptitsyn/task-17
|
7345255b2b614d0b91431a8d91b40b2f4d22c5ac
|
[
"MIT"
] | null | null | null |
25.py
|
fptitsyn/task-17
|
7345255b2b614d0b91431a8d91b40b2f4d22c5ac
|
[
"MIT"
] | null | null | null |
25.py
|
fptitsyn/task-17
|
7345255b2b614d0b91431a8d91b40b2f4d22c5ac
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
path = input("Enter a path to file: ")
with open(path, "r", encoding="utf-8") as f:
a = [int(i) for i in f]
count = 0
max_dif = 0
for i in range(len(a) - 1):
for j in range(i + 1, len(a)):
if ((a[i] - a[j]) % 46 == 0) and ((a[i] % 13 == 0) or (a[j] % 13 == 0)):
count += 1
max_dif = max(max_dif, a[i] - a[j])
print(count, max_dif)
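# Hedged worked example (comments only, not read from any file): with a = [104, 12, 58],
# the pairs (104, 12) and (104, 58) have differences 92 and 46 (both multiples of 46)
# and 104 is divisible by 13, so count = 2 and max_dif = 92; the pair (12, 58) also
# differs by a multiple of 46, but neither element is divisible by 13, so it is skipped.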
| 28.75
| 85
| 0.430435
|
8f4ec104650079836329c92bf19518149ae7d1c3
| 26,507
|
py
|
Python
|
python/tvm/relay/transform/transform.py
|
akosik-anyvision/incubator-tvm
|
e1b11712ac09c32614483d24a4c7e0245ee4cb4b
|
[
"Apache-2.0"
] | 9
|
2019-12-17T08:03:54.000Z
|
2022-01-19T02:34:23.000Z
|
python/tvm/relay/transform/transform.py
|
akosik-anyvision/incubator-tvm
|
e1b11712ac09c32614483d24a4c7e0245ee4cb4b
|
[
"Apache-2.0"
] | 2
|
2020-06-18T21:15:42.000Z
|
2020-06-24T17:38:37.000Z
|
python/tvm/relay/transform/transform.py
|
akosik-anyvision/incubator-tvm
|
e1b11712ac09c32614483d24a4c7e0245ee4cb4b
|
[
"Apache-2.0"
] | 3
|
2020-10-04T20:30:18.000Z
|
2022-01-24T18:03:52.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, missing-docstring, unused-import
"""
Relay pass transformation infrastructure.
"""
import types
import inspect
import functools
import warnings
import tvm.ir
from tvm import te
from tvm.runtime import ndarray as _nd
from tvm import relay
from . import _ffi_api
def build_config(opt_level=2,
required_pass=None,
disabled_pass=None,
trace=None):
"""Configure the build behavior by setting config variables. This function
will be deprecated in TVM v0.7. Instead, we should directly use
tvm.transform.PassContext.
Parameters
----------
opt_level: int, optional
Optimization level. The optimization pass name and level are as the
following:
.. code-block:: python
OPT_PASS_LEVEL = {
"SimplifyInference": 0,
"OpFusion": 1,
"FoldConstant": 2,
"FoldScaleAxis": 3,
"AlterOpLayout": 3,
"CanonicalizeOps": 3,
"CanonicalizeCast": 3,
"EliminateCommonSubexpr": 3,
"CombineParallelConv2D": 4,
"CombineParallelDense": 4,
"FastMath": 4
}
required_pass: set of str, optional
Optimization passes that are required regardless of optimization level.
disabled_pass: set of str, optional
Optimization passes to be disabled during optimization.
trace: Callable[[IRModule, PassInfo, bool], None]
A tracing function for debugging or introspection.
Returns
-------
pass_context: PassContext
The pass context for optimizations.
"""
warnings.warn("relay.build_config will be deprecated. Please use \
tvm.transform.PassContext directly", DeprecationWarning)
return tvm.transform.PassContext(opt_level, required_pass, disabled_pass, trace)
@tvm._ffi.register_object("relay.FunctionPass")
class FunctionPass(tvm.ir.transform.Pass):
"""A pass that works on each tvm.relay.Function in a module. A function
pass class should be created through `function_pass`.
"""
def InferType():
"""Infer the type of an expr.
Returns
-------
ret : tvm.transform.Pass
The registered type inference pass.
"""
return _ffi_api.InferType()
def FoldScaleAxis():
"""Fold the scaling of axis into weights of conv2d/dense. This pass will
invoke both forward and backward scale folding.
Returns
-------
ret : tvm.transform.Pass
The registered pass to fold expressions.
Note
----
Internally, we will call backward_fold_scale_axis before using
forward_fold_scale_axis as backward folding targets the common conv->bn
pattern.
"""
return _ffi_api.FoldScaleAxis()
def BackwardFoldScaleAxis():
"""Backward fold axis scaling into weights of conv2d/dense.
Returns
-------
ret : tvm.transform.Pass
The registered pass to backward fold expressions.
Note
----
It is recommended to call backward_fold_scale_axis
before using forward_fold_scale_axis as backward folding targets the common
conv->bn pattern.
"""
return _ffi_api.BackwardFoldScaleAxis()
def RemoveUnusedFunctions(entry_functions=None):
"""Remove unused global relay functions in a relay module.
Parameters
----------
entry_functions: list[string]
The set of entry functions to start from.
Returns
-------
ret : tvm.transform.Pass
The registered pass to remove unused functions.
"""
if entry_functions is None:
entry_functions = ['main']
return _ffi_api.RemoveUnusedFunctions(entry_functions)
def ForwardFoldScaleAxis():
"""Fold the scaling of axis into weights of conv2d/dense.
Returns
-------
ret : tvm.transform.Pass
The registered pass to forward fold expressions.
Note
----
It is recommended to call backward_fold_scale_axis
before using forward_fold_scale_axis, as backward folding targets the
common conv->bn pattern.
"""
return _ffi_api.ForwardFoldScaleAxis()
def SimplifyInference():
"""Simplify the data-flow graph for inference phase. An simplified expression
which is semantically equal to the input expression will be returned.
Returns
-------
ret: tvm.transform.Pass
The registered pass to perform operator simplification.
"""
return _ffi_api.SimplifyInference()
def FastMath():
""" Converts the expensive non linear functions to their fast but approximate counterparts.
Returns
-------
ret: tvm.transform.Pass
The registered pass to perform fast math operations.
"""
return _ffi_api.FastMath()
def CanonicalizeOps():
"""Canonicalize special operators to basic operators.
    This can simplify subsequent analysis, e.g. expanding bias_add into
expand_dims and broadcast_add.
Returns
-------
ret: tvm.transform.Pass
The registered pass performing the canonicalization.
"""
return _ffi_api.CanonicalizeOps()
def DeadCodeElimination(inline_once=False):
"""Remove expressions that do not have any users (dead code).
Parameters
----------
inline_once: Optional[Bool]
Whether to inline binding that occurs only once.
Returns
-------
ret: tvm.transform.Pass
The registered pass that eliminates the dead code in a Relay program.
"""
return _ffi_api.DeadCodeElimination(inline_once)
def LazyGradientInit():
"""Reduces memory usage of gradient tensors
Parameters
----------
Returns
-------
ret: tvm.transform.Pass
A pass which delays and/or reduces memory allocation,
        by lazily allocating zero- or one-filled tensors.
"""
return _ffi_api.LazyGradientInit()
def FoldConstant():
"""Fold the constant expressions in a Relay program.
Returns
-------
ret : tvm.transform.Pass
The registered pass for constant folding.
"""
return _ffi_api.FoldConstant()
def FuseOps(fuse_opt_level=-1):
"""Fuse operators in an expr to a larger operator according to some rules.
Parameters
----------
fuse_opt_level : int
The level of fuse optimization. -1 indicates that the level will be
inferred from pass context.
Returns
-------
ret : tvm.transform.Pass
The registered pass for operator fusion.
"""
return _ffi_api.FuseOps(fuse_opt_level)
def CombineParallelConv2D(min_num_branches=3):
"""Combine multiple conv2d operators into one.
Parameters
----------
min_num_branches : int
The minimum number of required parallel branches for performing this
optimization.
Returns
-------
ret: tvm.transform.Pass
The registered pass that combines parallel conv2d operators.
"""
return _ffi_api.CombineParallelConv2D(min_num_branches)
def CombineParallelDense(min_num_branches=3):
"""Combine multiple dense operators into one. For example:
.. code-block
data
/ \
dense (2,2) dense (2,2)
| |
elemwise/bcast (2,2) elemwise/bcast (2,2)
Would become:
.. code-block
data
|
batch_matmul+elemwise/bcast (2,2,2)
Parameters
----------
min_num_branches : int
The minimum number of required parallel branches for performing this
optimization.
Returns
-------
ret: tvm.transform.Pass
The registered pass that combines parallel dense operators.
"""
return _ffi_api.CombineParallelDense(min_num_branches)
def AlterOpLayout():
"""Alternate the layouts of operators or replace primitive operators with
other expressions.
This pass can be used for computing convolution in custom layouts or
other general weight pre-transformation.
Returns
-------
ret : tvm.transform.Pass
The registered pass that alters the layout of operators.
"""
return _ffi_api.AlterOpLayout()
def ConvertLayout(desired_layouts):
""" Given a dest layout, this pass transforms the expr such that most of the ops input data
layout is changed to the dest layout. In ideal situation, there are only 2 layout transforms,
one at the start and one at the end.
This pass is not a part of relay.build and is expected to be called between framework-relay
    parser and the relay.build call. This is very helpful for hardware backends that support/prefer only
    one type of data layout.
RFC - https://discuss.tvm.ai/t/layout-conversion-pass/4009
This pass uses most of the AlterOpLayout and InferCorrectLayout infrastructure. We can define
new layouts for conv2d ops for now. Most of the other operators try to adapt to their input
layout using the InferCorrectLayout infrastructure.
Parameters
----------
desired_layouts : map of op_name to list of layouts
Specify a mapping of operator names to a list of layouts to convert to, in the order
        defined by the operator. An example for nn.conv2d could be: {"nn.conv2d": ["NHWC", "OHWI"]},
where the first item in the list specifies the data layout and the second specifies the
kernel layout.
Returns
-------
pass: FunctionPass
The pass.
"""
return _ffi_api.ConvertLayout(desired_layouts)
def Legalize(legalize_map_attr_name="FTVMLegalize"):
"""Legalizes an expression with another expression.
This pass can be used to replace an expr with another expr for target
    dependent optimizations. For example, one expr, though semantically
equivalent to the other, can have better performance on a target. This pass
can be used to legalize the expr in a target-dependent manner.
Parameters
----------
legalize_map_attr_name : str
The Op's attr name which corresponds to the legalize rule function.
Returns
-------
ret : tvm.transform.Pass
The registered pass that rewrites an expr.
"""
return _ffi_api.Legalize(legalize_map_attr_name)
def MergeComposite(pattern_table):
"""Merge multiple operators into a single composite relay function.
Parameters
----------
pattern_table : List[Tuple[str, tvm.relay.dataflow_pattern.DFPattern, Function]]
A list of (pattern_name, pattern, check) tuples.
The order of the patterns in the list will determine the order
of priority in which they are matched.
'check' is a function to check whether an extracted pattern matches.
It can be implemented by pattern writer but if not specified it will
always return True.
Returns
-------
ret : tvm.transform.Pass
The registered pass that merges operators into a single composite
relay function.
"""
pattern_names = []
patterns = []
checks = []
for tup in pattern_table:
if len(tup) == 2:
pattern_name, pattern = tup
check = lambda extract: True
elif len(tup) == 3:
pattern_name, pattern, check = tup
pattern_names.append(pattern_name)
patterns.append(pattern)
checks.append(check)
return _ffi_api.MergeComposite(pattern_names, patterns, *checks)
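# Hedged illustration (an assumption, not part of this module): a pattern_table entry for a
# conv2d followed by bias_add could be built with the dataflow pattern API roughly as
#     from tvm.relay.dataflow_pattern import is_op, wildcard
#     conv = is_op("nn.conv2d")(wildcard(), wildcard())
#     conv_bias = is_op("nn.bias_add")(conv, wildcard())
#     mod = MergeComposite([("conv2d_bias_add", conv_bias)])(mod)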
def MergeCompilerRegions():
"""Merge together compiler regions.
Returns
-------
ret : tvm.transform.Pass
The registered pass that merges compiler regions.
"""
return _ffi_api.MergeCompilerRegions()
def RewriteAnnotatedOps(fallback_device):
"""Rewrite the annotated program where annotation operators, e.g.
    `on_device`, mark which device an expression should be scheduled to.
This pass helps heterogeneous execution where different operators may need
to be allocated on various devices.
Parameters
----------
fallback_device : int
The fallback device type. It is also used as the default device for
operators with no annotated device.
Returns
-------
ret: tvm.transform.Pass
The registered pass that rewrites an expression with annotated
`on_device` operators.
"""
return _ffi_api.RewriteDeviceAnnotation(fallback_device)
def ToANormalForm():
"""Turn Graph Normal Form expression into A Normal Form Expression.
The scope of the root expression is the global scope.
    The scope of any non-root expression is the least common ancestor of all of its scopes.
Values are ordered by post-DFS order in each scope.
Returns
-------
ret: Union[tvm.transform.Pass, tvm.relay.Expr]
The registered pass that transforms an expression into A Normal Form.
"""
return _ffi_api.ToANormalForm()
def ToCPS(expr, mod=None):
"""
    Turn an expression into continuation-passing style (CPS).
Every intermediate compute will be passed to a continuation.
Returns
-------
result: tvm.transform.Pass
The registered pass that transforms an expression into CPS.
"""
return _ffi_api.to_cps(expr, mod)
def EtaExpand(expand_constructor=False, expand_global_var=False):
"""Add abstraction over a constructor or global variable bound to a function
Parameters
----------
expand_constructor: bool
Whether to expand constructors.
expand_global_var: bool
Whether to expand global variables.
Returns
-------
ret: tvm.transform.Pass
The registered pass that eta expands an expression.
"""
return _ffi_api.EtaExpand(expand_constructor, expand_global_var)
def ToGraphNormalForm():
"""Turn a Relay program in A Normal Form into Graph Normal Form
Returns
-------
ret : tvm.transform.Pass
The registered pass that transforms an expression into Graph Normal Form.
"""
return _ffi_api.ToGraphNormalForm()
def EliminateCommonSubexpr(fskip=None):
"""Eliminate common subexpressions.
Parameters
----------
fskip: Callable
The callback function that decides whether an expression should be
skipped.
Returns
-------
ret : tvm.transform.Pass
The registered pass that eliminates common subexpressions.
"""
return _ffi_api.EliminateCommonSubexpr(fskip)
def PartialEvaluate():
"""Evaluate the static fragment of the code.
Note
----
This transformation could be either `Module -> Module` or `Expr -> Expr`.
It will directly transform the input expression to a new one if the target
expression is provided. Otherwise, it will rely on the pass manager to
carry out transformation.
Returns
-------
ret: tvm.transform.Pass
The registered pass that performs partial evaluation on an expression.
"""
return _ffi_api.PartialEvaluate()
def CanonicalizeCast():
"""
Canonicalize cast expressions to make operator fusion more efficient.
Returns
-------
ret : tvm.transform.Pass
The registered pass that canonicalizes cast expression.
"""
return _ffi_api.CanonicalizeCast()
def LambdaLift():
"""
Lift the closure to global function.
Returns
-------
ret : tvm.transform.Pass
The registered pass that lifts the lambda function.
"""
return _ffi_api.LambdaLift()
def PartitionGraph():
"""Partition a Relay program into regions that can be executed on different
backends.
Returns
-------
ret: tvm.transform.Pass
The registered pass that partitions the Relay program.
"""
return _ffi_api.PartitionGraph()
def AnnotateTarget(targets):
"""Annotate ops in an experession with a provied compiler/target and then
use it for codegen.
Parameters
----------
targets : str or List[str]
The list of target compilers used for codegen.
Returns
-------
ret : tvm.transform.Pass
        The annotated pass that wraps ops with subgraph_start and
subgraph_end.
"""
if isinstance(targets, str):
targets = [targets]
return _ffi_api.AnnotateTarget([tvm.runtime.container.String(t) for t in targets])
def Inline():
"""Perform inlining on the given Relay IR module. The global functions that
    are marked as `inline` should always be inlined. A cost model will be
needed in the future to decide if it is profitable to inline the function.
Returns
-------
ret: tvm.transform.Pass
The registered pass that performs inlining for a Relay IR module.
"""
return _ffi_api.Inline()
def gradient(expr, mod=None, mode='higher_order'):
"""
Transform the input function,
    returning a function that calculates the original result,
    paired with the gradient of the input.
Parameters
----------
expr : tvm.relay.Expr
The input expression, which is a Function or a GlobalVar.
mod : Optional[tvm.IRModule]
mode : Optional[String]
The mode of the automatic differentiation algorithm.
'first_order' only works on first order code, but will not produce
reference nor closure.
'higher_order' works on all code using reference and closure.
Returns
-------
expr : tvm.relay.Expr
The transformed expression.
"""
if mode == 'first_order':
return _ffi_api.first_order_gradient(expr, mod)
if mode == 'higher_order':
return _ffi_api.gradient(expr, mod)
raise Exception('unknown mode')
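# Hedged usage sketch (comments only; shapes and ops are arbitrary): differentiating a small
# function might look like
#     x = relay.var("x", shape=(1, 4))
#     fn = relay.Function([x], relay.sum(x * x))
#     bwd = gradient(fn, mode='first_order')
#     # `bwd` computes the original result paired with the gradient w.r.t. `x`.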
def to_cps(func, mod=None):
"""
Turn expression into CPS expression.
Every intermediate compute will be passed to a continuation.
Parameters
----------
func: tvm.relay.Function
The input function.
mod: Optional[tvm.IRModule]
The global module.
Returns
-------
result: tvm.relay.Function
The output function.
"""
use_mod = mod if mod is not None else tvm.ir.IRModule()
return _ffi_api.to_cps(func, use_mod)
def un_cps(func):
"""
Turn an cps function into a Function without the continuation argument.
Note that this will not give the exact same interface as before cps:
If the input/output is higher order, they will still be in cps form.
Parameters
----------
func: tvm.relay.Function
The input function
Returns
-------
result: tvm.relay.Function
The output function
"""
return _ffi_api.un_cps(func)
def _wrap_class_function_pass(pass_cls, pass_info):
"""Wrap a python class as function pass"""
class PyFunctionPass(FunctionPass):
"""Internal wrapper class to create a class instance."""
def __init__(self, *args, **kwargs):
            # initialize handle in case pass_cls creation fails
self.handle = None
inst = pass_cls(*args, **kwargs)
# it is important not to capture self to
# avoid a cyclic dependency
def _pass_func(func, mod, ctx):
return inst.transform_function(func, mod, ctx)
self.__init_handle_by_constructor__(
_ffi_api.MakeFunctionPass, _pass_func, pass_info)
self._inst = inst
def __getattr__(self, name):
# fall back to instance attribute if there is not any
return self._inst.__getattribute__(name)
functools.update_wrapper(PyFunctionPass.__init__, pass_cls.__init__)
PyFunctionPass.__name__ = pass_cls.__name__
PyFunctionPass.__doc__ = pass_cls.__doc__
PyFunctionPass.__module__ = pass_cls.__module__
return PyFunctionPass
def function_pass(pass_func=None, opt_level=None, name=None, required=None):
"""Decorate a function pass.
This function returns a callback when pass_func
is provided. Otherwise, it returns the created function pass using the
given optimization function.
Parameters
----------
pass_func : Optional[Callable[(Function, Module, PassContext) -> Function]]
The transformation function or class.
opt_level : int
The optimization level of this module pass.
name : Optional[str]
The name of the function pass. The name could be empty. In this case, the
name of the optimization function will be used as the pass name.
required : Optional[List[str]]
The list of passes that the module pass is dependent on.
Returns
-------
create_function_pass : Union[Callable, FunctionPass]
A decorator will be returned if pass_func is not provided,
otherwise return the decorated result.
The returned decorator has two behaviors depending on the input:
A new FunctionPass will be returned when we decorate a pass function.
A new FunctionPass class will be returned when we decorate a class type.
Examples
--------
The following code block decorates a function pass class.
.. code-block:: python
@relay.transform.function_pass(opt_level=1)
class TestReplaceFunc:
def __init__(self, new_func):
self.new_func = new_func
def transform_function(self, func, mod, ctx):
# just for demo purposes
# transform func to new_func
return self.new_func
x = relay.var("x", shape=(10, 20))
f1 = relay.Function([x], x)
f2 = relay.Function([x], relay.log(x))
# fpass is now a special pass that replaces every
# function to f1
fpass = TestReplaceFunc(f1)
# now every function in input_mod is replaced by f1
res_mod = fpass(input_mod)
The following code creates a function pass by decorating
a user defined transform function.
.. code-block:: python
@relay.transform.function_pass(opt_level=2)
def transform(func, mod, ctx):
# my transformations here.
return func
function_pass = transform
assert isinstance(function_pass, transform.FunctionPass)
assert function_pass.info.opt_level == 2
        # Given a module m, the optimization could be invoked as follows:
updated_mod = function_pass(m)
# Now constant folding should have been applied to every function in
# the provided module m. And the updated module will be returned.
"""
if opt_level is None:
raise ValueError("Please provide opt_level for the funtion pass.")
required = required if required else []
if not isinstance(required, (list, tuple)):
raise TypeError("Required is expected to be the type of " +
"list/tuple.")
def create_function_pass(pass_arg):
"""Internal function that creates a function pass"""
fname = name if name else pass_arg.__name__
info = tvm.transform.PassInfo(opt_level, fname, required)
if inspect.isclass(pass_arg):
return _wrap_class_function_pass(pass_arg, info)
if not isinstance(pass_arg, (types.FunctionType, types.LambdaType)):
raise TypeError("pass_func must be a callable for Module pass")
return _ffi_api.MakeFunctionPass(pass_arg, info)
if pass_func:
return create_function_pass(pass_func)
return create_function_pass
@function_pass(opt_level=1)
class ChangeBatch:
"""
Change the batch size.
Parameters
----------
data: Dict[relay.Var, int]
A dictionary of all the params to change.
        The keys are all params, and the values are which dimension holds the batch.
batch_size: int
The batch size to change to.
Returns
-------
pass: FunctionPass
The pass.
"""
def __init__(self, data, batch_size=16):
self.data = data
self.batch_size = batch_size
def transform_function(self, func, mod, ctx):
func = relay.Function(func.params, func.body, None, func.type_params, func.attrs)
change_batch = self
class ChangeBatchMutator(tvm.relay.ExprMutator):
def visit_var(self, var):
if var in change_batch.data:
ty = var.type_annotation
new_shape = list(ty.shape)
new_shape[change_batch.data[var]] = change_batch.batch_size
return relay.Var(var.name_hint, relay.TensorType(new_shape, ty.dtype))
return var
return ChangeBatchMutator().visit(func)
def DenseToSparse(weight_name, weight_shape):
"""
    Rewrite qualifying ```nn.dense``` operations to ```nn.sparse_dense```
This pass is used in ```data_dep_optimization.bsr_dense```
Parameters of this pass is generated by ```analysis.sparse_dense.process_params```
Parameters
----------
weight_name: Array[String]
        Names of weights which qualify the sparse constraints
weight_shape: Array[Array[IntImm]]
Weights shape in BSR format.
Returns
-------
ret : tvm.transform.Pass
The registered DenseToSparse pass.
"""
return _ffi_api.DenseToSparse(weight_name, weight_shape)
def SimplifyFCTranspose(target_weight_name):
"""
Rewrite ```y = nn.dense(x, transpose(w, [1, 0]))``` to ```y = nn.dense(x, wt)```
This pass is used in ```data_dep_optimization.simplify_fc_transpose```
Parameters
----------
weight_name: Array[String]
        Names of weights that match ```y = nn.dense(x, transpose(w, [1, 0]))```
        This parameter is generated by the ```analysis.search_fc_transpose``` function
Returns
-------
ret : tvm.transform.Pass
The registered SimplifyFCTranspose pass.
"""
return _ffi_api.SimplifyFCTranspose(target_weight_name)
| 29.985294
| 100
| 0.664315
|
04d0524c6466fca5fc05e30dd5b36db0d1461df7
| 1,693
|
py
|
Python
|
Ensemble Methods/RandomForest_Classification.py
|
AbuBakkar32/ML-DL-NLP-TP-FE-MP
|
2525b6b32fc1876e65643b8c221ffda591981623
|
[
"MIT"
] | 2
|
2020-10-20T10:35:31.000Z
|
2020-11-19T14:08:05.000Z
|
Ensemble Methods/RandomForest_Classification.py
|
AbuBakkar32/ML-DL-NLP-TP-FE-MP
|
2525b6b32fc1876e65643b8c221ffda591981623
|
[
"MIT"
] | null | null | null |
Ensemble Methods/RandomForest_Classification.py
|
AbuBakkar32/ML-DL-NLP-TP-FE-MP
|
2525b6b32fc1876e65643b8c221ffda591981623
|
[
"MIT"
] | null | null | null |
#Import Libraries
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
dataset = pd.read_csv('BankNote_Authentication.csv')
X = dataset.iloc[:, [0,1]].values
y = dataset.iloc[:, 4].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
from sklearn.ensemble import RandomForestClassifier
rf_c = RandomForestClassifier(n_estimators = 200, random_state = 2)
rf_c.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
y_pred_test = rf_c.predict(X_test)
test_acc = accuracy_score(y_test, y_pred_test)
print(test_acc)
from matplotlib.colors import ListedColormap
import numpy as np
#Define Variables
clf = rf_c
h = 0.01
X_plot, z_plot = X_test, y_test
#Standard Template to draw graph
x_min, x_max = X_plot[:, 0].min() - 1, X_plot[:, 0].max() + 1
y_min, y_max = X_plot[:, 1].min() - 1, X_plot[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh
Z = clf.predict(np.array([xx.ravel(), yy.ravel()]).T)
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z,
alpha = 0.7, cmap = ListedColormap(('red', 'green')))
for i, j in enumerate(np.unique(z_plot)):
plt.scatter(X_plot[z_plot == j, 0], X_plot[z_plot == j, 1],
c = ['red', 'green'][i], cmap = ListedColormap(('red', 'green')), label = j)
#X[:, 0], X[:, 1]
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title('Random Forest Classification')
plt.xlabel('variance')
plt.ylabel('skewness')
plt.legend()
plt.show()
| 29.701754
| 93
| 0.683993
|
b7a56a7dc0f607168fc269e4845fedbb9b650d86
| 21,176
|
bzl
|
Python
|
third_party/repositories/scala_2_13.bzl
|
renovate-bot/rules_scala
|
6e37eac5194d535f59c4a2f363e67207fd004aca
|
[
"Apache-2.0"
] | 326
|
2016-02-24T18:28:10.000Z
|
2022-03-30T08:51:08.000Z
|
third_party/repositories/scala_2_13.bzl
|
renovate-bot/rules_scala
|
6e37eac5194d535f59c4a2f363e67207fd004aca
|
[
"Apache-2.0"
] | 1,157
|
2016-02-24T04:26:27.000Z
|
2022-03-31T05:59:14.000Z
|
third_party/repositories/scala_2_13.bzl
|
renovate-bot/rules_scala
|
6e37eac5194d535f59c4a2f363e67207fd004aca
|
[
"Apache-2.0"
] | 262
|
2016-02-24T18:29:21.000Z
|
2022-03-24T21:39:20.000Z
|
artifacts = {
"io_bazel_rules_scala_scala_library": {
"artifact": "org.scala-lang:scala-library:2.13.6",
"sha256": "f19ed732e150d3537794fd3fe42ee18470a3f707efd499ecd05a99e727ff6c8a",
},
"io_bazel_rules_scala_scala_compiler": {
"artifact": "org.scala-lang:scala-compiler:2.13.6",
"sha256": "310d263d622a3d016913e94ee00b119d270573a5ceaa6b21312d69637fd9eec1",
},
"io_bazel_rules_scala_scala_reflect": {
"artifact": "org.scala-lang:scala-reflect:2.13.6",
"sha256": "f713593809b387c60935bb9a940dfcea53bd0dbf8fdc8d10739a2896f8ac56fa",
},
"io_bazel_rules_scala_scala_parallel_collections": {
"artifact": "org.scala-lang.modules:scala-parallel-collections_2.13:0.2.0",
"sha256": "d15f22f1308b98e9ac52a3d1ac8d582d548d6d852b1116cbdf5a50f431246ed1",
},
#
"io_bazel_rules_scala_scalatest": {
"artifact": "org.scalatest:scalatest_2.13:3.1.2",
"sha256": "94b636ce8dc2caed3069069c97b94538e60e9a400833fb8086c8271978ad2c21",
},
"io_bazel_rules_scala_scalactic": {
"artifact": "org.scalactic:scalactic_2.13:3.1.2",
"sha256": "6977c34cabeacca7c0d8f3be0c3f5644ddd922b4af7c32d5a59ca561807e728d",
},
"io_bazel_rules_scala_scala_xml": {
"artifact": "org.scala-lang.modules:scala-xml_2.13:1.3.0",
"sha256": "6d96d45a7fc6fc7ab69bdbac841b48cf67ab109f048c8db375ae4effae524f39",
},
"io_bazel_rules_scala_scala_parser_combinators": {
"artifact": "org.scala-lang.modules:scala-parser-combinators_2.13:1.1.2",
"sha256": "5c285b72e6dc0a98e99ae0a1ceeb4027dab9adfa441844046bd3f19e0efdcb54",
},
"org_scalameta_common": {
"artifact": "org.scalameta:common_2.13:4.3.24",
"sha256": "bb8ffbca69b42417aa5d3c73d4434b73dbbeb66748abc44a024090ff3aa38bd3",
"deps": [
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
],
},
"org_scalameta_fastparse": {
"artifact": "org.scalameta:fastparse_2.13:1.0.1",
"sha256": "b43b99244d5b51948daf1467083b3850dc2727c604de98dc426dec14244fd18e",
"deps": [
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
"@org_scalameta_fastparse_utils",
],
},
"org_scalameta_fastparse_utils": {
"artifact": "org.scalameta:fastparse-utils_2.13:1.0.1",
"sha256": "9d650543903836684a808bb4c5ff775a4cae4b38c3a47ce946b572237fde340f",
"deps": [
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
],
},
"org_scala_lang_modules_scala_collection_compat": {
"artifact": "org.scala-lang.modules:scala-collection-compat_2.13:2.2.0",
"sha256": "7f601d3a6d699433ddaf549ffa1441dcbe00bc95f4035add9776772053e9f93f",
"deps": [
"@io_bazel_rules_scala_scala_library",
],
},
"org_scalameta_parsers": {
"artifact": "org.scalameta:parsers_2.13:4.3.24",
"sha256": "5faebb22a064f38a4be19fdadb288dc771c1e362d0c4d2f46546f08e4f43c091",
"deps": [
"@io_bazel_rules_scala_scala_library",
"@org_scalameta_trees",
],
},
"org_scalameta_scalafmt_core": {
"artifact": "org.scalameta:scalafmt-core_2.13:2.7.4",
"sha256": "873d98275f75b67c1e01094a24bafb29a588b7d05fdc508d3b1ba02f08d0c0d8",
"deps": [
"@com_geirsson_metaconfig_core",
"@com_geirsson_metaconfig_typesafe_config",
"@io_bazel_rules_scala_scala_library",
"@io_bazel_rules_scala_scala_reflect",
"@org_scalameta_scalameta",
"@org_scala_lang_modules_scala_collection_compat",
"@io_bazel_rules_scala_scala_parallel_collections",
],
},
"org_scalameta_scalameta": {
"artifact": "org.scalameta:scalameta_2.13:4.3.24",
"sha256": "d73eaf491eb588a2bd78aeba443e62bc95f1a368051d9e81607192c88fa4c61c",
"deps": [
"@io_bazel_rules_scala_scala_library",
"@org_scala_lang_scalap",
"@org_scalameta_parsers",
],
},
"org_scalameta_trees": {
"artifact": "org.scalameta:trees_2.13:4.3.24",
"sha256": "d49d2b085ae62e9317dd2a4e8b14be5b1ecbec2db392fa81cab86ad2bd7c2c68",
"deps": [
"@com_thesamet_scalapb_scalapb_runtime",
"@io_bazel_rules_scala_scala_library",
"@org_scalameta_common",
"@org_scalameta_fastparse",
],
},
"org_typelevel_paiges_core": {
"artifact": "org.typelevel:paiges-core_2.13:0.2.4",
"sha256": "1f55b6f90e370a1c18f7350f6925626b5cde69b17560bd2e2a33137780b210df",
"deps": [
"@io_bazel_rules_scala_scala_library",
],
},
"com_typesafe_config": {
"artifact": "com.typesafe:config:1.3.3",
"sha256": "b5f1d6071f1548d05be82f59f9039c7d37a1787bd8e3c677e31ee275af4a4621",
},
"org_scala_lang_scalap": {
"artifact": "org.scala-lang:scalap:2.13.6",
"sha256": "bbfa4ab0603f510b16114371a35b9c34d20946edfc1aa8f3fd31014b9f06b5b1",
"deps": [
"@io_bazel_rules_scala_scala_compiler",
],
},
"com_thesamet_scalapb_lenses": {
"artifact": "com.thesamet.scalapb:lenses_2.13:0.9.0",
"sha256": "10830d6511fc21b997c4acdde6f6700e87ee6791cbe6278f5acd7b352670a88f",
"deps": [
"@io_bazel_rules_scala_scala_library",
],
},
"com_thesamet_scalapb_scalapb_runtime": {
"artifact": "com.thesamet.scalapb:scalapb-runtime_2.13:0.9.0",
"sha256": "10830d6511fc21b997c4acdde6f6700e87ee6791cbe6278f5acd7b352670a88f",
"deps": [
"@com_google_protobuf_protobuf_java",
"@com_lihaoyi_fastparse",
"@com_thesamet_scalapb_lenses",
"@io_bazel_rules_scala_scala_library",
],
},
"com_lihaoyi_fansi": {
"artifact": "com.lihaoyi:fansi_2.13:0.2.9",
"sha256": "c347b6452152cf55d401090d3d3c230d96a5f9b6792d1bdb9b760e0d5187ed30",
"deps": [
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
],
},
"com_lihaoyi_fastparse": {
"artifact": "com.lihaoyi:fastparse_2.13:2.1.3",
"sha256": "5064d3984aab8c48d2dbd6285787ac5c6d84a6bebfc02c6d431ce153cf91dec1",
"deps": [
"@com_lihaoyi_sourcecode",
],
},
"com_lihaoyi_pprint": {
"artifact": "com.lihaoyi:pprint_2.13:0.6.0",
"sha256": "6bc908b7acb825bc0ce1148a1a417ab1b75335c98749e6e2d2ad3d09604e3701",
"deps": [
"@com_lihaoyi_fansi",
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
],
},
"com_lihaoyi_sourcecode": {
"artifact": "com.lihaoyi:sourcecode_2.13:0.1.7",
"sha256": "6371a79bfd1125ccf0dbf3278c178f3554a50507975f4a182abb973044f24945",
"deps": [
"@io_bazel_rules_scala_scala_library",
],
},
"com_google_protobuf_protobuf_java": {
"artifact": "com.google.protobuf:protobuf-java:3.10.0",
"sha256": "161d7d61a8cb3970891c299578702fd079646e032329d6c2cabf998d191437c9",
},
"com_geirsson_metaconfig_core": {
"artifact": "com.geirsson:metaconfig-core_2.13:0.9.10",
"sha256": "2ee1f3ee60e4c5e3de63ab9bfe52be2c4f319552b7afedbc20c5097fc26fdc8c",
"deps": [
"@com_lihaoyi_pprint",
"@io_bazel_rules_scala_scala_library",
"@org_typelevel_paiges_core",
"@org_scala_lang_modules_scala_collection_compat",
],
},
"com_geirsson_metaconfig_typesafe_config": {
"artifact": "com.geirsson:metaconfig-typesafe-config_2.13:0.9.10",
"sha256": "bd3698fed4af61d03b9b70783dfaa457e664eae234ca1b83f2580552d1306e39",
"deps": [
"@com_geirsson_metaconfig_core",
"@com_typesafe_config",
"@io_bazel_rules_scala_scala_library",
"@org_scala_lang_modules_scala_collection_compat",
],
},
"io_bazel_rules_scala_org_openjdk_jmh_jmh_core": {
"artifact": "org.openjdk.jmh:jmh-core:1.20",
"sha256": "1688db5110ea6413bf63662113ed38084106ab1149e020c58c5ac22b91b842ca",
},
"io_bazel_rules_scala_org_openjdk_jmh_jmh_generator_asm": {
"artifact": "org.openjdk.jmh:jmh-generator-asm:1.20",
"sha256": "2dd4798b0c9120326310cda3864cc2e0035b8476346713d54a28d1adab1414a5",
},
"io_bazel_rules_scala_org_openjdk_jmh_jmh_generator_reflection": {
"artifact": "org.openjdk.jmh:jmh-generator-reflection:1.20",
"sha256": "57706f7c8278272594a9afc42753aaf9ba0ba05980bae0673b8195908d21204e",
},
"io_bazel_rules_scala_org_ows2_asm_asm": {
"artifact": "org.ow2.asm:asm:6.1.1",
"sha256": "dd3b546415dd4bade2ebe3b47c7828ab0623ee2336604068e2d81023f9f8d833",
},
"io_bazel_rules_scala_net_sf_jopt_simple_jopt_simple": {
"artifact": "net.sf.jopt-simple:jopt-simple:4.6",
"sha256": "3fcfbe3203c2ea521bf7640484fd35d6303186ea2e08e72f032d640ca067ffda",
},
"io_bazel_rules_scala_org_apache_commons_commons_math3": {
"artifact": "org.apache.commons:commons-math3:3.6.1",
"sha256": "1e56d7b058d28b65abd256b8458e3885b674c1d588fa43cd7d1cbb9c7ef2b308",
},
"io_bazel_rules_scala_junit_junit": {
"artifact": "junit:junit:4.12",
"sha256": "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
},
"io_bazel_rules_scala_org_hamcrest_hamcrest_core": {
"artifact": "org.hamcrest:hamcrest-core:1.3",
"sha256": "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
},
"io_bazel_rules_scala_org_specs2_specs2_common": {
"artifact": "org.specs2:specs2-common_2.13:4.10.3",
"sha256": "51636fb6a904b3c807de0673f283a971379c9886e03aedbecbf5d787b22346b0",
"deps": [
"@io_bazel_rules_scala_org_specs2_specs2_fp",
],
},
"io_bazel_rules_scala_org_specs2_specs2_core": {
"artifact": "org.specs2:specs2-core_2.13:4.10.3",
"sha256": "9cc55eb11781c9b77689cf8175795fad34b060718b04a225fffb0613a181256b",
"deps": [
"@io_bazel_rules_scala_org_specs2_specs2_common",
"@io_bazel_rules_scala_org_specs2_specs2_matcher",
],
},
"io_bazel_rules_scala_org_specs2_specs2_fp": {
"artifact": "org.specs2:specs2-fp_2.13:4.10.3",
"sha256": "48a908b345c93a3387ddd157ab338686513f450c7dd8afe0f32b6edc7ff15239",
},
"io_bazel_rules_scala_org_specs2_specs2_matcher": {
"artifact": "org.specs2:specs2-matcher_2.13:4.10.3",
"sha256": "754465f58dad8f59b3bb299d5dc127027bf0c0c9ad25250260fc95abd705363b",
"deps": [
"@io_bazel_rules_scala_org_specs2_specs2_common",
],
},
"io_bazel_rules_scala_org_specs2_specs2_junit": {
"artifact": "org.specs2:specs2-junit_2.13:4.10.3",
"sha256": "49c4e7cf5483aada90852314983fc046f72092da1a4e7900ace6574444f581ea",
"deps": [
"@io_bazel_rules_scala_org_specs2_specs2_core",
],
},
"scala_proto_rules_scalapb_plugin": {
"artifact": "com.thesamet.scalapb:compilerplugin_2.13:0.9.7",
"sha256": "ac29c2f01b0b1e39c4226915000505643d586234d586247e1fd97133e20bcc60",
},
"scala_proto_rules_protoc_bridge": {
"artifact": "com.thesamet.scalapb:protoc-bridge_2.13:0.7.14",
"sha256": "0704f2379374205e7130018e3df6b3d50a4d330c3e447ca39b5075ecb4c93cd1",
},
"scala_proto_rules_scalapb_runtime": {
"artifact": "com.thesamet.scalapb:scalapb-runtime_2.13:0.9.7",
"sha256": "8026485011c53d35eb427ac5c09ed34c283b355d8a6363eae68b3f165bee34a0",
},
"scala_proto_rules_scalapb_runtime_grpc": {
"artifact": "com.thesamet.scalapb:scalapb-runtime-grpc_2.13:0.9.7",
"sha256": "950984d4a3b21925d3156dd98cddb4e7c2f429aad81aa25bb5a3792d41fd7c76",
},
"scala_proto_rules_scalapb_lenses": {
"artifact": "com.thesamet.scalapb:lenses_2.13:0.9.7",
"sha256": "5f43b371b2738a81eff129fd2071ce3e5b3aa30909de90e6bb6e25c3de6c312d",
},
"scala_proto_rules_scalapb_fastparse": {
"artifact": "com.lihaoyi:fastparse_2.13:2.1.3",
"sha256": "5064d3984aab8c48d2dbd6285787ac5c6d84a6bebfc02c6d431ce153cf91dec1",
},
"scala_proto_rules_grpc_core": {
"artifact": "io.grpc:grpc-core:1.24.0",
"sha256": "8fc900625a9330b1c155b5423844d21be0a5574fe218a63170a16796c6f7880e",
},
"scala_proto_rules_grpc_api": {
"artifact": "io.grpc:grpc-api:1.24.0",
"sha256": "553978366e04ee8ddba64afde3b3cf2ac021a2f3c2db2831b6491d742b558598",
},
"scala_proto_rules_grpc_stub": {
"artifact": "io.grpc:grpc-stub:1.24.0",
"sha256": "eaa9201896a77a0822e26621b538c7154f00441a51c9b14dc9e1ec1f2acfb815",
},
"scala_proto_rules_grpc_protobuf": {
"artifact": "io.grpc:grpc-protobuf:1.24.0",
"sha256": "88cd0838ea32893d92cb214ea58908351854ed8de7730be07d5f7d19025dd0bc",
},
"scala_proto_rules_grpc_netty": {
"artifact": "io.grpc:grpc-netty:1.24.0",
"sha256": "8478333706ba442a354c2ddb8832d80a5aef71016e8a9cf07e7bf6e8c298f042",
},
"scala_proto_rules_grpc_context": {
"artifact": "io.grpc:grpc-context:1.24.0",
"sha256": "1f0546e18789f7445d1c5a157010a11bc038bbb31544cdb60d9da3848efcfeea",
},
"scala_proto_rules_perfmark_api": {
"artifact": "io.perfmark:perfmark-api:0.17.0",
"sha256": "816c11409b8a0c6c9ce1cda14bed526e7b4da0e772da67c5b7b88eefd41520f9",
},
"scala_proto_rules_guava": {
"artifact": "com.google.guava:guava:26.0-android",
"sha256": "1d044ebb866ef08b7d04e998b4260c9b52fab6e6d6b68d207859486bb3686cd5",
},
"scala_proto_rules_google_instrumentation": {
"artifact": "com.google.instrumentation:instrumentation-api:0.3.0",
"sha256": "671f7147487877f606af2c7e39399c8d178c492982827305d3b1c7f5b04f1145",
},
"scala_proto_rules_netty_codec": {
"artifact": "io.netty:netty-codec:4.1.32.Final",
"sha256": "dbd6cea7d7bf5a2604e87337cb67c9468730d599be56511ed0979aacb309f879",
},
"scala_proto_rules_netty_codec_http": {
"artifact": "io.netty:netty-codec-http:4.1.32.Final",
"sha256": "db2c22744f6a4950d1817e4e1a26692e53052c5d54abe6cceecd7df33f4eaac3",
},
"scala_proto_rules_netty_codec_socks": {
"artifact": "io.netty:netty-codec-socks:4.1.32.Final",
"sha256": "fe2f2e97d6c65dc280623dcfd24337d8a5c7377049c120842f2c59fb83d7408a",
},
"scala_proto_rules_netty_codec_http2": {
"artifact": "io.netty:netty-codec-http2:4.1.32.Final",
"sha256": "4d4c6cfc1f19efb969b9b0ae6cc977462d202867f7dcfee6e9069977e623a2f5",
},
"scala_proto_rules_netty_handler": {
"artifact": "io.netty:netty-handler:4.1.32.Final",
"sha256": "07d9756e48b5f6edc756e33e8b848fb27ff0b1ae087dab5addca6c6bf17cac2d",
},
"scala_proto_rules_netty_buffer": {
"artifact": "io.netty:netty-buffer:4.1.32.Final",
"sha256": "8ac0e30048636bd79ae205c4f9f5d7544290abd3a7ed39d8b6d97dfe3795afc1",
},
"scala_proto_rules_netty_transport": {
"artifact": "io.netty:netty-transport:4.1.32.Final",
"sha256": "175bae0d227d7932c0c965c983efbb3cf01f39abe934f5c4071d0319784715fb",
},
"scala_proto_rules_netty_resolver": {
"artifact": "io.netty:netty-resolver:4.1.32.Final",
"sha256": "9b4a19982047a95ea4791a7ad7ad385c7a08c2ac75f0a3509cc213cb32a726ae",
},
"scala_proto_rules_netty_common": {
"artifact": "io.netty:netty-common:4.1.32.Final",
"sha256": "cc993e660f8f8e3b033f1d25a9e2f70151666bdf878d460a6508cb23daa696dc",
},
"scala_proto_rules_netty_handler_proxy": {
"artifact": "io.netty:netty-handler-proxy:4.1.32.Final",
"sha256": "10d1081ed114bb0e76ebbb5331b66a6c3189cbdefdba232733fc9ca308a6ea34",
},
"scala_proto_rules_opencensus_api": {
"artifact": "io.opencensus:opencensus-api:0.22.1",
"sha256": "62a0503ee81856ba66e3cde65dee3132facb723a4fa5191609c84ce4cad36127",
},
"scala_proto_rules_opencensus_impl": {
"artifact": "io.opencensus:opencensus-impl:0.22.1",
"sha256": "9e8b209da08d1f5db2b355e781b9b969b2e0dab934cc806e33f1ab3baed4f25a",
},
"scala_proto_rules_disruptor": {
"artifact": "com.lmax:disruptor:3.4.2",
"sha256": "f412ecbb235c2460b45e63584109723dea8d94b819c78c9bfc38f50cba8546c0",
},
"scala_proto_rules_opencensus_impl_core": {
"artifact": "io.opencensus:opencensus-impl-core:0.22.1",
"sha256": "04607d100e34bacdb38f93c571c5b7c642a1a6d873191e25d49899668514db68",
},
"scala_proto_rules_opencensus_contrib_grpc_metrics": {
"artifact": "io.opencensus:opencensus-contrib-grpc-metrics:0.22.1",
"sha256": "3f6f4d5bd332c516282583a01a7c940702608a49ed6e62eb87ef3b1d320d144b",
},
"io_bazel_rules_scala_mustache": {
"artifact": "com.github.spullara.mustache.java:compiler:0.8.18",
"sha256": "ddabc1ef897fd72319a761d29525fd61be57dc25d04d825f863f83cc89000e66",
},
"io_bazel_rules_scala_guava": {
"artifact": "com.google.guava:guava:21.0",
"sha256": "972139718abc8a4893fa78cba8cf7b2c903f35c97aaf44fa3031b0669948b480",
},
"libthrift": {
"artifact": "org.apache.thrift:libthrift:0.8.0",
"sha256": "adea029247c3f16e55e29c1708b897812fd1fe335ac55fe3903e5d2f428ef4b3",
},
"io_bazel_rules_scala_scrooge_core": {
"artifact": "com.twitter:scrooge-core_2.13:21.2.0",
"sha256": "a93f179b96e13bd172e5164c587a3645122f45f6d6370304e06d52e2ab0e456f",
},
"io_bazel_rules_scala_scrooge_generator": {
"artifact": "com.twitter:scrooge-generator_2.13:21.2.0",
"sha256": "1293391da7df25497cad7c56cf8ecaeb672496a548d144d7a2a1cfcf748bed6c",
"runtime_deps": [
"@io_bazel_rules_scala_guava",
"@io_bazel_rules_scala_mustache",
"@io_bazel_rules_scala_scopt",
],
},
"io_bazel_rules_scala_util_core": {
"artifact": "com.twitter:util-core_2.13:21.2.0",
"sha256": "da8e149b8f0646316787b29f6e254250da10b4b31d9a96c32e42f613574678cd",
},
"io_bazel_rules_scala_util_logging": {
"artifact": "com.twitter:util-logging_2.13:21.2.0",
"sha256": "90bd8318329907dcf7e161287473e27272b38ee6857e9d56ee8a1958608cc49d",
},
"io_bazel_rules_scala_javax_annotation_api": {
"artifact": "javax.annotation:javax.annotation-api:1.3.2",
"sha256": "e04ba5195bcd555dc95650f7cc614d151e4bcd52d29a10b8aa2197f3ab89ab9b",
},
"io_bazel_rules_scala_scopt": {
"artifact": "com.github.scopt:scopt_2.13:4.0.0-RC2",
"sha256": "07c1937cba53f7509d2ac62a0fc375943a3e0fef346625414c15d41b5a6cfb34",
},
# test only
"com_twitter__scalding_date": {
"testonly": True,
"artifact": "com.twitter:scalding-date_2.13:0.17.0",
"sha256": "973a7198121cc8dac9eeb3f325c93c497fe3b682f68ba56e34c1b210af7b15b4",
},
"org_typelevel__cats_core": {
"testonly": True,
"artifact": "org.typelevel:cats-core_2.13:2.2.0",
"sha256": "6058d02418e4eb5f1919a1156d63d2d1b93f2c6190b1a1806ee2b73f8726a923",
},
"com_google_guava_guava_21_0_with_file": {
"testonly": True,
"artifact": "com.google.guava:guava:21.0",
"sha256": "972139718abc8a4893fa78cba8cf7b2c903f35c97aaf44fa3031b0669948b480",
},
"com_github_jnr_jffi_native": {
"testonly": True,
"artifact": "com.github.jnr:jffi:jar:native:1.2.17",
"sha256": "4eb582bc99d96c8df92fc6f0f608fd123d278223982555ba16219bf8be9f75a9",
},
"org_apache_commons_commons_lang_3_5": {
"testonly": True,
"artifact": "org.apache.commons:commons-lang3:3.5",
"sha256": "8ac96fc686512d777fca85e144f196cd7cfe0c0aec23127229497d1a38ff651c",
},
"org_springframework_spring_core": {
"testonly": True,
"artifact": "org.springframework:spring-core:5.1.5.RELEASE",
"sha256": "f771b605019eb9d2cf8f60c25c050233e39487ff54d74c93d687ea8de8b7285a",
},
"org_springframework_spring_tx": {
"testonly": True,
"artifact": "org.springframework:spring-tx:5.1.5.RELEASE",
"sha256": "666f72b73c7e6b34e5bb92a0d77a14cdeef491c00fcb07a1e89eb62b08500135",
"deps": [
"@org_springframework_spring_core",
],
},
"com_google_guava_guava_21_0": {
"testonly": True,
"artifact": "com.google.guava:guava:21.0",
"sha256": "972139718abc8a4893fa78cba8cf7b2c903f35c97aaf44fa3031b0669948b480",
"deps": [
"@org_springframework_spring_core",
],
},
# TODO: fix misleading artifact group in id
"org_spire_math_kind_projector": {
"testonly": True,
"artifact": "org.typelevel:kind-projector_2.13:0.10.3",
"sha256": "b5d60c8bc8f1333e2deac17d72d41bb59c53283a67ff3a613189746ce97ac8ad",
},
}
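    # Each entry above maps a Bazel repository name to a pinned Maven artifact:
    # "artifact" gives the coordinate, "sha256" the expected checksum, and the
    # optional "deps"/"runtime_deps" lists reference other pinned repositories;
    # "testonly" marks artifacts wired only into test targets. (Descriptive
    # summary of the visible keys, not an authoritative schema.)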
| 44.301255
| 85
| 0.683368
|
d769de84a83511c3ada6e7084748282c60c417a8
| 1,554
|
py
|
Python
|
openslides_backend/action/mixins/sequential_numbers_mixin.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | null | null | null |
openslides_backend/action/mixins/sequential_numbers_mixin.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | null | null | null |
openslides_backend/action/mixins/sequential_numbers_mixin.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict, Optional
from datastore.shared.util import DeletedModelsBehaviour
from ...models.models import Model
from ...services.datastore.interface import DatastoreService
from ...shared.filters import FilterOperator
from ..generics.create import CreateAction
from ..util.typing import ActionResultElement
class SequentialNumbersMixin(CreateAction):
datastore: DatastoreService
model: Model
def get_sequential_number(self, meeting_id: int) -> int:
"""
        Creates a sequential number that is unique per meeting and returns it.
"""
filter = FilterOperator("meeting_id", "=", meeting_id)
number = self.datastore.max(
collection=self.model.collection,
filter=filter,
field="sequential_number",
get_deleted_models=DeletedModelsBehaviour.ALL_MODELS,
)
number = 1 if number is None else number + 1
return number
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
instance = super().update_instance(instance)
instance["sequential_number"] = self.get_sequential_number(
instance["meeting_id"]
)
return instance
def create_action_result_element(
self, instance: Dict[str, Any]
) -> Optional[ActionResultElement]:
result = super().create_action_result_element(instance)
if result is None:
result = {"id": instance["id"]}
result["sequential_number"] = instance["sequential_number"]
return result
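# Illustrative usage (hypothetical names, not taken from the original module):
# a create action for some collection would subclass this mixin so that every
# instance it creates receives a per-meeting "sequential_number", e.g.
#
#     class MotionCreate(SequentialNumbersMixin):
#         model = Motion()  # hypothetical model class
#
# update_instance() injects the number before the generic create logic persists
# the instance, and create_action_result_element() echoes it back to the caller.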
| 33.06383
| 74
| 0.675032
|
dd8af5a30761dc3b22e0a7380d4676f81b8d963f
| 3,665
|
py
|
Python
|
keystone/tests/unit/common/test_json_home.py
|
ferag/keystone
|
af1c1a822a8dfdd543c6e4d48264f5b8be2bdfc7
|
[
"Apache-2.0"
] | 615
|
2015-01-07T12:32:52.000Z
|
2022-03-24T03:49:47.000Z
|
keystone/tests/unit/common/test_json_home.py
|
ferag/keystone
|
af1c1a822a8dfdd543c6e4d48264f5b8be2bdfc7
|
[
"Apache-2.0"
] | 11
|
2015-04-13T18:52:40.000Z
|
2021-08-21T06:13:05.000Z
|
keystone/tests/unit/common/test_json_home.py
|
ferag/keystone
|
af1c1a822a8dfdd543c6e4d48264f5b8be2bdfc7
|
[
"Apache-2.0"
] | 696
|
2015-01-15T00:31:07.000Z
|
2022-03-16T09:56:00.000Z
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from testtools import matchers
from keystone.common import json_home
from keystone.tests import unit
class JsonHomeTest(unit.BaseTestCase):
def test_build_v3_resource_relation(self):
resource_name = self.getUniqueString()
relation = json_home.build_v3_resource_relation(resource_name)
exp_relation = (
'https://docs.openstack.org/api/openstack-identity/3/rel/%s' %
resource_name)
self.assertThat(relation, matchers.Equals(exp_relation))
def test_build_v3_extension_resource_relation(self):
extension_name = self.getUniqueString()
extension_version = self.getUniqueString()
resource_name = self.getUniqueString()
relation = json_home.build_v3_extension_resource_relation(
extension_name, extension_version, resource_name)
exp_relation = (
'https://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/rel'
'/%s' % (extension_name, extension_version, resource_name))
self.assertThat(relation, matchers.Equals(exp_relation))
def test_build_v3_parameter_relation(self):
parameter_name = self.getUniqueString()
relation = json_home.build_v3_parameter_relation(parameter_name)
exp_relation = (
'https://docs.openstack.org/api/openstack-identity/3/param/%s' %
parameter_name)
self.assertThat(relation, matchers.Equals(exp_relation))
def test_build_v3_extension_parameter_relation(self):
extension_name = self.getUniqueString()
extension_version = self.getUniqueString()
parameter_name = self.getUniqueString()
relation = json_home.build_v3_extension_parameter_relation(
extension_name, extension_version, parameter_name)
exp_relation = (
'https://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/'
'param/%s' % (extension_name, extension_version, parameter_name))
self.assertThat(relation, matchers.Equals(exp_relation))
def test_translate_urls(self):
href_rel = self.getUniqueString()
href = self.getUniqueString()
href_template_rel = self.getUniqueString()
href_template = self.getUniqueString()
href_vars = {self.getUniqueString(): self.getUniqueString()}
original_json_home = {
'resources': {
href_rel: {'href': href},
href_template_rel: {
'href-template': href_template,
'href-vars': href_vars}
}
}
new_json_home = copy.deepcopy(original_json_home)
new_prefix = self.getUniqueString()
json_home.translate_urls(new_json_home, new_prefix)
exp_json_home = {
'resources': {
href_rel: {'href': new_prefix + href},
href_template_rel: {
'href-template': new_prefix + href_template,
'href-vars': href_vars}
}
}
self.assertThat(new_json_home, matchers.Equals(exp_json_home))
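# Worked example of the helpers exercised above (values are illustrative only):
# build_v3_resource_relation('regions') yields
# 'https://docs.openstack.org/api/openstack-identity/3/rel/regions', and
# translate_urls({'resources': {'rel': {'href': '/auth/tokens'}}}, '/identity')
# rewrites the href in place to '/identity/auth/tokens', prefixing any
# 'href-template' the same way while leaving 'href-vars' untouched.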
| 39.836957
| 79
| 0.669577
|
7a1f99210397f2ae05e6dd7c838a92a560064cee
| 1,359
|
py
|
Python
|
tools/vis_result.py
|
Wang-hao-thu/PatchCore_anomaly_detection
|
ac4068c8fa6f50d4252385258096b7dc85d1abc5
|
[
"Apache-2.0"
] | null | null | null |
tools/vis_result.py
|
Wang-hao-thu/PatchCore_anomaly_detection
|
ac4068c8fa6f50d4252385258096b7dc85d1abc5
|
[
"Apache-2.0"
] | null | null | null |
tools/vis_result.py
|
Wang-hao-thu/PatchCore_anomaly_detection
|
ac4068c8fa6f50d4252385258096b7dc85d1abc5
|
[
"Apache-2.0"
] | null | null | null |
import sys
import numpy as np
from tqdm import tqdm
import math
result_file = sys.argv[1]
tmp_file = sys.argv[2]
def get_result(result_file):
f1 = open(result_file,'r')
f2 = open(tmp_file, 'w')
neg = {}
neg_score = []
pos = {}
pos_score = []
for line in tqdm(f1.readlines()):
img_path, label, score = line.strip().split(' ')
if int(label) == 0:
neg_score.append(float(score))
neg.update({str(score):img_path})
else:
pos_score.append(float(score))
pos.update({str(score):img_path})
neg_score = np.array(neg_score)
neg_shunxu = sorted(neg_score,reverse=True)
pos_score = np.array(pos_score)
for rate in [0.5,0.2,0.1,0.05,0.01,0.005,0.001]:
threshold = neg_shunxu[int(rate * len(neg_shunxu))]
recall = sum(pos_score > threshold)
print(f"fp:{rate:.5f} ({math.ceil(rate * len(neg_shunxu))}/{len(neg_score)}) recall: {recall / len(pos_score):.3f} ({recall}/{len(pos_score)}) threshold: {threshold}")
    # Recall at a fixed absolute score threshold.
    threshold = 2.5
    recall = sum(pos_score > threshold)
    print(f"{recall}/{len(pos_score)}")
    # Dump the 245 highest-scoring negatives (hard-coded count) for manual review.
    for i in range(245):
        score = str(neg_shunxu[i])
        image_name = neg[score]
        f2.write(image_name+' ' + '0' + '\n')
    f1.close()
    f2.close()
def main():
get_result(result_file)
if __name__ == "__main__":
main()
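# Worked example of the threshold logic above (illustrative numbers): with 1000
# negative scores sorted in descending order, rate = 0.01 picks neg_shunxu[10]
# as the threshold, so roughly 1% of negatives (about 10 of 1000) score above
# it; recall is then the fraction of positive scores exceeding that threshold,
# i.e. recall at a ~1% false-positive rate.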
| 31.604651
| 180
| 0.599706
|
550d9e9f229436152f59e9964ea113498c82323a
| 199
|
py
|
Python
|
scripts/quest/q25712s.py
|
lynsone/swordie
|
7e9d564c1f2659a87e01c376089e1ee0a3842c5b
|
[
"MIT"
] | 2
|
2020-08-25T06:55:19.000Z
|
2021-03-15T14:37:34.000Z
|
scripts/quest/q25712s.py
|
lynsone/swordie
|
7e9d564c1f2659a87e01c376089e1ee0a3842c5b
|
[
"MIT"
] | null | null | null |
scripts/quest/q25712s.py
|
lynsone/swordie
|
7e9d564c1f2659a87e01c376089e1ee0a3842c5b
|
[
"MIT"
] | 3
|
2020-08-25T06:55:25.000Z
|
2020-12-01T13:07:43.000Z
|
# q25712s - Kaiser 4th job advancement
if chr.getJob() == 6111:
sm.jobAdvance(6112)
sm.completeQuest(25712)
else:
sm.sendSayOkay("You're currently not a third job Kaiser.")
sm.dispose()
| 22.111111
| 62
| 0.698492
|
ac526175783c68a4b74dfc5e1a6c400e53681113
| 527
|
py
|
Python
|
posts/admin.py
|
TrueDi1905/yatube
|
074fac97a47332933f35350a95f661903aac014f
|
[
"BSD-3-Clause"
] | null | null | null |
posts/admin.py
|
TrueDi1905/yatube
|
074fac97a47332933f35350a95f661903aac014f
|
[
"BSD-3-Clause"
] | null | null | null |
posts/admin.py
|
TrueDi1905/yatube
|
074fac97a47332933f35350a95f661903aac014f
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from .models import Post, Group
class PostAdmin(admin.ModelAdmin):
list_display = ("pk", "text", "pub_date", "author")
search_fields = ("text",)
list_filter = ("pub_date",)
empty_value_display = "-пусто-"
class GroupAdmin(admin.ModelAdmin):
list_display = ("title", "description")
search_fields = ("title", "description")
list_filter = ("title",)
empty_value_display = "-пусто-"
admin.site.register(Post, PostAdmin)
admin.site.register(Group, GroupAdmin)
| 23.954545
| 55
| 0.688805
|
b29fb056f35e93eb001de809ee138d9e3c8ce362
| 3,382
|
py
|
Python
|
word-ranking/web/db.py
|
sironitomas/october-challenge
|
b12807779a7c73c54f9af06f7ec2826197cff721
|
[
"MIT"
] | null | null | null |
word-ranking/web/db.py
|
sironitomas/october-challenge
|
b12807779a7c73c54f9af06f7ec2826197cff721
|
[
"MIT"
] | null | null | null |
word-ranking/web/db.py
|
sironitomas/october-challenge
|
b12807779a7c73c54f9af06f7ec2826197cff721
|
[
"MIT"
] | null | null | null |
import hashlib
import mysql.connector
from mysql.connector import errorcode
DB_NAME = 'ranking'
def connect():
try:
cnx = mysql.connector.connect(user='root',
password='my-strong-password',
host='db')
return cnx
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
            print(err)
def create_database(cursor, DB_NAME):
try:
cursor.execute(
"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(DB_NAME))
except mysql.connector.Error as err:
print("Failed creating database: {}".format(err))
exit(1)
def create_tables():
TABLES = {}
TABLES['allwords'] = ("CREATE TABLE `allwords` ("
" `hash` varchar(32) NOT NULL,"
" `word` varchar(64) NOT NULL,"
" `count` int(10) NOT NULL,"
" PRIMARY KEY (`hash`)"
") ENGINE=InnoDB")
cnx = connect()
cursor = cnx.cursor()
try:
cursor.execute("USE {}".format(DB_NAME))
except mysql.connector.Error as err:
print("Database {} does not exists.".format(DB_NAME))
if err.errno == errorcode.ER_BAD_DB_ERROR:
create_database(cursor, DB_NAME)
print("Database {} created successfully.".format(DB_NAME))
cnx.database = DB_NAME
else:
print(err)
exit(1)
for table_name in TABLES:
table_description = TABLES[table_name]
try:
print("Creating table {}: ".format(table_name), end='')
cursor.execute(table_description)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print("already exists.")
else:
print(err.msg)
else:
print("OK")
cursor.close()
cnx.close()
def save_words(new_words):
cnx = connect()
cursor = cnx.cursor()
cursor.execute("USE {}".format(DB_NAME))
query = "SELECT word, count FROM allwords"
cursor.execute(query)
current_words_dict = {}
for (word, count) in cursor:
current_words_dict[word] = count
new_words_dict = {}
for i in new_words:
word = i['word']
count = i['count']
new_words_dict[word] = count
inserts = []
updates = []
for word, count in new_words_dict.items():
res = hashlib.md5(word.encode())
md5sum = res.hexdigest()
if word in current_words_dict:
new_count = count + current_words_dict[word]
query = "UPDATE allwords SET count={} WHERE hash=\"{}\"".format(
new_count, md5sum)
updates.append(query)
else:
query = "INSERT INTO allwords VALUES (\"{}\", \"{}\", {})".format(
md5sum, word, count)
inserts.append(query)
for query in updates:
cursor.execute(query)
for query in inserts:
cursor.execute(query)
cnx.commit()
cursor.close()
cnx.close()
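# Hedged alternative (illustrative, not part of the original module): the same
# inserts/updates can be issued as parametrized statements so the word text is
# never interpolated into the SQL string, e.g.
#
#     cursor.execute(
#         "INSERT INTO allwords (hash, word, count) VALUES (%s, %s, %s)",
#         (md5sum, word, count),
#     )
#     cursor.execute(
#         "UPDATE allwords SET count = %s WHERE hash = %s",
#         (new_count, md5sum),
#     )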
| 29.408696
| 78
| 0.548788
|
b8e17d0eaa53977700877ff30422099e1e1f8299
| 780
|
py
|
Python
|
manage.py
|
chenke91/ihaveablog
|
64000723589d3f5a074bd09f045cb5d6c3daf6dd
|
[
"MIT"
] | null | null | null |
manage.py
|
chenke91/ihaveablog
|
64000723589d3f5a074bd09f045cb5d6c3daf6dd
|
[
"MIT"
] | null | null | null |
manage.py
|
chenke91/ihaveablog
|
64000723589d3f5a074bd09f045cb5d6c3daf6dd
|
[
"MIT"
] | null | null | null |
#!/Users/ck-air/dev/ihaveablog/venv3/bin/python
import os
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
from app import create_app, db
from app.models import User, Blog, Category
app = create_app(os.getenv('BLOG_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Blog=Blog, Category=Category)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
'''run the unit test'''
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
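# Typical invocations (illustrative): `python manage.py shell` opens a shell
# with app, db and the models preloaded via make_shell_context, `python
# manage.py db upgrade` applies migrations through Flask-Migrate, and
# `python manage.py test` runs the unittest discovery defined above.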
| 26.896552
| 72
| 0.746154
|
5ee089dbbb0b974d7955276679d6ebc5157e57af
| 56,516
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/network/tests/latest/test_private_endpoint_commands.py
|
xhl873/azure-cli
|
6448a3437b7139c29a77ba2cb0f592d2f2146afc
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/network/tests/latest/test_private_endpoint_commands.py
|
xhl873/azure-cli
|
6448a3437b7139c29a77ba2cb0f592d2f2146afc
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/network/tests/latest/test_private_endpoint_commands.py
|
xhl873/azure-cli
|
6448a3437b7139c29a77ba2cb0f592d2f2146afc
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import time
import unittest
from azure.cli.testsdk import (
ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer)
from azure.cli.core.util import parse_proxy_resource_id, CLIError
from azure.cli.command_modules.keyvault.tests.latest.test_keyvault_commands import _create_keyvault
from azure.cli.command_modules.rdbms.tests.latest.test_rdbms_commands import ServerPreparer
from azure.cli.command_modules.batch.tests.latest.batch_preparers import BatchAccountPreparer, BatchScenarioMixin
class NetworkPrivateLinkKeyVaultScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_keyvault_plr')
def test_private_link_resource_keyvault(self, resource_group):
self.kwargs.update({
'kv': self.create_random_name('cli-test-kv-plr-', 24),
'loc': 'centraluseuap',
'rg': resource_group
})
_create_keyvault(self, self.kwargs, additional_args='--enable-soft-delete')
self.cmd('network private-link-resource list '
'--name {kv} '
'-g {rg} '
'--type microsoft.keyvault/vaults',
checks=self.check('@[0].properties.groupId', 'vault'))
@ResourceGroupPreparer(name_prefix='cli_test_keyvault_pe')
def test_private_endpoint_connection_keyvault(self, resource_group):
self.kwargs.update({
'kv': self.create_random_name('cli-test-kv-pe-', 24),
'loc': 'centraluseuap',
'vnet': self.create_random_name('cli-vnet-', 24),
'subnet': self.create_random_name('cli-subnet-', 24),
'pe': self.create_random_name('cli-pe-', 24),
'pe_connection': self.create_random_name('cli-pec-', 24),
'rg': resource_group
})
# Prepare vault and network
keyvault = _create_keyvault(self, self.kwargs, additional_args='--enable-soft-delete').get_output_in_json()
self.kwargs['kv_id'] = keyvault['id']
self.cmd('network vnet create '
'-n {vnet} '
'-g {rg} '
'-l {loc} '
'--subnet-name {subnet}',
checks=self.check('length(newVNet.subnets)', 1))
self.cmd('network vnet subnet update '
'-n {subnet} '
'--vnet-name {vnet} '
'-g {rg} '
'--disable-private-endpoint-network-policies true',
checks=self.check('privateEndpointNetworkPolicies', 'Disabled'))
# Create a private endpoint connection
pe = self.cmd('network private-endpoint create '
'-g {rg} '
'-n {pe} '
'--vnet-name {vnet} '
'--subnet {subnet} '
'-l {loc} '
'--connection-name {pe_connection} '
'--private-connection-resource-id {kv_id} '
'--group-id vault').get_output_in_json()
self.kwargs['pe_id'] = pe['id']
# Show the connection at vault side
keyvault = self.cmd('keyvault show -n {kv}',
checks=self.check('length(properties.privateEndpointConnections)', 1)).get_output_in_json()
self.kwargs['kv_pe_id'] = keyvault['properties']['privateEndpointConnections'][0]['id']
print(self.kwargs['kv_pe_id'])
self.cmd('network private-endpoint-connection show '
'--id {kv_pe_id}',
checks=self.check('id', '{kv_pe_id}'))
self.kwargs['kv_pe_name'] = self.kwargs['kv_pe_id'].split('/')[-1]
self.cmd('network private-endpoint-connection show '
'--resource-name {kv} '
'-g {rg} '
'--name {kv_pe_name} '
'--type microsoft.keyvault/vaults',
checks=self.check('name', '{kv_pe_name}'))
self.cmd('network private-endpoint-connection show '
'--resource-name {kv} '
'-g {rg} '
'-n {kv_pe_name} '
'--type microsoft.keyvault/vaults',
checks=self.check('name', '{kv_pe_name}'))
# Try running `set-policy` on the linked vault
self.kwargs['policy_id'] = keyvault['properties']['accessPolicies'][0]['objectId']
self.cmd('keyvault set-policy '
'-g {rg} '
'-n {kv} '
'--object-id {policy_id} '
'--certificate-permissions get list',
checks=self.check('length(properties.accessPolicies[0].permissions.certificates)', 2))
# Test approval/rejection
self.kwargs.update({
'approval_desc': 'You are approved!',
'rejection_desc': 'You are rejected!'
})
self.cmd('network private-endpoint-connection reject '
'--id {kv_pe_id} '
'--description "{rejection_desc}"',
checks=[
self.check('properties.privateLinkServiceConnectionState.status', 'Rejected'),
self.check('properties.privateLinkServiceConnectionState.description', '{rejection_desc}'),
self.check('properties.provisioningState', 'Succeeded')
])
self.cmd('network private-endpoint-connection show --id {kv_pe_id}',
checks=self.check('properties.provisioningState', 'Succeeded'))
self.cmd('network private-endpoint-connection approve '
'--resource-name {kv} '
'--name {kv_pe_name} '
'-g {rg} '
'--type microsoft.keyvault/vaults '
'--description "{approval_desc}"',
checks=[
self.check('properties.privateLinkServiceConnectionState.status', 'Approved'),
self.check('properties.privateLinkServiceConnectionState.description', '{approval_desc}'),
self.check('properties.provisioningState', 'Succeeded')
])
self.cmd('network private-endpoint-connection show --id {kv_pe_id}',
checks=self.check('properties.provisioningState', 'Succeeded'))
self.cmd('network private-endpoint-connection list --id {kv_id}',
checks=self.check('length(@)', 1))
self.cmd('network private-endpoint-connection delete --id {kv_pe_id} -y')
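# Summary note: the scenario classes below follow the same flow as the Key
# Vault test above -- create a private endpoint against the provider's resource
# id with the appropriate --group-id, then drive `az network
# private-endpoint-connection show/approve/reject/list/delete`, passing --type
# as that provider's resource type (e.g. Microsoft.Storage/storageAccounts,
# Microsoft.ContainerRegistry/registries).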
class NetworkPrivateLinkStorageAccountScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_sa_plr')
@StorageAccountPreparer(name_prefix='saplr', kind='StorageV2', sku='Standard_LRS')
def test_private_link_resource_storage_account(self, storage_account):
self.kwargs.update({
'sa': storage_account
})
self.cmd('network private-link-resource list --name {sa} -g {rg} --type Microsoft.Storage/storageAccounts', checks=[
self.check('length(@)', 6)])
@ResourceGroupPreparer(name_prefix='cli_test_sa_pe')
@StorageAccountPreparer(name_prefix='saplr', kind='StorageV2')
def test_private_endpoint_connection_storage_account(self, storage_account):
from msrestazure.azure_exceptions import CloudError
self.kwargs.update({
'sa': storage_account,
'loc': 'eastus',
'vnet': self.create_random_name('cli-vnet-', 24),
'subnet': self.create_random_name('cli-subnet-', 24),
'pe': self.create_random_name('cli-pe-', 24),
'pe_connection': self.create_random_name('cli-pec-', 24),
})
# Prepare network
self.cmd('network vnet create -n {vnet} -g {rg} -l {loc} --subnet-name {subnet}',
checks=self.check('length(newVNet.subnets)', 1))
self.cmd('network vnet subnet update -n {subnet} --vnet-name {vnet} -g {rg} '
'--disable-private-endpoint-network-policies true',
checks=self.check('privateEndpointNetworkPolicies', 'Disabled'))
# Create a private endpoint connection
pr = self.cmd('storage account private-link-resource list --account-name {sa} -g {rg}').get_output_in_json()
self.kwargs['group_id'] = pr[0]['groupId']
storage = self.cmd('storage account show -n {sa} -g {rg}').get_output_in_json()
self.kwargs['sa_id'] = storage['id']
private_endpoint = self.cmd(
'network private-endpoint create -g {rg} -n {pe} --vnet-name {vnet} --subnet {subnet} -l {loc} '
'--connection-name {pe_connection} --private-connection-resource-id {sa_id} '
'--group-id blob').get_output_in_json()
self.assertEqual(private_endpoint['name'], self.kwargs['pe'])
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['name'], self.kwargs['pe_connection'])
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['privateLinkServiceConnectionState']['status'], 'Approved')
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['provisioningState'], 'Succeeded')
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['groupIds'][0], self.kwargs['group_id'])
self.kwargs['pe_id'] = private_endpoint['privateLinkServiceConnections'][0]['id']
# Show the connection at storage account
storage = self.cmd('storage account show -n {sa} -g {rg}').get_output_in_json()
self.assertIn('privateEndpointConnections', storage)
self.assertEqual(len(storage['privateEndpointConnections']), 1)
self.assertEqual(storage['privateEndpointConnections'][0]['privateLinkServiceConnectionState']['status'],
'Approved')
self.kwargs['sa_pec_id'] = storage['privateEndpointConnections'][0]['id']
self.kwargs['sa_pec_name'] = storage['privateEndpointConnections'][0]['name']
self.cmd('network private-endpoint-connection show --name {sa_pec_name} -g {rg} --resource-name {sa} --type Microsoft.Storage/storageAccounts',
checks=self.check('id', '{sa_pec_id}'))
# cannot approve it from auto-approved state
# self.cmd('network private-endpoint-connection approve --name {sa_pec_name} -g {rg} --resource-name {sa} --type Microsoft.Storage/storageAccounts',
# checks=[self.check('properties.privateLinkServiceConnectionState.status', 'Approved')])
self.cmd('network private-endpoint-connection reject --name {sa_pec_name} -g {rg} --resource-name {sa} --type Microsoft.Storage/storageAccounts',
checks=[self.check('properties.privateLinkServiceConnectionState.status', 'Rejected')])
self.cmd('network private-endpoint-connection list --id {sa_pec_id}',
checks=self.check('length(@)', 1))
self.cmd('network private-endpoint-connection delete --id {sa_pec_id} -y')
class NetworkPrivateLinkACRScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_sa_plr')
def test_private_link_resource_acr(self):
self.kwargs.update({
'registry_name': self.create_random_name('testreg', 20)
})
result = self.cmd('acr create --name {registry_name} --resource-group {rg} --sku premium').get_output_in_json()
self.kwargs['registry_id'] = result['id']
self.cmd('network private-link-resource list --id {registry_id}', checks=[
self.check('length(@)', 1)])
@ResourceGroupPreparer(location='centraluseuap')
def test_private_endpoint_connection_acr(self, resource_group):
self.kwargs.update({
'registry_name': self.create_random_name('testreg', 20),
'vnet_name': self.create_random_name('testvnet', 20),
'subnet_name': self.create_random_name('testsubnet', 20),
'endpoint_name': self.create_random_name('priv_endpoint', 25),
'endpoint_conn_name': self.create_random_name('priv_endpointconn', 25),
'second_endpoint_name': self.create_random_name('priv_endpoint', 25),
'second_endpoint_conn_name': self.create_random_name('priv_endpointconn', 25),
'description_msg': 'somedescription'
})
# create subnet with disabled endpoint network policies
self.cmd('network vnet create -g {rg} -n {vnet_name} --subnet-name {subnet_name}')
self.cmd('network vnet subnet update -g {rg} --vnet-name {vnet_name} --name {subnet_name} --disable-private-endpoint-network-policies true')
result = self.cmd('acr create --name {registry_name} --resource-group {rg} --sku premium').get_output_in_json()
self.kwargs['registry_id'] = result['id']
# add an endpoint and approve it
result = self.cmd(
'network private-endpoint create -n {endpoint_name} -g {rg} --subnet {subnet_name} --vnet-name {vnet_name} '
'--private-connection-resource-id {registry_id} --group-id registry --connection-name {endpoint_conn_name} --manual-request').get_output_in_json()
self.assertTrue(self.kwargs['endpoint_name'].lower() in result['name'].lower())
result = self.cmd(
'network private-endpoint-connection list -g {rg} --name {registry_name} --type Microsoft.ContainerRegistry/registries').get_output_in_json()
self.kwargs['endpoint_request'] = result[0]['name']
self.cmd(
'network private-endpoint-connection approve -g {rg} --resource-name {registry_name} -n {endpoint_request} --description {description_msg} --type Microsoft.ContainerRegistry/registries',
checks=[
self.check('properties.privateLinkServiceConnectionState.status', 'Approved'),
self.check('properties.privateLinkServiceConnectionState.description', '{description_msg}')
])
# add an endpoint and then reject it
self.cmd(
'network private-endpoint create -n {second_endpoint_name} -g {rg} --subnet {subnet_name} --vnet-name {vnet_name} --private-connection-resource-id {registry_id} --group-id registry --connection-name {second_endpoint_conn_name} --manual-request')
result = self.cmd('network private-endpoint-connection list -g {rg} --name {registry_name} --type Microsoft.ContainerRegistry/registries').get_output_in_json()
# the connection request name starts with the registry / resource name
self.kwargs['second_endpoint_request'] = [conn['name'] for conn in result if
self.kwargs['second_endpoint_name'].lower() in
conn['properties']['privateEndpoint']['id'].lower()][0]
self.cmd(
'network private-endpoint-connection reject -g {rg} --resource-name {registry_name} -n {second_endpoint_request} --description {description_msg} --type Microsoft.ContainerRegistry/registries',
checks=[
self.check('properties.privateLinkServiceConnectionState.status', 'Rejected'),
self.check('properties.privateLinkServiceConnectionState.description', '{description_msg}')
])
# list endpoints
self.cmd('network private-endpoint-connection list -g {rg} -n {registry_name} --type Microsoft.ContainerRegistry/registries', checks=[
self.check('length(@)', '2'),
])
# remove endpoints
self.cmd(
'network private-endpoint-connection delete -g {rg} --resource-name {registry_name} -n {second_endpoint_request} --type Microsoft.ContainerRegistry/registries -y')
time.sleep(30)
self.cmd('network private-endpoint-connection list -g {rg} -n {registry_name} --type Microsoft.ContainerRegistry/registries', checks=[
self.check('length(@)', '1'),
])
self.cmd('network private-endpoint-connection show -g {rg} --resource-name {registry_name} -n {endpoint_request} --type Microsoft.ContainerRegistry/registries', checks=[
self.check('properties.privateLinkServiceConnectionState.status', 'Approved'),
self.check('properties.privateLinkServiceConnectionState.description', '{description_msg}'),
self.check('name', '{endpoint_request}')
])
self.cmd('network private-endpoint-connection delete -g {rg} --resource-name {registry_name} -n {endpoint_request} --type Microsoft.ContainerRegistry/registries -y')
class NetworkPrivateLinkPrivateLinkScopeScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='eastus')
def test_private_endpoint_connection_private_link_scope(self, resource_group, resource_group_location):
self.kwargs.update({
'rg': resource_group,
'scope': 'clitestscopename',
'assigned_app': 'assigned_app',
'assigned_ws': 'assigned_ws',
'workspace': self.create_random_name('clitest', 20),
'app': self.create_random_name('clitest', 20),
'vnet': self.create_random_name('cli-vnet-', 24),
'subnet': self.create_random_name('cli-subnet-', 24),
'pe': self.create_random_name('cli-pe-', 24),
'pe_connection': self.create_random_name('cli-pec-', 24),
'loc': resource_group_location
})
self.cmd('monitor private-link-scope create -n {scope} -g {rg}', checks=[
self.check('name', '{scope}')
])
self.cmd('monitor private-link-scope update -n {scope} -g {rg} --tags tag1=d1', checks=[
self.check('tags.tag1', 'd1')
])
self.cmd('monitor private-link-scope show -n {scope} -g {rg}', checks=[
self.check('tags.tag1', 'd1')
])
self.cmd('monitor private-link-scope list -g {rg}', checks=[
self.check('length(@)', 1)
])
self.cmd('monitor private-link-scope list')
workspace_id = self.cmd('monitor log-analytics workspace create -n {workspace} -g {rg} -l {loc}').get_output_in_json()['id']
self.kwargs.update({
'workspace_id': workspace_id
})
self.cmd('monitor private-link-scope scoped-resource create -g {rg} -n {assigned_ws} --linked-resource {workspace_id} --scope-name {scope}', checks=[
self.check('name', '{assigned_ws}')
])
self.cmd('monitor private-link-scope scoped-resource list -g {rg} --scope-name {scope}', checks=[
self.check('length(@)', 1)
])
self.cmd('network private-link-resource list --name {scope} -g {rg} --type microsoft.insights/privateLinkScopes', checks=[
self.check('length(@)', 1)
])
# Prepare network
self.cmd('network vnet create -n {vnet} -g {rg} -l {loc} --subnet-name {subnet}',
checks=self.check('length(newVNet.subnets)', 1))
self.cmd('network vnet subnet update -n {subnet} --vnet-name {vnet} -g {rg} '
'--disable-private-endpoint-network-policies true',
checks=self.check('privateEndpointNetworkPolicies', 'Disabled'))
# Create a private endpoint connection
pr = self.cmd('monitor private-link-scope private-link-resource list --scope-name {scope} -g {rg}').get_output_in_json()
self.kwargs['group_id'] = pr[0]['groupId']
private_link_scope = self.cmd('monitor private-link-scope show -n {scope} -g {rg}').get_output_in_json()
self.kwargs['scope_id'] = private_link_scope['id']
private_endpoint = self.cmd(
'network private-endpoint create -g {rg} -n {pe} --vnet-name {vnet} --subnet {subnet} -l {loc} '
'--connection-name {pe_connection} --private-connection-resource-id {scope_id} '
'--group-id {group_id}').get_output_in_json()
self.assertEqual(private_endpoint['name'], self.kwargs['pe'])
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['name'], self.kwargs['pe_connection'])
self.assertEqual(
private_endpoint['privateLinkServiceConnections'][0]['privateLinkServiceConnectionState']['status'],
'Approved')
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['provisioningState'], 'Succeeded')
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['groupIds'][0], self.kwargs['group_id'])
self.kwargs['pe_id'] = private_endpoint['privateLinkServiceConnections'][0]['id']
# Show the connection at monitor private-link-scope
private_endpoint_connections = self.cmd('monitor private-link-scope show --name {scope} -g {rg}').get_output_in_json()['privateEndpointConnections']
self.assertEqual(len(private_endpoint_connections), 1)
self.assertEqual(private_endpoint_connections[0]['privateLinkServiceConnectionState']['status'], 'Approved')
self.kwargs['scope_pec_id'] = private_endpoint_connections[0]['id']
self.kwargs['scope_pec_name'] = private_endpoint_connections[0]['name']
self.cmd('network private-endpoint-connection show --resource-name {scope} -g {rg} --name {scope_pec_name} --type microsoft.insights/privateLinkScopes',
checks=self.check('id', '{scope_pec_id}'))
self.cmd('network private-endpoint-connection reject --resource-name {scope} -g {rg} --name {scope_pec_name} --type microsoft.insights/privateLinkScopes',
checks=[self.check('properties.privateLinkServiceConnectionState.status', 'Rejected')])
self.cmd('network private-endpoint-connection list --name {scope} -g {rg} --type microsoft.insights/privateLinkScopes',
checks=[self.check('length(@)', 1)])
self.cmd('network private-endpoint-connection delete --id {scope_pec_id} -y')
self.cmd('monitor private-link-scope show --name {scope} -g {rg}', checks=[
self.check('privateEndpointConnections', None)
])
self.cmd('monitor private-link-scope scoped-resource delete -g {rg} -n {assigned_app} --scope-name {scope} -y')
self.cmd('monitor private-link-scope scoped-resource list -g {rg} --scope-name {scope}', checks=[
self.check('length(@)', 1)
])
self.cmd('monitor private-link-scope delete -n {scope} -g {rg} -y')
with self.assertRaisesRegexp(SystemExit, '3'):
self.cmd('monitor private-link-scope show -n {scope} -g {rg}')
class NetworkPrivateLinkRDBMSScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@ServerPreparer(engine_type='mariadb')
def test_mariadb_private_link_scenario(self, resource_group, server, database_engine):
print(server)
self._test_private_link_resource(resource_group, server, 'Microsoft.DBforMariaDB/servers', 'mariadbServer')
self._test_private_endpoint_connection(resource_group, server, database_engine, 'Microsoft.DBforMariaDB/servers')
@ResourceGroupPreparer()
@ServerPreparer(engine_type='mysql')
def test_mysql_private_link_scenario(self, resource_group, server, database_engine):
self._test_private_link_resource(resource_group, server, 'Microsoft.DBforMySQL/servers', 'mysqlServer')
self._test_private_endpoint_connection(resource_group, server, database_engine, 'Microsoft.DBforMySQL/servers')
@ResourceGroupPreparer()
@ServerPreparer(engine_type='postgres')
def test_postgres_private_link_scenario(self, resource_group, server, database_engine):
self._test_private_link_resource(resource_group, server, 'Microsoft.DBforPostgreSQL/servers', 'postgresqlServer')
self._test_private_endpoint_connection(resource_group, server, database_engine, 'Microsoft.DBforPostgreSQL/servers')
def _test_private_link_resource(self, resource_group, server, database_engine, group_id):
result = self.cmd('network private-link-resource list -g {} --name {} --type {}'
.format(resource_group, server, database_engine)).get_output_in_json()
self.assertEqual(result[0]['properties']['groupId'], group_id)
def _test_private_endpoint_connection(self, resource_group, server, database_engine, rp_type):
loc = 'westus'
vnet = self.create_random_name('cli-vnet-', 24)
subnet = self.create_random_name('cli-subnet-', 24)
pe_name_auto = self.create_random_name('cli-pe-', 24)
pe_name_manual_approve = self.create_random_name('cli-pe-', 24)
pe_name_manual_reject = self.create_random_name('cli-pe-', 24)
pe_connection_name_auto = self.create_random_name('cli-pec-', 24)
pe_connection_name_manual_approve = self.create_random_name('cli-pec-', 24)
pe_connection_name_manual_reject = self.create_random_name('cli-pec-', 24)
# Prepare network and disable network policies
self.cmd('network vnet create -n {} -g {} -l {} --subnet-name {}'
.format(vnet, resource_group, loc, subnet),
checks=self.check('length(newVNet.subnets)', 1))
self.cmd('network vnet subnet update -n {} --vnet-name {} -g {} '
'--disable-private-endpoint-network-policies true'
.format(subnet, vnet, resource_group),
checks=self.check('privateEndpointNetworkPolicies', 'Disabled'))
# Get Server Id and Group Id
result = self.cmd('{} server show -g {} -n {}'
.format(database_engine, resource_group, server)).get_output_in_json()
server_id = result['id']
result = self.cmd('network private-link-resource list -g {} -n {} --type {}'
.format(resource_group, server, rp_type)).get_output_in_json()
group_id = result[0]['properties']['groupId']
approval_description = 'You are approved!'
rejection_description = 'You are rejected!'
expectedError = 'Private Endpoint Connection Status is not Pending'
# Testing Auto-Approval workflow
# Create a private endpoint connection
private_endpoint = self.cmd('network private-endpoint create -g {} -n {} --vnet-name {} --subnet {} -l {} '
'--connection-name {} --private-connection-resource-id {} '
'--group-id {}'
.format(resource_group, pe_name_auto, vnet, subnet, loc, pe_connection_name_auto, server_id, group_id)).get_output_in_json()
self.assertEqual(private_endpoint['name'], pe_name_auto)
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['name'], pe_connection_name_auto)
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['privateLinkServiceConnectionState']['status'], 'Approved')
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['provisioningState'], 'Succeeded')
self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['groupIds'][0], group_id)
# Get Private Endpoint Connection Name and Id
result = self.cmd('{} server show -g {} -n {}'
.format(database_engine, resource_group, server)).get_output_in_json()
self.assertEqual(len(result['privateEndpointConnections']), 1)
self.assertEqual(result['privateEndpointConnections'][0]['properties']['privateLinkServiceConnectionState']['status'],
'Approved')
server_pec_id = result['privateEndpointConnections'][0]['id']
result = parse_proxy_resource_id(server_pec_id)
server_pec_name = result['child_name_1']
self.cmd('network private-endpoint-connection show --resource-name {} -g {} --name {} --type {}'
.format(server, resource_group, server_pec_name, rp_type),
checks=[
self.check('id', server_pec_id),
self.check('properties.privateLinkServiceConnectionState.status', 'Approved'),
self.check('properties.provisioningState', 'Ready')
])
with self.assertRaisesRegexp(CLIError, expectedError):
self.cmd('network private-endpoint-connection approve --resource-name {} -g {} --name {} --description "{}" --type {}'
.format(server, resource_group, server_pec_name, approval_description, rp_type))
with self.assertRaisesRegexp(CLIError, expectedError):
self.cmd('network private-endpoint-connection reject --resource-name {} -g {} --name {} --description "{}" --type {}'
.format(server, resource_group, server_pec_name, rejection_description, rp_type))
self.cmd('network private-endpoint-connection delete --id {} -y'
.format(server_pec_id))
# Testing Manual-Approval workflow [Approval]
# Create a private endpoint connection
private_endpoint = self.cmd('network private-endpoint create -g {} -n {} --vnet-name {} --subnet {} -l {} '
'--connection-name {} --private-connection-resource-id {} '
'--group-id {} --manual-request'
.format(resource_group, pe_name_manual_approve, vnet, subnet, loc, pe_connection_name_manual_approve, server_id, group_id)).get_output_in_json()
self.assertEqual(private_endpoint['name'], pe_name_manual_approve)
self.assertEqual(private_endpoint['manualPrivateLinkServiceConnections'][0]['name'], pe_connection_name_manual_approve)
self.assertEqual(private_endpoint['manualPrivateLinkServiceConnections'][0]['privateLinkServiceConnectionState']['status'], 'Pending')
self.assertEqual(private_endpoint['manualPrivateLinkServiceConnections'][0]['provisioningState'], 'Succeeded')
self.assertEqual(private_endpoint['manualPrivateLinkServiceConnections'][0]['groupIds'][0], group_id)
# Get Private Endpoint Connection Name and Id
result = self.cmd('{} server show -g {} -n {}'
.format(database_engine, resource_group, server)).get_output_in_json()
self.assertEqual(len(result['privateEndpointConnections']), 1)
self.assertEqual(result['privateEndpointConnections'][0]['properties']['privateLinkServiceConnectionState']['status'],
'Pending')
server_pec_id = result['privateEndpointConnections'][0]['id']
result = parse_proxy_resource_id(server_pec_id)
server_pec_name = result['child_name_1']
self.cmd('network private-endpoint-connection show --resource-name {} -g {} --name {} --type {}'
.format(server, resource_group, server_pec_name, rp_type),
checks=[
self.check('id', server_pec_id),
self.check('properties.privateLinkServiceConnectionState.status', 'Pending'),
self.check('properties.provisioningState', 'Ready')
])
self.cmd('network private-endpoint-connection approve --resource-name {} -g {} --name {} --description "{}" --type {}'
.format(server, resource_group, server_pec_name, approval_description, rp_type),
checks=[
self.check('properties.privateLinkServiceConnectionState.status', 'Approved'),
self.check('properties.privateLinkServiceConnectionState.description', approval_description),
self.check('properties.provisioningState', 'Ready')
])
with self.assertRaisesRegexp(CLIError, expectedError):
self.cmd('network private-endpoint-connection reject --resource-name {} -g {} --name {} --description "{}" --type {}'
.format(server, resource_group, server_pec_name, rejection_description, rp_type))
self.cmd('network private-endpoint-connection delete --id {} -y'
.format(server_pec_id))
# Testing Manual-Approval workflow [Rejection]
# Create a private endpoint connection
private_endpoint = self.cmd('network private-endpoint create -g {} -n {} --vnet-name {} --subnet {} -l {} '
'--connection-name {} --private-connection-resource-id {} '
'--group-id {} --manual-request true'
.format(resource_group, pe_name_manual_reject, vnet, subnet, loc, pe_connection_name_manual_reject, server_id, group_id)).get_output_in_json()
self.assertEqual(private_endpoint['name'], pe_name_manual_reject)
self.assertEqual(private_endpoint['manualPrivateLinkServiceConnections'][0]['name'], pe_connection_name_manual_reject)
self.assertEqual(private_endpoint['manualPrivateLinkServiceConnections'][0]['privateLinkServiceConnectionState']['status'], 'Pending')
self.assertEqual(private_endpoint['manualPrivateLinkServiceConnections'][0]['provisioningState'], 'Succeeded')
self.assertEqual(private_endpoint['manualPrivateLinkServiceConnections'][0]['groupIds'][0], group_id)
# Get Private Endpoint Connection Name and Id
result = self.cmd('{} server show -g {} -n {}'
.format(database_engine, resource_group, server)).get_output_in_json()
self.assertEqual(len(result['privateEndpointConnections']), 1)
self.assertEqual(result['privateEndpointConnections'][0]['properties']['privateLinkServiceConnectionState']['status'],
'Pending')
server_pec_id = result['privateEndpointConnections'][0]['id']
result = parse_proxy_resource_id(server_pec_id)
server_pec_name = result['child_name_1']
self.cmd('network private-endpoint-connection show --resource-name {} -g {} --name {} --type {}'
.format(server, resource_group, server_pec_name, rp_type),
checks=[
self.check('id', server_pec_id),
self.check('properties.privateLinkServiceConnectionState.status', 'Pending'),
self.check('properties.provisioningState', 'Ready')
])
self.cmd('network private-endpoint-connection reject --resource-name {} -g {} --name {} --description "{}" --type {}'
.format(server, resource_group, server_pec_name, rejection_description, rp_type),
checks=[
self.check('properties.privateLinkServiceConnectionState.status', 'Rejected'),
self.check('properties.privateLinkServiceConnectionState.description', rejection_description),
self.check('properties.provisioningState', 'Ready')
])
with self.assertRaisesRegexp(CLIError, expectedError):
self.cmd('network private-endpoint-connection approve --resource-name {} -g {} --name {} --description "{}" --type {}'
.format(server, resource_group, server_pec_name, approval_description, rp_type))
self.cmd('network private-endpoint-connection list --name {} -g {} --type {}'
.format(server, resource_group, rp_type))
self.cmd('network private-endpoint-connection delete --id {} -y'
.format(server_pec_id))
class NetworkPrivateLinkBatchAccountScenarioTest(ScenarioTest):
def _get_test_data_file(self, filename):
filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)
self.assertTrue(os.path.isfile(filepath), 'File {} does not exist.'.format(filepath))
return filepath
# Currently private-link-resource and private-endpoint-connection are whitelist only features so scenario tests are limited
@ResourceGroupPreparer(location='westcentralus')
def test_private_link_resource_batch_account(self, resource_group, batch_account_name='testplinksbatch'):
self.kwargs.update({
'vnet_name': self.create_random_name('testvnet', 20),
'subnet_name': self.create_random_name('testsubnet', 20),
'second_endpoint_name': self.create_random_name('priv_endpoint', 25),
'second_endpoint_conn_name': self.create_random_name('priv_endpointconn', 25),
'approval_desc': 'You are approved!',
'rejection_desc': 'You are rejected!',
'rg': resource_group,
'acc_n': batch_account_name,
'loc': 'westcentralus'
})
account = self.cmd('batch account create -g {rg} -n {acc_n} -l {loc} --public-network-access disabled').assert_with_checks([
self.check('name', '{acc_n}'),
self.check('location', '{loc}'),
self.check('resourceGroup', '{rg}')]).get_output_in_json()
self.kwargs['acc_id'] = account['id']
# create subnet with disabled endpoint network policies
self.cmd('network vnet create -g {rg} -n {vnet_name} --subnet-name {subnet_name}')
self.cmd('network vnet subnet update -g {rg} --vnet-name {vnet_name} --name {subnet_name} --disable-private-endpoint-network-policies true')
# add an endpoint and then reject it
self.cmd(
'network private-endpoint create '
'-n {second_endpoint_name} '
'-g {rg} '
'--subnet {subnet_name} '
'--vnet-name {vnet_name} '
'--private-connection-resource-id {acc_id} '
'--group-ids batchAccount '
'--connection-name {second_endpoint_conn_name} '
'--manual-request').get_output_in_json()
private_endpoints = self.cmd('network private-endpoint-connection list --name {acc_n} --resource-group {rg} --type Microsoft.Batch/batchAccounts', checks=[
self.check('length(@)', 1)
]).get_output_in_json()
self.cmd('batch account show --name {acc_n} --resource-group {rg}', checks=[
self.check('length(privateEndpointConnections[*])', 1),
self.check('privateEndpointConnections[0].id', private_endpoints[0]['id'])
])
self.kwargs['pe_id'] = private_endpoints[0]["id"]
self.kwargs['pe_name'] = private_endpoints[0]['name']
self.cmd(
'network private-endpoint-connection approve --resource-name {acc_n} --name {pe_name} --resource-group {rg} --type Microsoft.Batch/batchAccounts '
'--description "{approval_desc}"')
self.cmd(
'network private-endpoint-connection show --resource-name {acc_n} --name {pe_name} --resource-group {rg} --type Microsoft.Batch/batchAccounts',
checks=[
self.check('name', '{pe_name}'),
self.check('properties.privateLinkServiceConnectionState.status', 'Approved'),
self.check('properties.privateLinkServiceConnectionState.description', '{approval_desc}')])
self.cmd('network private-endpoint-connection reject --resource-name {acc_n} --name {pe_name} --resource-group {rg} --type Microsoft.Batch/batchAccounts '
'--description "{rejection_desc}"')
self.cmd('network private-endpoint-connection show --id {pe_id}',
checks=[
self.check('id', '{pe_id}'),
self.check('properties.privateLinkServiceConnectionState.status', 'Rejected'),
self.check('properties.privateLinkServiceConnectionState.description', '{rejection_desc}')])
# Test delete
self.cmd('network private-endpoint-connection delete --id {pe_id} -y')
self.cmd('network private-endpoint delete -n {second_endpoint_name} -g {rg}')
class NetworkPrivateLinkCosmosDBScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_cosmosdb_plr')
def test_private_link_resource_cosmosdb(self, resource_group):
self.kwargs.update({
'acc': self.create_random_name('cli-test-cosmosdb-plr-', 28),
'loc': 'centraluseuap'
})
self.cmd('az cosmosdb create -n {acc} -g {rg}')
self.cmd('network private-link-resource list --name {acc} --resource-group {rg} --type Microsoft.DocumentDB/databaseAccounts',
checks=[self.check('length(@)', 1), self.check('[0].properties.groupId', 'Sql')])
@ResourceGroupPreparer(name_prefix='cli_test_cosmosdb_pe')
def test_private_endpoint_connection_cosmosdb(self, resource_group):
self.kwargs.update({
'acc': self.create_random_name('cli-test-cosmosdb-pe-', 28),
'loc': 'centraluseuap',
'vnet': self.create_random_name('cli-vnet-', 24),
'subnet': self.create_random_name('cli-subnet-', 24),
'pe': self.create_random_name('cli-pe-', 24),
'pe_connection': self.create_random_name('cli-pec-', 24)
})
# Prepare cosmos db account and network
account = self.cmd('az cosmosdb create -n {acc} -g {rg}').get_output_in_json()
self.kwargs['acc_id'] = account['id']
self.cmd('network vnet create -n {vnet} -g {rg} -l {loc} --subnet-name {subnet}',
checks=self.check('length(newVNet.subnets)', 1))
self.cmd('network vnet subnet update -n {subnet} --vnet-name {vnet} -g {rg} '
'--disable-private-endpoint-network-policies true',
checks=self.check('privateEndpointNetworkPolicies', 'Disabled'))
# Create a private endpoint connection
pe = self.cmd('network private-endpoint create -g {rg} -n {pe} --vnet-name {vnet} --subnet {subnet} -l {loc} '
'--connection-name {pe_connection} --private-connection-resource-id {acc_id} '
'--group-id Sql').get_output_in_json()
self.kwargs['pe_id'] = pe['id']
self.kwargs['pe_name'] = self.kwargs['pe_id'].split('/')[-1]
# Show the connection at cosmos db side
results = self.kwargs['pe_id'].split('/')
self.kwargs[
'pec_id'] = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.DocumentDB/databaseAccounts/{2}/privateEndpointConnections/{3}'.format(
results[2], results[4], self.kwargs['acc'], results[-1])
self.cmd('network private-endpoint-connection show --id {pec_id}',
checks=self.check('id', '{pec_id}'))
self.cmd(
'network private-endpoint-connection show --resource-name {acc} --name {pe_name} --resource-group {rg} --type Microsoft.DocumentDB/databaseAccounts',
checks=self.check('name', '{pe_name}'))
self.cmd('network private-endpoint-connection show --resource-name {acc} -n {pe_name} -g {rg} --type Microsoft.DocumentDB/databaseAccounts',
checks=self.check('name', '{pe_name}'))
# Test approval/rejection
self.kwargs.update({
'approval_desc': 'You are approved!',
'rejection_desc': 'You are rejected!'
})
self.cmd(
'network private-endpoint-connection approve --resource-name {acc} --resource-group {rg} --name {pe_name} --type Microsoft.DocumentDB/databaseAccounts '
'--description "{approval_desc}"', checks=[
self.check('properties.privateLinkServiceConnectionState.status', 'Approved'),
self.check('properties.privateLinkServiceConnectionState.description', '{approval_desc}')
])
self.cmd('network private-endpoint-connection reject --id {pec_id} '
'--description "{rejection_desc}"',
checks=[
self.check('properties.privateLinkServiceConnectionState.status', 'Rejected'),
self.check('properties.privateLinkServiceConnectionState.description', '{rejection_desc}')
])
self.cmd('network private-endpoint-connection list --name {acc} --resource-group {rg} --type Microsoft.DocumentDB/databaseAccounts', checks=[
self.check('length(@)', 1)
])
# Test delete
self.cmd('network private-endpoint-connection delete --id {pec_id} -y')
class NetworkPrivateLinkEventGridScenarioTest(ScenarioTest):
def setUp(self):
super(NetworkPrivateLinkEventGridScenarioTest, self).setUp()
self.cmd('extension add -n eventgrid')
def tearDown(self):
self.cmd('extension remove -n eventgrid')
super(NetworkPrivateLinkEventGridScenarioTest, self).tearDown()
@ResourceGroupPreparer(name_prefix='cli_test_event_grid_plr')
def test_private_link_resource_event_grid(self, resource_group):
self.kwargs.update({
'topic_name': self.create_random_name(prefix='cli', length=40),
'domain_name': self.create_random_name(prefix='cli', length=40),
'location': 'centraluseuap',
'rg': resource_group
})
scope_id = self.cmd(
'az eventgrid topic create --name {topic_name} --resource-group {rg} --location {location} --public-network-access disabled',
checks=[
self.check('type', 'Microsoft.EventGrid/topics'),
self.check('name', self.kwargs['topic_name']),
self.check('provisioningState', 'Succeeded'),
self.check('sku', {'name': 'Basic'}),
self.check('publicNetworkAccess', 'Disabled'),
self.check('identity.principalId', None),
self.check('identity.tenantId', None),
self.check('identity.type', None),
self.check('identity.userAssignedIdentities', None)
]).get_output_in_json()['id']
self.kwargs.update({
'scope_id': scope_id
})
self.cmd(
'network private-link-resource list --id {scope_id}',
checks=[self.check('length(@)', 1), self.check('[0].properties.groupId', 'topic')])
domain_id = self.cmd('az eventgrid domain create --name {domain_name} --resource-group {rg} --location {location} --public-network-access disabled',).get_output_in_json()['id']
self.kwargs.update({
'domain_id': domain_id
})
self.cmd(
'network private-link-resource list --id {domain_id}',
checks=[self.check('length(@)', 1), self.check('[0].properties.groupId', 'domain')])
@ResourceGroupPreparer(name_prefix='cli_test_event_grid_pec', location='centraluseuap')
@ResourceGroupPreparer(name_prefix='cli_test_event_grid_pec', parameter_name='resource_group_2', location='centraluseuap')
def test_private_endpoint_connection_event_grid_topic(self, resource_group, resource_group_2):
self.kwargs.update({
'resource_group_net': resource_group_2,
'vnet_name': self.create_random_name(prefix='cli', length=20),
'subnet_name': self.create_random_name(prefix='cli', length=20),
'private_endpoint_name': self.create_random_name(prefix='cli', length=20),
'connection_name': self.create_random_name(prefix='cli', length=20),
'topic_name': self.create_random_name(prefix='cli', length=40),
'location': 'centraluseuap',
'approval_description': 'You are approved!',
'rejection_description': 'You are rejected!',
'rg': resource_group
})
self.cmd('az network vnet create --resource-group {resource_group_net} --location {location} --name {vnet_name} --address-prefix 10.0.0.0/16')
self.cmd('az network vnet subnet create --resource-group {resource_group_net} --vnet-name {vnet_name} --name {subnet_name} --address-prefixes 10.0.0.0/24')
self.cmd('az network vnet subnet update --resource-group {resource_group_net} --vnet-name {vnet_name} --name {subnet_name} --disable-private-endpoint-network-policies true')
scope = self.cmd('az eventgrid topic create --name {topic_name} --resource-group {rg} --location {location} --public-network-access disabled', checks=[
self.check('type', 'Microsoft.EventGrid/topics'),
self.check('name', self.kwargs['topic_name']),
self.check('provisioningState', 'Succeeded'),
self.check('sku', {'name': 'Basic'}),
self.check('publicNetworkAccess', 'Disabled'),
self.check('identity.principalId', None),
self.check('identity.tenantId', None),
self.check('identity.type', None),
self.check('identity.userAssignedIdentities', None)
]).get_output_in_json()['id']
self.kwargs.update({
'scope': scope,
})
# Create private endpoint
self.cmd('az network private-endpoint create --resource-group {resource_group_net} --name {private_endpoint_name} --vnet-name {vnet_name} --subnet {subnet_name} --private-connection-resource-id {scope} --location {location} --group-ids topic --connection-name {connection_name}')
server_pec_id = self.cmd('az eventgrid topic show --name {topic_name} --resource-group {rg}').get_output_in_json()['privateEndpointConnections'][0]['id']
result = parse_proxy_resource_id(server_pec_id)
server_pec_name = result['child_name_1']
self.kwargs.update({
'server_pec_name': server_pec_name,
})
self.cmd('az network private-endpoint-connection list --resource-group {rg} --name {topic_name} --type Microsoft.EventGrid/topics',
checks=[
self.check('length(@)', 1)
])
self.cmd('az network private-endpoint-connection show --resource-group {rg} --resource-name {topic_name} --name {server_pec_name} --type Microsoft.EventGrid/topics')
self.cmd('az network private-endpoint-connection approve --resource-group {rg} --resource-name {topic_name} '
'--name {server_pec_name} --type Microsoft.EventGrid/topics --description "{approval_description}"',
checks=[
self.check('properties.privateLinkServiceConnectionState.status', 'Approved'),
self.check('properties.privateLinkServiceConnectionState.description', '{approval_description}')
])
self.cmd('az network private-endpoint-connection reject --resource-group {rg} --resource-name {topic_name} '
'--name {server_pec_name} --type Microsoft.EventGrid/topics --description "{rejection_description}"',
checks=[
self.check('properties.privateLinkServiceConnectionState.status', 'Rejected'),
self.check('properties.privateLinkServiceConnectionState.description', '{rejection_description}')
])
self.cmd('az network private-endpoint-connection delete --resource-group {rg} --resource-name {topic_name} --name {server_pec_name} --type Microsoft.EventGrid/topics -y')
self.cmd('az network private-endpoint delete --resource-group {resource_group_net} --name {private_endpoint_name}')
self.cmd('az network vnet subnet delete --resource-group {resource_group_net} --vnet-name {vnet_name} --name {subnet_name}')
self.cmd('az network vnet delete --resource-group {resource_group_net} --name {vnet_name}')
self.cmd('az eventgrid topic delete --name {topic_name} --resource-group {rg}')
@ResourceGroupPreparer(name_prefix='cli_test_event_grid_pec', location='centraluseuap')
@ResourceGroupPreparer(name_prefix='cli_test_event_grid_pec', parameter_name='resource_group_2', location='centraluseuap')
def test_private_endpoint_connection_event_grid_domain(self, resource_group, resource_group_2):
self.kwargs.update({
'resource_group_net': resource_group_2,
'vnet_name': self.create_random_name(prefix='cli', length=20),
'subnet_name': self.create_random_name(prefix='cli', length=20),
'private_endpoint_name': self.create_random_name(prefix='cli', length=20),
'connection_name': self.create_random_name(prefix='cli', length=20),
'domain_name': self.create_random_name(prefix='cli', length=40),
'location': 'centraluseuap',
'approval_description': 'You are approved!',
'rejection_description': 'You are rejected!',
'rg': resource_group
})
self.cmd('az network vnet create --resource-group {resource_group_net} --location {location} --name {vnet_name} --address-prefix 10.0.0.0/16')
self.cmd('az network vnet subnet create --resource-group {resource_group_net} --vnet-name {vnet_name} --name {subnet_name} --address-prefixes 10.0.0.0/24')
self.cmd('az network vnet subnet update --resource-group {resource_group_net} --vnet-name {vnet_name} --name {subnet_name} --disable-private-endpoint-network-policies true')
scope = self.cmd('az eventgrid domain create --name {domain_name} --resource-group {rg} --location {location} --public-network-access disabled', checks=[
self.check('type', 'Microsoft.EventGrid/domains'),
self.check('name', self.kwargs['domain_name']),
self.check('provisioningState', 'Succeeded'),
self.check('sku', {'name': 'Basic'}),
self.check('publicNetworkAccess', 'Disabled'),
self.check('identity.principalId', None),
self.check('identity.tenantId', None),
self.check('identity.type', None),
self.check('identity.userAssignedIdentities', None)
]).get_output_in_json()['id']
self.kwargs.update({
'scope': scope,
})
# Create private endpoint
self.cmd('az network private-endpoint create --resource-group {resource_group_net} --name {private_endpoint_name} --vnet-name {vnet_name} --subnet {subnet_name} --private-connection-resource-id {scope} --location {location} --group-ids domain --connection-name {connection_name}')
server_pec_id = self.cmd('az eventgrid domain show --name {domain_name} --resource-group {rg}').get_output_in_json()['privateEndpointConnections'][0]['id']
result = parse_proxy_resource_id(server_pec_id)
server_pec_name = result['child_name_1']
self.kwargs.update({
'server_pec_name': server_pec_name,
})
self.cmd('az network private-endpoint-connection list --resource-group {rg} --name {domain_name} --type Microsoft.EventGrid/domains',
checks=[
self.check('length(@)', 1)
])
self.cmd('az network private-endpoint-connection show --resource-group {rg} --resource-name {domain_name} --name {server_pec_name} --type Microsoft.EventGrid/domains')
self.cmd('az network private-endpoint-connection approve --resource-group {rg} --resource-name {domain_name} '
'--name {server_pec_name} --type Microsoft.EventGrid/domains --description "{approval_description}"',
checks=[
self.check('properties.privateLinkServiceConnectionState.status', 'Approved'),
self.check('properties.privateLinkServiceConnectionState.description', '{approval_description}')
])
self.cmd('az network private-endpoint-connection reject --resource-group {rg} --resource-name {domain_name} '
'--name {server_pec_name} --type Microsoft.EventGrid/domains --description "{rejection_description}"',
checks=[
self.check('properties.privateLinkServiceConnectionState.status', 'Rejected'),
self.check('properties.privateLinkServiceConnectionState.description', '{rejection_description}')
])
self.cmd('az network private-endpoint-connection delete --resource-group {rg} --resource-name {domain_name} --name {server_pec_name} --type Microsoft.EventGrid/domains -y')
self.cmd('az network private-endpoint delete --resource-group {resource_group_net} --name {private_endpoint_name}')
self.cmd('az network vnet subnet delete --resource-group {resource_group_net} --vnet-name {vnet_name} --name {subnet_name}')
self.cmd('az network vnet delete --resource-group {resource_group_net} --name {vnet_name}')
self.cmd('az eventgrid domain delete --name {domain_name} --resource-group {rg}')
if __name__ == '__main__':
unittest.main()
| 59.742072
| 288
| 0.638297
|
1bb3d6b3f7da8eb0b604dbd27453b5db60c085be
| 467
|
py
|
Python
|
config.py
|
McMvMc/lsm_mike
|
7c62d9e1ef9a60bbd5de04b4481485c3b9648359
|
[
"MIT"
] | null | null | null |
config.py
|
McMvMc/lsm_mike
|
7c62d9e1ef9a60bbd5de04b4481485c3b9648359
|
[
"MIT"
] | null | null | null |
config.py
|
McMvMc/lsm_mike
|
7c62d9e1ef9a60bbd5de04b4481485c3b9648359
|
[
"MIT"
] | null | null | null |
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Shapenet config
SHAPENET_VOX = {
32: os.path.join(BASE_DIR, 'data/shapenet_release/voxels/modelVoxels32'),
64: os.path.join(BASE_DIR, 'data/shapenet_release/voxels/modelVoxels64')
}
SHAPENET_IM = os.path.join(BASE_DIR, 'data/shapenet_release/renders')
CUSTOM_SHAPENET_IM = os.path.join(BASE_DIR, 'data/rendered_images')
CUSTOM_SPLIT_JSON = os.path.join(BASE_DIR, 'data/custom_split.json')
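# Illustration: SHAPENET_VOX is keyed by voxel resolution, so SHAPENET_VOX[32] resolves to
# data/shapenet_release/voxels/modelVoxels32 under BASE_DIR (the data layout itself is assumed).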
| 31.133333
| 77
| 0.770878
|
cefa31a5e12ced83854b7b7013c4e93fd3b2152c
| 11,785
|
py
|
Python
|
jishaku/shim/paginator_200.py
|
danrfq/jishaku
|
d1d10e80a729b169c3c86eecbb0403ea30d4f414
|
[
"MIT"
] | 1
|
2022-01-07T10:43:20.000Z
|
2022-01-07T10:43:20.000Z
|
jishaku/shim/paginator_200.py
|
danrfq/jishaku
|
d1d10e80a729b169c3c86eecbb0403ea30d4f414
|
[
"MIT"
] | null | null | null |
jishaku/shim/paginator_200.py
|
danrfq/jishaku
|
d1d10e80a729b169c3c86eecbb0403ea30d4f414
|
[
"MIT"
] | 1
|
2022-03-15T02:21:39.000Z
|
2022-03-15T02:21:39.000Z
|
# -*- coding: utf-8 -*-
"""
jishaku.paginators (shim for discord.py 2.0.0)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Paginator-related tools and interfaces for Jishaku.
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
import asyncio
import discord
from discord import ui
from discord.ext import commands
from jishaku.shim.paginator_base import EMOJI_DEFAULT
class PaginatorInterface(ui.View): # pylint: disable=too-many-instance-attributes
"""
A message and reaction based interface for paginators.
This allows users to interactively navigate the pages of a Paginator, and supports live output.
An example of how to use this with a standard Paginator:
.. code:: python3
from discord.ext import commands
from jishaku.paginators import PaginatorInterface
# In a command somewhere...
# Paginators need to have a reduced max_size to accommodate the extra text added by the interface.
paginator = commands.Paginator(max_size=1900)
# Populate the paginator with some information
for line in range(100):
paginator.add_line(f"Line {line + 1}")
# Create and send the interface.
# The 'owner' field determines who can interact with this interface. If it's None, anyone can use it.
interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
await interface.send_to(ctx)
# send_to creates a task and returns control flow.
# It will raise if the interface can't be created, e.g., if there's no reaction permission in the channel.
# Once the interface has been sent, line additions have to be done asynchronously, so the interface can be updated.
await interface.add_line("My, the Earth sure is full of things!")
# You can also check if it's closed using the 'closed' property.
if not interface.closed:
await interface.add_line("I'm still here!")
"""
def __init__(self, bot: commands.Bot, paginator: commands.Paginator, **kwargs):
if not isinstance(paginator, commands.Paginator):
raise TypeError('paginator must be a commands.Paginator instance')
self._display_page = 0
self.bot = bot
self.message = None
self.paginator = paginator
self.owner = kwargs.pop('owner', None)
self.emojis = kwargs.pop('emoji', EMOJI_DEFAULT)
self.timeout_length = kwargs.pop('timeout', 7200)
self.delete_message = kwargs.pop('delete_message', False)
self.sent_page_reactions = False
self.task: asyncio.Task = None
self.send_lock: asyncio.Event = asyncio.Event()
self.close_exception: Exception = None
if self.page_size > self.max_page_size:
raise ValueError(
f'Paginator passed has too large of a page size for this interface. '
f'({self.page_size} > {self.max_page_size})'
)
super().__init__(timeout=self.timeout_length)
@property
def pages(self):
"""
Returns the paginator's pages without prematurely closing the active page.
"""
# protected access has to be permitted here to not close the paginator's pages
# pylint: disable=protected-access
paginator_pages = list(self.paginator._pages)
if len(self.paginator._current_page) > 1:
paginator_pages.append('\n'.join(self.paginator._current_page) + '\n' + (self.paginator.suffix or ''))
# pylint: enable=protected-access
return paginator_pages
@property
def page_count(self):
"""
Returns the page count of the internal paginator.
"""
return len(self.pages)
@property
def display_page(self):
"""
Returns the current page the paginator interface is on.
"""
self._display_page = max(0, min(self.page_count - 1, self._display_page))
return self._display_page
@display_page.setter
def display_page(self, value):
"""
Sets the current page the paginator is on. Values are automatically clamped to stay in bounds.
"""
self._display_page = max(0, min(self.page_count - 1, value))
max_page_size = 2000
@property
def page_size(self) -> int:
"""
A property that returns how large a page is, calculated from the paginator properties.
If this exceeds `max_page_size`, an exception is raised upon instantiation.
"""
page_count = self.page_count
return self.paginator.max_size + len(f'\nPage {page_count}/{page_count}')
@property
def send_kwargs(self) -> dict:
"""
A property that returns the kwargs forwarded to send/edit when updating the page.
As this must be compatible with both `discord.TextChannel.send` and `discord.Message.edit`,
it should be a dict containing 'content', 'embed' or both.
"""
content = self.pages[self.display_page]
return {'content': content, 'view': self}
def update_view(self):
"""
Updates view buttons to correspond to current interface state.
This is used internally.
"""
self.button_start.label = f"1 \u200b {self.emojis.start}"
self.button_previous.label = self.emojis.back
self.button_current.label = str(self.display_page + 1)
self.button_next.label = self.emojis.forward
self.button_last.label = f"{self.emojis.end} \u200b {self.page_count}"
self.button_close.label = f"{self.emojis.close} \u200b Close paginator"
async def add_line(self, *args, **kwargs):
"""
A proxy function that allows this PaginatorInterface to remain locked to the last page
if it is already on it.
"""
display_page = self.display_page
page_count = self.page_count
self.paginator.add_line(*args, **kwargs)
new_page_count = self.page_count
if display_page + 1 == page_count:
# To keep position fixed on the end, update position to new last page and update message.
self._display_page = new_page_count
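# (the display_page getter clamps this back into range, so this effectively jumps to the new last page)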
# Unconditionally set send lock to try and guarantee page updates on unfocused pages
self.send_lock.set()
async def send_to(self, destination: discord.abc.Messageable):
"""
Sends a message to the given destination with this interface.
This automatically creates the response task for you.
"""
self.message = await destination.send(**self.send_kwargs)
self.send_lock.set()
if self.task:
self.task.cancel()
self.task = self.bot.loop.create_task(self.wait_loop())
return self
@property
def closed(self):
"""
Is this interface closed?
"""
if not self.task:
return False
return self.task.done()
async def send_lock_delayed(self):
"""
A coroutine that returns 1 second after the send lock has been released.
This helps reduce release spam that would quickly hit rate limits.
"""
gathered = await self.send_lock.wait()
self.send_lock.clear()
await asyncio.sleep(1)
return gathered
async def wait_loop(self):
"""
Waits on a loop for updates to the interface. This should not be called manually - it is handled by `send_to`.
"""
try: # pylint: disable=too-many-nested-blocks
while not self.bot.is_closed():
await asyncio.wait_for(self.send_lock_delayed(), timeout=self.timeout_length)
self.update_view()
try:
await self.message.edit(**self.send_kwargs)
except discord.NotFound:
# something terrible has happened
return
except (asyncio.CancelledError, asyncio.TimeoutError) as exception:
self.close_exception = exception
if self.bot.is_closed():
# Can't do anything about the messages, so just close out to avoid noisy error
return
# If the message was already deleted, this part is unnecessary
if not self.message:
return
if self.delete_message:
await self.message.delete()
else:
await self.message.edit(view=None)
async def interaction_check(self, interaction: discord.Interaction):
"""Check that determines whether this interaction should be honored"""
return interaction.user.id in [211756205721255947, 714731543309844561]
@ui.button(label="1 \u200b \N{BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}", style=discord.ButtonStyle.secondary)
async def button_start(self, interaction: discord.Interaction, button: ui.Button): # pylint: disable=unused-argument
"""Button to send interface to first page"""
self._display_page = 0
self.update_view()
await interaction.response.edit_message(**self.send_kwargs)
@ui.button(label="\N{BLACK LEFT-POINTING TRIANGLE}", style=discord.ButtonStyle.secondary)
async def button_previous(self, interaction: discord.Interaction, button: ui.Button): # pylint: disable=unused-argument
"""Button to send interface to previous page"""
self._display_page -= 1
self.update_view()
await interaction.response.edit_message(**self.send_kwargs)
@ui.button(label="1", style=discord.ButtonStyle.primary)
async def button_current(self, interaction: discord.Interaction, button: ui.Button): # pylint: disable=unused-argument
"""Button to refresh the interface"""
self.update_view()
await interaction.response.edit_message(**self.send_kwargs)
@ui.button(label="\N{BLACK RIGHT-POINTING TRIANGLE}", style=discord.ButtonStyle.secondary)
async def button_next(self, interaction: discord.Interaction, button: ui.Button): # pylint: disable=unused-argument
"""Button to send interface to next page"""
self._display_page += 1
self.update_view()
await interaction.response.edit_message(**self.send_kwargs)
@ui.button(label="\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR} \u200b 1", style=discord.ButtonStyle.secondary)
async def button_last(self, interaction: discord.Interaction, button: ui.Button): # pylint: disable=unused-argument
"""Button to send interface to last page"""
self._display_page = self.page_count - 1
self.update_view()
await interaction.response.edit_message(**self.send_kwargs)
@ui.button(label="\N{BLACK SQUARE FOR STOP} \u200b Close paginator", style=discord.ButtonStyle.danger)
async def button_close(self, interaction: discord.Interaction, button: ui.Button): # pylint: disable=unused-argument
"""Button to close the interface"""
message = self.message
self.message = None
self.task.cancel()
self.stop()
await message.delete()
class PaginatorEmbedInterface(PaginatorInterface):
"""
A subclass of :class:`PaginatorInterface` that encloses content in an Embed.
"""
def __init__(self, *args, **kwargs):
self._embed = kwargs.pop('embed', None) or discord.Embed()
super().__init__(*args, **kwargs)
@property
def send_kwargs(self) -> dict:
self._embed.description = self.pages[self.display_page]
return {'embed': self._embed, 'view': self}
max_page_size = 2048
@property
def page_size(self) -> int:
return self.paginator.max_size
| 35.39039
| 128
| 0.645057
|
2376fdbd509cb848005c870c62619ae40f85d028
| 37,892
|
py
|
Python
|
neutron/tests/unit/agent/linux/test_ovs_lib.py
|
bradleyjones/neutron
|
d283e23d7658162f911240bf6a4e707e3709093a
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/agent/linux/test_ovs_lib.py
|
bradleyjones/neutron
|
d283e23d7658162f911240bf6a4e707e3709093a
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/agent/linux/test_ovs_lib.py
|
bradleyjones/neutron
|
d283e23d7658162f911240bf6a4e707e3709093a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012, VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_serialization import jsonutils
import testtools
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.tests import base
from neutron.tests import tools
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
class OFCTLParamListMatcher(object):
def _parse(self, params):
actions_pos = params.find('actions')
return set(params[:actions_pos].split(',')), params[actions_pos:]
def __init__(self, params):
self.expected = self._parse(params)
def __eq__(self, other):
return self.expected == self._parse(other)
def __str__(self):
return 'ovs-ofctl parameters: %s, "%s"' % self.expected
__repr__ = __str__
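# Illustration (hypothetical flows): because everything before 'actions' is compared as a set,
# "priority=2,in_port=1,actions=drop" and "in_port=1,priority=2,actions=drop" match each other,
# while the part from 'actions' onwards must still be identical.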
class OVS_Lib_Test(base.BaseTestCase):
"""A test suite to exercise the OVS libraries shared by Neutron agents.
Note: these tests do not actually execute ovs-* utilities, and thus
can run on any system. That does, however, limit their scope.
"""
def setUp(self):
super(OVS_Lib_Test, self).setUp()
self.BR_NAME = "br-int"
self.br = ovs_lib.OVSBridge(self.BR_NAME)
self.execute = mock.patch.object(
utils, "execute", spec=utils.execute).start()
@property
def TO(self):
return "--timeout=%s" % self.br.vsctl_timeout
def _vsctl_args(self, *args):
cmd = ['ovs-vsctl', self.TO, '--oneline', '--format=json', '--']
cmd += args
return cmd
def _vsctl_mock(self, *args):
cmd = self._vsctl_args(*args)
return mock.call(cmd, run_as_root=True, log_fail_as_error=False)
def _verify_vsctl_mock(self, *args):
cmd = self._vsctl_args(*args)
self.execute.assert_called_once_with(cmd, run_as_root=True,
log_fail_as_error=False)
def test_vifport(self):
"""Create and stringify vif port, confirm no exceptions."""
pname = "vif1.0"
ofport = 5
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
# test __init__
port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
self.assertEqual(port.port_name, pname)
self.assertEqual(port.ofport, ofport)
self.assertEqual(port.vif_id, vif_id)
self.assertEqual(port.vif_mac, mac)
self.assertEqual(port.switch.br_name, self.BR_NAME)
# test __str__
str(port)
def test_set_controller(self):
controller_names = ['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555']
self.br.set_controller(controller_names)
self._verify_vsctl_mock('set-controller', self.BR_NAME,
'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555')
def test_del_controller(self):
self.br.del_controller()
self._verify_vsctl_mock('del-controller', self.BR_NAME)
def test_get_controller(self):
self.execute.return_value = (
'tcp:127.0.0.1:6633\\ntcp:172.17.16.10:5555')
names = self.br.get_controller()
self.assertEqual(names,
['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'])
self._verify_vsctl_mock('get-controller', self.BR_NAME)
def test_set_secure_mode(self):
self.br.set_secure_mode()
self._verify_vsctl_mock('set-fail-mode', self.BR_NAME, 'secure')
def test_set_protocols(self):
protocols = 'OpenFlow13'
self.br.set_protocols(protocols)
self._verify_vsctl_mock('set', 'Bridge', self.BR_NAME,
"protocols=%s" % protocols)
def test_create(self):
self.br.add_bridge(self.BR_NAME)
self.br.create()
def test_destroy(self):
self.br.delete_bridge(self.BR_NAME)
self.br.destroy()
def test_reset_bridge(self):
self.br.destroy()
self.br.create()
self.br.reset_bridge()
def _build_timeout_opt(self, exp_timeout):
return "--timeout=%d" % exp_timeout if exp_timeout else self.TO
def test_replace_port(self):
pname = "tap5"
self.br.replace_port(pname)
self._verify_vsctl_mock("--if-exists", "del-port", pname,
"--", "add-port", self.BR_NAME, pname)
def test_replace_port_with_attrs(self):
pname = "tap5"
self.br.replace_port(pname, ('type', 'internal'),
('external_ids:iface-status', 'active'))
self._verify_vsctl_mock("--if-exists", "del-port", pname,
"--", "add-port", self.BR_NAME, pname,
"--", "set", "Interface", pname,
"type=internal",
"external_ids:iface-status=active")
def _test_delete_port(self, exp_timeout=None):
pname = "tap5"
self.br.delete_port(pname)
self._verify_vsctl_mock("--if-exists", "del-port", self.BR_NAME, pname)
def test_delete_port(self):
self._test_delete_port()
def test_call_command_non_default_timeout(self):
# This test is only for verifying a non-default timeout
# is correctly applied. Does not need to be repeated for
# every ovs_lib method
new_timeout = 5
self.br.vsctl_timeout = new_timeout
self._test_delete_port(new_timeout)
def test_add_flow(self):
ofport = "99"
vid = 4000
lsw_id = 18
cidr = '192.168.1.0/24'
flow_dict_1 = collections.OrderedDict([
('priority', 2),
('dl_src', 'ca:fe:de:ad:be:ef'),
('actions', 'strip_vlan,output:0')])
flow_dict_2 = collections.OrderedDict([
('priority', 1),
('actions', 'normal')])
flow_dict_3 = collections.OrderedDict([
('priority', 2),
('actions', 'drop')])
flow_dict_4 = collections.OrderedDict([
('priority', 2),
('in_port', ofport),
('actions', 'drop')])
flow_dict_5 = collections.OrderedDict([
('priority', 4),
('in_port', ofport),
('dl_vlan', vid),
('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))])
flow_dict_6 = collections.OrderedDict([
('priority', 3),
('tun_id', lsw_id),
('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))])
flow_dict_7 = collections.OrderedDict([
('priority', 4),
('nw_src', cidr),
('proto', 'arp'),
('actions', 'drop')])
self.br.add_flow(**flow_dict_1)
self.br.add_flow(**flow_dict_2)
self.br.add_flow(**flow_dict_3)
self.br.add_flow(**flow_dict_4)
self.br.add_flow(**flow_dict_5)
self.br.add_flow(**flow_dict_6)
self.br.add_flow(**flow_dict_7)
expected_calls = [
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,"
"priority=2,dl_src=ca:fe:de:ad:be:ef,"
"actions=strip_vlan,output:0")),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,"
"priority=1,actions=normal")),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,"
"priority=2,actions=drop")),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,priority=2,"
"in_port=%s,actions=drop" % ofport)),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,"
"priority=4,dl_vlan=%s,in_port=%s,"
"actions=strip_vlan,set_tunnel:%s,normal" %
(vid, ofport, lsw_id))),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,priority=3,"
"tun_id=%s,actions=mod_vlan_vid:%s,"
"output:%s" % (lsw_id, vid, ofport))),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,priority=4,"
"nw_src=%s,arp,actions=drop" % cidr)),
]
self.execute.assert_has_calls(expected_calls)
def _ofctl_args(self, cmd, *args):
cmd = ['ovs-ofctl', cmd]
cmd += args
return cmd
def _ofctl_mock(self, cmd, *args, **kwargs):
cmd = self._ofctl_args(cmd, *args)
return mock.call(cmd, run_as_root=True, **kwargs)
def _verify_ofctl_mock(self, cmd, *args, **kwargs):
cmd = self._ofctl_args(cmd, *args)
return self.execute.assert_called_once_with(cmd, run_as_root=True,
**kwargs)
def test_add_flow_timeout_set(self):
flow_dict = collections.OrderedDict([
('priority', 1),
('hard_timeout', 1000),
('idle_timeout', 2000),
('actions', 'normal')])
self.br.add_flow(**flow_dict)
self._verify_ofctl_mock(
"add-flows", self.BR_NAME, '-',
process_input="hard_timeout=1000,idle_timeout=2000,priority=1,"
"actions=normal")
def test_add_flow_default_priority(self):
flow_dict = collections.OrderedDict([('actions', 'normal')])
self.br.add_flow(**flow_dict)
self._verify_ofctl_mock(
"add-flows", self.BR_NAME, '-',
process_input="hard_timeout=0,idle_timeout=0,priority=1,"
"actions=normal")
def _test_get_port_ofport(self, ofport, expected_result):
pname = "tap99"
self.br.vsctl_timeout = 0 # Don't waste precious time retrying
self.execute.return_value = self._encode_ovs_json(
['ofport'], [[ofport]])
self.assertEqual(self.br.get_port_ofport(pname), expected_result)
self._verify_vsctl_mock("--columns=ofport", "list", "Interface", pname)
def test_get_port_ofport_succeeds_for_valid_ofport(self):
self._test_get_port_ofport(6, 6)
def test_get_port_ofport_returns_invalid_ofport_for_non_int(self):
self._test_get_port_ofport([], ovs_lib.INVALID_OFPORT)
def test_get_port_ofport_returns_invalid_for_invalid(self):
self._test_get_port_ofport(ovs_lib.INVALID_OFPORT,
ovs_lib.INVALID_OFPORT)
def test_get_datapath_id(self):
datapath_id = '"0000b67f4fbcc149"'
self.execute.return_value = self._encode_ovs_json(['datapath_id'],
[[datapath_id]])
self.assertEqual(self.br.get_datapath_id(), datapath_id)
self._verify_vsctl_mock("--columns=datapath_id", "list", "Bridge",
self.BR_NAME)
def test_count_flows(self):
self.execute.return_value = 'ignore\nflow-1\n'
# counts the number of flows as total lines of output - 2
self.assertEqual(self.br.count_flows(), 1)
self._verify_ofctl_mock("dump-flows", self.BR_NAME, process_input=None)
def test_delete_flow(self):
ofport = "5"
lsw_id = 40
vid = 39
self.br.delete_flows(in_port=ofport)
self.br.delete_flows(tun_id=lsw_id)
self.br.delete_flows(dl_vlan=vid)
expected_calls = [
self._ofctl_mock("del-flows", self.BR_NAME, '-',
process_input="in_port=" + ofport),
self._ofctl_mock("del-flows", self.BR_NAME, '-',
process_input="tun_id=%s" % lsw_id),
self._ofctl_mock("del-flows", self.BR_NAME, '-',
process_input="dl_vlan=%s" % vid),
]
self.execute.assert_has_calls(expected_calls)
def test_delete_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.delete_flows,
**params)
def test_dump_flows(self):
table = 23
nxst_flow = "NXST_FLOW reply (xid=0x4):"
flows = "\n".join([" cookie=0x0, duration=18042.514s, table=0, "
"n_packets=6, n_bytes=468, "
"priority=2,in_port=1 actions=drop",
" cookie=0x0, duration=18027.562s, table=0, "
"n_packets=0, n_bytes=0, "
"priority=3,in_port=1,dl_vlan=100 "
"actions=mod_vlan_vid:1,NORMAL",
" cookie=0x0, duration=18044.351s, table=0, "
"n_packets=9, n_bytes=594, priority=1 "
"actions=NORMAL", " cookie=0x0, "
"duration=18044.211s, table=23, n_packets=0, "
"n_bytes=0, priority=0 actions=drop"])
flow_args = '\n'.join([nxst_flow, flows])
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
run_ofctl.side_effect = [flow_args]
retflows = self.br.dump_flows_for_table(table)
self.assertEqual(flows, retflows)
def test_dump_flows_ovs_dead(self):
table = 23
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
run_ofctl.side_effect = ['']
retflows = self.br.dump_flows_for_table(table)
self.assertEqual(None, retflows)
def test_mod_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_mod_flow_no_actions_set(self):
params = {'in_port': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_add_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = 6
command = ["--may-exist", "add-port",
self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=gre", "options:df_default=true",
"options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock(*command), None),
(self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
self._encode_ovs_json(['ofport'], [[ofport]])),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_vxlan_fragmented_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = 6
vxlan_udp_port = "9999"
dont_fragment = False
command = ["--may-exist", "add-port", self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=" + constants.TYPE_VXLAN,
"options:dst_port=" + vxlan_udp_port,
"options:df_default=false",
"options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock(*command), None),
(self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
self._encode_ovs_json(['ofport'], [[ofport]])),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip,
constants.TYPE_VXLAN, vxlan_udp_port,
dont_fragment),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_patch_port(self):
pname = "tap99"
peer = "bar10"
ofport = 6
# Each element is a tuple of (expected mock call, return_value)
command = ["--may-exist", "add-port", self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=patch", "options:peer=" + peer])
expected_calls_and_values = [
(self._vsctl_mock(*command), None),
(self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
self._encode_ovs_json(['ofport'], [[ofport]]))
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(self.br.add_patch_port(pname, peer), ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _test_get_vif_ports(self, is_xen=False):
pname = "tap99"
ofport = 6
ofport_data = self._encode_ovs_json(['ofport'], [[ofport]])
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
id_field = 'xs-vif-uuid' if is_xen else 'iface-id'
external_ids = ('{"data":[[["map",[["attached-mac","%(mac)s"],'
'["%(id_field)s","%(vif)s"],'
'["iface-status","active"]]]]],'
'"headings":["external_ids"]}' % {
'mac': mac, 'vif': vif_id, 'id_field': id_field})
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), "%s\n" % pname),
(self._vsctl_mock("--columns=external_ids", "list",
"Interface", pname), external_ids),
(self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
ofport_data),
]
if is_xen:
expected_calls_and_values.append(
(mock.call(["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=" + vif_id],
run_as_root=True),
vif_id)
)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
ports = self.br.get_vif_ports()
self.assertEqual(1, len(ports))
self.assertEqual(ports[0].port_name, pname)
self.assertEqual(ports[0].ofport, ofport)
self.assertEqual(ports[0].vif_id, vif_id)
self.assertEqual(ports[0].vif_mac, mac)
self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _encode_ovs_json(self, headings, data):
# See man ovs-vsctl(8) for the encoding details.
r = {"data": [],
"headings": headings}
for row in data:
ovs_row = []
r["data"].append(ovs_row)
for cell in row:
if isinstance(cell, (str, int, list)):
ovs_row.append(cell)
elif isinstance(cell, dict):
ovs_row.append(["map", cell.items()])
elif isinstance(cell, set):
ovs_row.append(["set", cell])
else:
raise TypeError('%r not int, str, list, set or dict' %
type(cell))
return jsonutils.dumps(r)
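# Illustration (hypothetical row): _encode_ovs_json(['name', 'tag'], [['tap99', {'a': 'b'}]])
# returns roughly '{"data": [["tap99", ["map", [["a", "b"]]]]], "headings": ["name", "tag"]}',
# mirroring the column encoding described in ovs-vsctl(8).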
def _test_get_vif_port_set(self, is_xen):
if is_xen:
id_key = 'xs-vif-uuid'
else:
id_key = 'iface-id'
headings = ['name', 'external_ids', 'ofport']
data = [
# A vif port on this bridge:
['tap99', {id_key: 'tap99id', 'attached-mac': 'tap99mac'}, 1],
# A vif port on this bridge not yet configured
['tap98', {id_key: 'tap98id', 'attached-mac': 'tap98mac'}, []],
# Another vif port on this bridge not yet configured
['tap97', {id_key: 'tap97id', 'attached-mac': 'tap97mac'},
['set', []]],
# Non-vif port on this bridge:
['bogus', {}, 2],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), 'tap99\\ntun22'),
(self._vsctl_mock("--if-exists",
"--columns=name,external_ids,ofport",
"list", "Interface", 'tap99', 'tun22'),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id = mock.patch.object(self.br,
'get_xapi_iface_id').start()
get_xapi_iface_id.return_value = 'tap99id'
port_set = self.br.get_vif_port_set()
self.assertEqual(set(['tap99id']), port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id.assert_called_once_with('tap99id')
def test_get_vif_ports_nonxen(self):
self._test_get_vif_ports(is_xen=False)
def test_get_vif_ports_xen(self):
self._test_get_vif_ports(is_xen=True)
def test_get_vif_port_set_nonxen(self):
self._test_get_vif_port_set(False)
def test_get_vif_port_set_xen(self):
self._test_get_vif_port_set(True)
def test_get_vif_ports_list_ports_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_ports)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_ports_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_interface_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), 'tap99\n'),
(self._vsctl_mock("--if-exists",
"--columns=name,external_ids,ofport",
"list", "Interface", "tap99"), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_port_tag_dict(self):
headings = ['name', 'tag']
data = [
['int-br-eth2', set()],
['patch-tun', set()],
['qr-76d9e6b6-21', 1],
['tapce5318ff-78', 1],
['tape1400310-e6', 1],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME),
'\\n'.join((iface for iface, tag in data))),
(self._vsctl_mock("--columns=name,tag", "list", "Port"),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
port_tags = self.br.get_port_tag_dict()
self.assertEqual(
port_tags,
{u'int-br-eth2': [],
u'patch-tun': [],
u'qr-76d9e6b6-21': 1,
u'tapce5318ff-78': 1,
u'tape1400310-e6': 1}
)
def test_clear_db_attribute(self):
pname = "tap77"
self.br.clear_db_attribute("Port", pname, "tag")
self._verify_vsctl_mock("clear", "Port", pname, "tag")
def _test_iface_to_br(self, exp_timeout=None):
iface = 'tap0'
br = 'br-int'
if exp_timeout:
self.br.vsctl_timeout = exp_timeout
self.execute.return_value = 'br-int'
self.assertEqual(self.br.get_bridge_for_iface(iface), br)
self._verify_vsctl_mock("iface-to-br", iface)
def test_iface_to_br(self):
self._test_iface_to_br()
def test_iface_to_br_non_default_timeout(self):
new_timeout = 5
self._test_iface_to_br(new_timeout)
def test_iface_to_br_handles_ovs_vsctl_exception(self):
iface = 'tap0'
self.execute.side_effect = Exception
self.assertIsNone(self.br.get_bridge_for_iface(iface))
self._verify_vsctl_mock("iface-to-br", iface)
def test_delete_all_ports(self):
with mock.patch.object(self.br, 'get_port_name_list',
return_value=['port1']) as get_port:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=True)
get_port.assert_called_once_with()
delete_port.assert_called_once_with('port1')
def test_delete_neutron_ports(self):
port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
'ca:fe:de:ad:be:ef', 'br')
port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
'ca:ee:de:ad:be:ef', 'br')
with mock.patch.object(self.br, 'get_vif_ports',
return_value=[port1, port2]) as get_ports:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=False)
get_ports.assert_called_once_with()
delete_port.assert_has_calls([
mock.call('tap1234'),
mock.call('tap5678')
])
def test_delete_neutron_ports_list_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.delete_ports, all_ports=False)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _test_get_bridges(self, exp_timeout=None):
bridges = ['br-int', 'br-ex']
if exp_timeout:
self.br.vsctl_timeout = exp_timeout
self.execute.return_value = 'br-int\\nbr-ex\n'
self.assertEqual(self.br.get_bridges(), bridges)
self._verify_vsctl_mock("list-br")
def test_get_bridges(self):
self._test_get_bridges()
def test_get_bridges_not_default_timeout(self):
self._test_get_bridges(5)
def test_get_local_port_mac_succeeds(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address='foo')):
self.assertEqual('foo', self.br.get_local_port_mac())
def test_get_local_port_mac_raises_exception_for_missing_mac(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address=None)):
with testtools.ExpectedException(Exception):
self.br.get_local_port_mac()
def _test_get_vif_port_by_id(self, iface_id, data, br_name=None,
extra_calls_and_values=None):
headings = ['external_ids', 'name', 'ofport']
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock("--columns=external_ids,name,ofport", "find",
"Interface",
'external_ids:iface-id=%s' % iface_id,
'external_ids:attached-mac!=""'),
self._encode_ovs_json(headings, data))]
if data:
if not br_name:
br_name = self.BR_NAME
# Only the last information list in 'data' is used, so if more
# than one vif is described in data, the rest must be declared
# in the argument 'expected_calls_and_values'.
if extra_calls_and_values:
expected_calls_and_values.extend(extra_calls_and_values)
expected_calls_and_values.append(
(self._vsctl_mock("iface-to-br",
data[-1][headings.index('name')]), br_name))
tools.setup_mock_calls(self.execute, expected_calls_and_values)
vif_port = self.br.get_vif_port_by_id(iface_id)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
return vif_port
def _assert_vif_port(self, vif_port, ofport=None, mac=None):
if not ofport or ofport == -1 or not mac:
self.assertIsNone(vif_port, "Got %s" % vif_port)
return
self.assertEqual('tap99id', vif_port.vif_id)
self.assertEqual(mac, vif_port.vif_mac)
self.assertEqual('tap99', vif_port.port_name)
self.assertEqual(ofport, vif_port.ofport)
def _test_get_vif_port_by_id_with_data(self, ofport=None, mac=None):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"],
["attached-mac", mac]]
data = [[["map", external_ids], "tap99",
ofport if ofport else ["set", []]]]
vif_port = self._test_get_vif_port_by_id('tap99id', data)
self._assert_vif_port(vif_port, ofport, mac)
def test_get_vif_by_port_id_with_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_without_ofport(self):
self._test_get_vif_port_by_id_with_data(mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_with_invalid_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=-1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_with_no_data(self):
self.assertIsNone(self._test_get_vif_port_by_id('whatever', []))
def test_get_vif_by_port_id_different_bridge(self):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"]]
data = [[["map", external_ids], "tap99", 1]]
self.assertIsNone(self._test_get_vif_port_by_id('tap99id', data,
"br-ext"))
def test_get_vif_by_port_id_multiple_vifs(self):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"],
["attached-mac", "de:ad:be:ef:13:37"]]
data = [[["map", external_ids], "dummytap", 1],
[["map", external_ids], "tap99", 1337]]
extra_calls_and_values = [
(self._vsctl_mock("iface-to-br", "dummytap"), "br-ext")]
vif_port = self._test_get_vif_port_by_id(
'tap99id', data, extra_calls_and_values=extra_calls_and_values)
self._assert_vif_port(vif_port, ofport=1337, mac="de:ad:be:ef:13:37")
class TestDeferredOVSBridge(base.BaseTestCase):
def setUp(self):
super(TestDeferredOVSBridge, self).setUp()
self.br = mock.Mock()
self.mocked_do_action_flows = mock.patch.object(
self.br, 'do_action_flows').start()
self.add_flow_dict1 = dict(in_port=11, actions='drop')
self.add_flow_dict2 = dict(in_port=12, actions='drop')
self.mod_flow_dict1 = dict(in_port=21, actions='drop')
self.mod_flow_dict2 = dict(in_port=22, actions='drop')
self.del_flow_dict1 = dict(in_port=31)
self.del_flow_dict2 = dict(in_port=32)
def test_right_allowed_passthroughs(self):
expected_passthroughs = ('add_port', 'add_tunnel_port', 'delete_port')
self.assertEqual(expected_passthroughs,
ovs_lib.DeferredOVSBridge.ALLOWED_PASSTHROUGHS)
def _verify_mock_call(self, expected_calls):
self.mocked_do_action_flows.assert_has_calls(expected_calls)
self.assertEqual(len(expected_calls),
len(self.mocked_do_action_flows.mock_calls))
def test_apply_on_exit(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1]),
]
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
self._verify_mock_call([])
self._verify_mock_call(expected_calls)
def test_apply_on_exit_with_errors(self):
try:
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
raise Exception()
except Exception:
self._verify_mock_call([])
else:
self.fail('Exception should have been reraised')
def test_apply(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1]),
]
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
self._verify_mock_call([])
deferred_br.apply_flows()
self._verify_mock_call(expected_calls)
self._verify_mock_call(expected_calls)
def test_apply_order(self):
expected_calls = [
mock.call('del', [self.del_flow_dict1, self.del_flow_dict2]),
mock.call('mod', [self.mod_flow_dict1, self.mod_flow_dict2]),
mock.call('add', [self.add_flow_dict1, self.add_flow_dict2]),
]
order = 'del', 'mod', 'add'
with ovs_lib.DeferredOVSBridge(self.br, order=order) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict2)
deferred_br.add_flow(**self.add_flow_dict2)
deferred_br.mod_flow(**self.mod_flow_dict2)
self._verify_mock_call(expected_calls)
def test_apply_full_ordered(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1, self.del_flow_dict2]),
mock.call('add', [self.add_flow_dict2]),
mock.call('mod', [self.mod_flow_dict2]),
]
with ovs_lib.DeferredOVSBridge(self.br,
full_ordered=True) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict2)
deferred_br.add_flow(**self.add_flow_dict2)
deferred_br.mod_flow(**self.mod_flow_dict2)
self._verify_mock_call(expected_calls)
def test_getattr_unallowed_attr(self):
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
self.assertEqual(self.br.add_port, deferred_br.add_port)
def test_getattr_unallowed_attr_failure(self):
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
self.assertRaises(AttributeError, getattr, deferred_br, 'failure')
| 41.276688
| 79
| 0.585533
|
4d10f8ccb8cbb1532421acb13fc98a381c8d61c5
| 2,354
|
py
|
Python
|
api/resources/webapp/swipes.py
|
jimbunny/wedding-invitation
|
a3648454e1105d9362f95d9f6e69055a7522e15b
|
[
"MIT"
] | null | null | null |
api/resources/webapp/swipes.py
|
jimbunny/wedding-invitation
|
a3648454e1105d9362f95d9f6e69055a7522e15b
|
[
"MIT"
] | null | null | null |
api/resources/webapp/swipes.py
|
jimbunny/wedding-invitation
|
a3648454e1105d9362f95d9f6e69055a7522e15b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# author:jingtongyu
# datetime:2020/6/7 10:14 PM
# software: PyCharm
from flask_restful import Resource
from flask_restful.reqparse import RequestParser
from common import code, pretty_result
import os
import json
from werkzeug.datastructures import FileStorage
root = os.path.abspath(os.path.join(os.getcwd()))
filePath = r'./downloads/swipe/'
if not os.path.exists(filePath):
os.makedirs(filePath)
class SwipesResource(Resource):
"""
swipe list资源类
"""
def __init__(self):
self.parser = RequestParser()
def get(self):
"""
工具函数:
获取本地图片流
:param img_local_path:文件单张图片的本地绝对路径
:return: 图片流
"""
data = []
# for i in os.listdir(filePath):
# url = config.domain + "/api/v1/admin/image?_type=swipe&id=" + i.split(".")[0]
# data.append({"name": i, "redirectUrl": url, "carouselUrl": url})
with open(os.path.join(root, "data", "template", "swipe.json"), 'r', encoding="utf8") as load_f:
load_dict = json.load(load_f)
return pretty_result(code.OK, data=load_dict.get("data"), msg='Get swipes picture successful!')
def put(self):
"""
工具函数:
获取本地图片流
:param img_local_path:文件单张图片的本地绝对路径
:return: 图片流
"""
self.parser.add_argument("picture", type=FileStorage, location='files', action='append',
help='picture is required')
self.parser.add_argument("removeList", type=str, required=True, location="form", help='removelist is required')
args = self.parser.parse_args()
removeList = args.removeList.split(",")
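# Pictures whose filenames appear in removeList are skipped on upload below, and any matching
# files already on disk are deleted afterwards.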
if args.picture:
for item in args.picture:
if item.filename in removeList:
continue
new_fname = filePath + str(item.filename) + '.png'
item.save(new_fname)
for i in os.listdir(filePath):
if i in removeList:
old_fname = filePath + i
if os.path.exists(old_fname):
os.remove(old_fname)
else:
print("The file " + str(i) + " does not exist")
return pretty_result(code.OK, msg='Update swipes picture successful!')
| 33.628571
| 119
| 0.578165
|
2b064e8d58479f14479e97b1f6926b30f218605f
| 10,646
|
py
|
Python
|
babyai/rl/algos/base.py
|
m-smith/babyai
|
deb79a8171eaf3c7e1e131a49e92caaf89eecd8d
|
[
"BSD-3-Clause"
] | 411
|
2019-02-13T13:57:10.000Z
|
2022-03-15T22:47:27.000Z
|
babyai/rl/algos/base.py
|
m-smith/babyai
|
deb79a8171eaf3c7e1e131a49e92caaf89eecd8d
|
[
"BSD-3-Clause"
] | 47
|
2019-02-19T17:23:35.000Z
|
2021-05-05T15:16:03.000Z
|
babyai/rl/algos/base.py
|
m-smith/babyai
|
deb79a8171eaf3c7e1e131a49e92caaf89eecd8d
|
[
"BSD-3-Clause"
] | 100
|
2019-02-13T23:35:25.000Z
|
2022-02-10T17:58:25.000Z
|
from abc import ABC, abstractmethod
import torch
import numpy
from babyai.rl.format import default_preprocess_obss
from babyai.rl.utils import DictList, ParallelEnv
from babyai.rl.utils.supervised_losses import ExtraInfoCollector
class BaseAlgo(ABC):
"""The base class for RL algorithms."""
def __init__(self, envs, acmodel, num_frames_per_proc, discount, lr, gae_lambda, entropy_coef,
value_loss_coef, max_grad_norm, recurrence, preprocess_obss, reshape_reward, aux_info):
"""
Initializes a `BaseAlgo` instance.
Parameters:
----------
envs : list
a list of environments that will be run in parallel
acmodel : torch.nn.Module
the model
num_frames_per_proc : int
the number of frames collected by every process for an update
discount : float
the discount for future rewards
lr : float
the learning rate for optimizers
gae_lambda : float
the lambda coefficient in the GAE formula
([Schulman et al., 2015](https://arxiv.org/abs/1506.02438))
entropy_coef : float
the weight of the entropy cost in the final objective
value_loss_coef : float
the weight of the value loss in the final objective
max_grad_norm : float
gradient will be clipped to be at most this value
recurrence : int
the number of steps the gradient is propagated back in time
preprocess_obss : function
a function that takes observations returned by the environment
and converts them into the format that the model can handle
reshape_reward : function
a function that shapes the reward, takes an
(observation, action, reward, done) tuple as an input
aux_info : list
a list of strings corresponding to the name of the extra information
retrieved from the environment for supervised auxiliary losses
"""
# Store parameters
self.env = ParallelEnv(envs)
self.acmodel = acmodel
self.acmodel.train()
self.num_frames_per_proc = num_frames_per_proc
self.discount = discount
self.lr = lr
self.gae_lambda = gae_lambda
self.entropy_coef = entropy_coef
self.value_loss_coef = value_loss_coef
self.max_grad_norm = max_grad_norm
self.recurrence = recurrence
self.preprocess_obss = preprocess_obss or default_preprocess_obss
self.reshape_reward = reshape_reward
self.aux_info = aux_info
# Store helpers values
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.num_procs = len(envs)
self.num_frames = self.num_frames_per_proc * self.num_procs
assert self.num_frames_per_proc % self.recurrence == 0
# Initialize experience values
shape = (self.num_frames_per_proc, self.num_procs)
self.obs = self.env.reset()
self.obss = [None]*(shape[0])
self.memory = torch.zeros(shape[1], self.acmodel.memory_size, device=self.device)
self.memories = torch.zeros(*shape, self.acmodel.memory_size, device=self.device)
self.mask = torch.ones(shape[1], device=self.device)
self.masks = torch.zeros(*shape, device=self.device)
self.actions = torch.zeros(*shape, device=self.device, dtype=torch.int)
self.values = torch.zeros(*shape, device=self.device)
self.rewards = torch.zeros(*shape, device=self.device)
self.advantages = torch.zeros(*shape, device=self.device)
self.log_probs = torch.zeros(*shape, device=self.device)
if self.aux_info:
self.aux_info_collector = ExtraInfoCollector(self.aux_info, shape, self.device)
# Initialize log values
self.log_episode_return = torch.zeros(self.num_procs, device=self.device)
self.log_episode_reshaped_return = torch.zeros(self.num_procs, device=self.device)
self.log_episode_num_frames = torch.zeros(self.num_procs, device=self.device)
self.log_done_counter = 0
self.log_return = [0] * self.num_procs
self.log_reshaped_return = [0] * self.num_procs
self.log_num_frames = [0] * self.num_procs
def collect_experiences(self):
"""Collects rollouts and computes advantages.
Runs several environments concurrently. The next actions are computed
in batch mode for all environments at the same time. The rollouts
and advantages from all environments are concatenated together.
Returns
-------
exps : DictList
Contains actions, rewards, advantages etc as attributes.
Each attribute, e.g. `exps.reward` has a shape
(self.num_frames_per_proc * num_envs, ...). k-th block
of consecutive `self.num_frames_per_proc` frames contains
data obtained from the k-th environment. Be careful not to mix
data from different environments!
logs : dict
Useful stats about the training process, including the average
reward, policy loss, value loss, etc.
"""
for i in range(self.num_frames_per_proc):
# Do one agent-environment interaction
preprocessed_obs = self.preprocess_obss(self.obs, device=self.device)
with torch.no_grad():
model_results = self.acmodel(preprocessed_obs, self.memory * self.mask.unsqueeze(1))
dist = model_results['dist']
value = model_results['value']
memory = model_results['memory']
extra_predictions = model_results['extra_predictions']
action = dist.sample()
obs, reward, done, env_info = self.env.step(action.cpu().numpy())
if self.aux_info:
env_info = self.aux_info_collector.process(env_info)
# env_info = self.process_aux_info(env_info)
# Update experiences values
self.obss[i] = self.obs
self.obs = obs
self.memories[i] = self.memory
self.memory = memory
self.masks[i] = self.mask
self.mask = 1 - torch.tensor(done, device=self.device, dtype=torch.float)
self.actions[i] = action
self.values[i] = value
if self.reshape_reward is not None:
self.rewards[i] = torch.tensor([
self.reshape_reward(obs_, action_, reward_, done_)
for obs_, action_, reward_, done_ in zip(obs, action, reward, done)
], device=self.device)
else:
self.rewards[i] = torch.tensor(reward, device=self.device)
self.log_probs[i] = dist.log_prob(action)
if self.aux_info:
self.aux_info_collector.fill_dictionaries(i, env_info, extra_predictions)
# Update log values
self.log_episode_return += torch.tensor(reward, device=self.device, dtype=torch.float)
self.log_episode_reshaped_return += self.rewards[i]
self.log_episode_num_frames += torch.ones(self.num_procs, device=self.device)
for i, done_ in enumerate(done):
if done_:
self.log_done_counter += 1
self.log_return.append(self.log_episode_return[i].item())
self.log_reshaped_return.append(self.log_episode_reshaped_return[i].item())
self.log_num_frames.append(self.log_episode_num_frames[i].item())
self.log_episode_return *= self.mask
self.log_episode_reshaped_return *= self.mask
self.log_episode_num_frames *= self.mask
# Add advantage and return to experiences
preprocessed_obs = self.preprocess_obss(self.obs, device=self.device)
with torch.no_grad():
next_value = self.acmodel(preprocessed_obs, self.memory * self.mask.unsqueeze(1))['value']
for i in reversed(range(self.num_frames_per_proc)):
next_mask = self.masks[i+1] if i < self.num_frames_per_proc - 1 else self.mask
next_value = self.values[i+1] if i < self.num_frames_per_proc - 1 else next_value
next_advantage = self.advantages[i+1] if i < self.num_frames_per_proc - 1 else 0
delta = self.rewards[i] + self.discount * next_value * next_mask - self.values[i]
self.advantages[i] = delta + self.discount * self.gae_lambda * next_advantage * next_mask
# Flatten the data correctly, making sure that
# each process's data is a contiguous chunk
exps = DictList()
exps.obs = [self.obss[i][j]
for j in range(self.num_procs)
for i in range(self.num_frames_per_proc)]
# In comments below, T is self.num_frames_per_proc, P is self.num_procs,
# D is the dimensionality
# T x P x D -> P x T x D -> (P * T) x D
exps.memory = self.memories.transpose(0, 1).reshape(-1, *self.memories.shape[2:])
# T x P -> P x T -> (P * T) x 1
exps.mask = self.masks.transpose(0, 1).reshape(-1).unsqueeze(1)
# for all tensors below, T x P -> P x T -> P * T
exps.action = self.actions.transpose(0, 1).reshape(-1)
exps.value = self.values.transpose(0, 1).reshape(-1)
exps.reward = self.rewards.transpose(0, 1).reshape(-1)
exps.advantage = self.advantages.transpose(0, 1).reshape(-1)
exps.returnn = exps.value + exps.advantage
exps.log_prob = self.log_probs.transpose(0, 1).reshape(-1)
if self.aux_info:
exps = self.aux_info_collector.end_collection(exps)
# Preprocess experiences
exps.obs = self.preprocess_obss(exps.obs, device=self.device)
# Log some values
keep = max(self.log_done_counter, self.num_procs)
log = {
"return_per_episode": self.log_return[-keep:],
"reshaped_return_per_episode": self.log_reshaped_return[-keep:],
"num_frames_per_episode": self.log_num_frames[-keep:],
"num_frames": self.num_frames,
"episodes_done": self.log_done_counter,
}
self.log_done_counter = 0
self.log_return = self.log_return[-self.num_procs:]
self.log_reshaped_return = self.log_reshaped_return[-self.num_procs:]
self.log_num_frames = self.log_num_frames[-self.num_procs:]
return exps, log
@abstractmethod
def update_parameters(self):
pass
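# --- Hedged illustration (added by the editor, not part of the original BabyAI source) ---
# The backward loop in collect_experiences() implements Generalized Advantage
# Estimation (GAE). The self-contained sketch below reproduces the same recursion
# for a single environment with made-up rewards, values and masks, so the indexing
# is easier to follow in isolation. It only runs when this file is executed directly.
if __name__ == "__main__":
    T = 4
    discount, gae_lambda = 0.99, 0.95
    rewards = [1.0, 0.0, 0.0, 1.0]
    values = [0.5, 0.4, 0.3, 0.2]
    masks = [1.0, 1.0, 0.0, 1.0]      # 0.0 blocks bootstrapping across an episode boundary
    last_value, last_mask = 0.1, 1.0  # bootstrap value/mask for the step after the rollout
    advantages = [0.0] * T
    for i in reversed(range(T)):
        next_mask = masks[i + 1] if i < T - 1 else last_mask
        next_value = values[i + 1] if i < T - 1 else last_value
        next_advantage = advantages[i + 1] if i < T - 1 else 0.0
        delta = rewards[i] + discount * next_value * next_mask - values[i]
        advantages[i] = delta + discount * gae_lambda * next_advantage * next_mask
    print(advantages)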
| 41.585938
| 104
| 0.63235
|
408681ed1131d64e85e852faab94916533f5ad3a
| 6,171
|
py
|
Python
|
setup.py
|
tescalada/npyscreen-restructure
|
0833bbbdec18439182f102d2147f3756fa98aadd
|
[
"BSD-2-Clause"
] | 2
|
2015-01-12T14:47:19.000Z
|
2018-10-03T09:27:22.000Z
|
setup.py
|
tescalada/npyscreen-restructure
|
0833bbbdec18439182f102d2147f3756fa98aadd
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
tescalada/npyscreen-restructure
|
0833bbbdec18439182f102d2147f3756fa98aadd
|
[
"BSD-2-Clause"
] | 1
|
2020-03-20T20:19:33.000Z
|
2020-03-20T20:19:33.000Z
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name="npyscreen",
version="4.2.0",
description="Writing user interfaces without all that ugly mucking about in hyperspace",
author="Nicholas Cole",
author_email="n@npcole.com",
url="http://www.npcole.com/npyscreen/",
packages=['npyscreen'],
license='New BSD License',
classifiers= [
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Environment :: Console',
'Operating System :: POSIX',
'Environment :: Console :: Curses',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Topic :: Terminals'
],
long_description = """This library provides a framework for developing console applications using Python and curses.
This framework should be powerful enough to create everything from quick, simple programs to complex, multi-screen applications. It is designed to make doing the simple tasks very quick and to take much of the pain out of writing larger applications.
There is a very wide variety of default widgets - everything from simple text fields to more complex tree and grid views.
I have used versions of this library for private scripts and small applications for around ten years. As a result, it is fairly mature.
Documentation is online at http://npyscreen.readthedocs.org
Please report bugs or make feature requests using the bug-tracker at http://code.google.com/p/npyscreen.
There is a mailing list available at https://groups.google.com/forum/?fromgroups#!forum/npyscreen/
*Latest Changes*:
Version 4.2.0 introduces the ability of Grid widgets to highlight the whole line that the cursor is on (user request).
Version 4.1.0 introduces support for hvc consoles (thanks to wu.fuheng@********* for the bug report). Title widgets can now define a when_cursor_moved() method directly
on themselves that will be called as expected by the contained entry_widget during its edit loop (user request).
Version 4.0.0 introduces a new version scheme. Due to a packaging error in
the 3.0 release series some users were having problems obtaining the latest
version. This is most easily fixed with a new major version release.
Version 3.10 MultiLineEditable, MultiLineEditableTitle, MultiLineEditableBoxed classes added, allowing the user to edit lists of items.
See EXAMPLE-MultilineEditable for an example.
Version 3.6 Title.. widgets should now resize properly. Menu items can now
be specified with arguments and keywords.
Version 3.5 when_value_edited defined on Title.. widgets now work as users expect.
Version 3.4 Fixed bugs in Title.. widgets and in the App classes.
Version 3.3 and the subsequent minor releases fix some bugs, mainly related
to changes caused by allowing resized forms.
Version 3.2 adds CheckboxBare - a checkbox without a label. Added at user request.
Version 3.0 *IMPORTANT* The version number has changed to version 3.0.
This is because newer versions of pip distinguish between pre-release and released versions,
and this will allow more flexibility in future releases. A version '2.0' might have caused confusion at this stage.
Version 3.0 fixes the specification of max_width values for titled widgets (Thanks to Phil Rich for the bug report).
Please report any further problems.
Version 2.0pre90 introduces a new BufferPager and TitleBufferPager class. (User request, suggested by dennis@wsec.be)
Version 2.0pre88 *IMPORTANT* This version supports resizing the terminal.
Read the documentation for more detail about how to disable this feature if
you need to. It has been implemented in a way that should be compatible
with existing code. New code can make the resizing even more flexible.
Version 2.0pre87 Updates the documentation and contains various bug fixes.
Version 2.0pre85 and 2.0pre86 are both bugfix releases.
Version 2.0pre84 introduces an experimental system for editing lists of
options. See documentation for details.
Version 2.0pre83 multi-line checkbox widgets are now possible. These can also be used as contained widgets within the multiselect class. See documentation for details.
Version 2.0pre82 changes the menu system and allows menu items to be given keyboard shortcuts.
Version 2.0pre81 introduces FilenameCombo, TitleFilenameCombo.
Version 2.0pre79 is a bugfix release.
Version 2.0pre76 further improves the handling of mouse events on compatible
terminals.
Version 2.0pre75 improves the handling of the mouse on compatible terminals.
Version 2.0pre74 corrects one minor bug and makes box widgets
behave slightly more predictably (the .editable attribute is now linked to that of
the contained widget).
Version 2.0pre73 corrects two bugs - thanks to Lasse for his help in finding
them and offering patches.
Version 2.0pre71 new tree classes introduced. Bug fixes.
Version 2.0pre70 introduces the MLTreeMultiSelect class.
Version 2.0pre69 fixes and tidies up some of the new tree classes. There is an API change associated with this, noted in the documentation, though backward compatibility should have been maintained.
Version 2.0pre68 setting a form's .editing attribute to False now causes it to exit immediately,
even if a widget is still being edited.
Version 2.0pre67 fixes minor bugs.
Version 2.0pre65 fixes several bugs. All textboxes now honour the .hidden
attribute. The major side effect of this is that tree classes are now
easier to write.
Version 2.0pre64 extends multi-page support and includes revision to the
documentation.
Version 2.0pre63 adds initial support for multi-page forms. See documentation on the
FormMultiPage class for details.
Version 2.0pre57 fixes color support - it should now be possible to display
a terminal with a different color background. Text widgets have some
additional color options.
Version 2.0pre52 fixes compatibility with python2.6, 3.0 and 3.1. All other versions should be unaffected.
Version 2.0pre50 enables basic mouse support. Note that the Apple terminal does not handle mouse events correctly.
"""
)
| 44.395683
| 250
| 0.782531
|
61b329c60719060cb97b0371435619ab5d833da5
| 14,416
|
py
|
Python
|
src/dynamodb_encryption_sdk/structures.py
|
robin-aws/aws-dynamodb-encryption-python
|
25c7c3d80bfbe0deb661b4beb86f61b8b2f8545e
|
[
"Apache-2.0"
] | 57
|
2018-08-23T00:32:37.000Z
|
2022-03-24T20:59:01.000Z
|
src/dynamodb_encryption_sdk/structures.py
|
robin-aws/aws-dynamodb-encryption-python
|
25c7c3d80bfbe0deb661b4beb86f61b8b2f8545e
|
[
"Apache-2.0"
] | 91
|
2018-08-06T17:32:28.000Z
|
2022-03-31T10:23:02.000Z
|
src/dynamodb_encryption_sdk/structures.py
|
robin-aws/aws-dynamodb-encryption-python
|
25c7c3d80bfbe0deb661b4beb86f61b8b2f8545e
|
[
"Apache-2.0"
] | 38
|
2018-10-17T12:02:37.000Z
|
2022-02-13T02:53:14.000Z
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Common structures used by the DynamoDB Encryption Client."""
import copy
import attr
import six
from dynamodb_encryption_sdk.exceptions import InvalidArgumentError
from dynamodb_encryption_sdk.internal.identifiers import ReservedAttributes
from dynamodb_encryption_sdk.internal.validators import dictionary_validator, iterable_validator
from .identifiers import CryptoAction
try: # Python 3.5.0 and 3.5.1 have incompatible typing modules
from typing import Dict, Iterable, List, Optional, Set, Text # noqa pylint: disable=unused-import
except ImportError: # pragma: no cover
# We only actually need these imports when running the mypy checks
pass
__all__ = ("EncryptionContext", "AttributeActions", "TableIndex", "TableInfo")
def _validate_attribute_values_are_ddb_items(instance, attribute, value): # pylint: disable=unused-argument
"""Validate that dictionary values in ``value`` match the structure of DynamoDB JSON
items.
.. note::
We are not trying to validate the full structure of the item with this validator.
This is just meant to verify that the values roughly match the correct format.
"""
for data in value.values():
if len(list(data.values())) != 1:
raise TypeError('"{}" values do not look like DynamoDB items'.format(attribute.name))
@attr.s(init=False)
class EncryptionContext(object):
# pylint: disable=too-few-public-methods
"""Additional information about an encryption request.
:param str table_name: Table name
:param str partition_key_name: Name of primary index partition attribute
:param str sort_key_name: Name of primary index sort attribute
:param dict attributes: Plaintext item attributes as a DynamoDB JSON dictionary
:param dict material_description: Material description to use with this request
"""
table_name = attr.ib(
validator=attr.validators.optional(attr.validators.instance_of(six.string_types)), default=None
)
partition_key_name = attr.ib(
validator=attr.validators.optional(attr.validators.instance_of(six.string_types)), default=None
)
sort_key_name = attr.ib(
validator=attr.validators.optional(attr.validators.instance_of(six.string_types)), default=None
)
attributes = attr.ib(
repr=False,
validator=(dictionary_validator(six.string_types, dict), _validate_attribute_values_are_ddb_items),
default=attr.Factory(dict),
)
material_description = attr.ib(
validator=dictionary_validator(six.string_types, six.string_types),
converter=copy.deepcopy,
default=attr.Factory(dict),
)
def __init__(
self,
table_name=None, # type: Optional[Text]
partition_key_name=None, # type: Optional[Text]
sort_key_name=None, # type: Optional[Text]
attributes=None, # type: Optional[Dict[Text, Dict]]
material_description=None, # type: Optional[Dict[Text, Text]]
): # noqa=D107
# type: (...) -> None
# Workaround pending resolution of attrs/mypy interaction.
# https://github.com/python/mypy/issues/2088
# https://github.com/python-attrs/attrs/issues/215
if attributes is None:
attributes = {}
if material_description is None:
material_description = {}
self.table_name = table_name
self.partition_key_name = partition_key_name
self.sort_key_name = sort_key_name
self.attributes = attributes
self.material_description = material_description
attr.validate(self)
@attr.s(init=False)
class AttributeActions(object):
"""Configuration resource used to determine what action should be taken for a specific attribute.
:param CryptoAction default_action: Action to take if no specific action is defined in
``attribute_actions``
:param dict attribute_actions: Dictionary mapping attribute names to specific actions
"""
default_action = attr.ib(validator=attr.validators.instance_of(CryptoAction), default=CryptoAction.ENCRYPT_AND_SIGN)
attribute_actions = attr.ib(
validator=dictionary_validator(six.string_types, CryptoAction), default=attr.Factory(dict)
)
def __init__(
self,
default_action=CryptoAction.ENCRYPT_AND_SIGN, # type: Optional[CryptoAction]
attribute_actions=None, # type: Optional[Dict[Text, CryptoAction]]
): # noqa=D107
# type: (...) -> None
# Workaround pending resolution of attrs/mypy interaction.
# https://github.com/python/mypy/issues/2088
# https://github.com/python-attrs/attrs/issues/215
if attribute_actions is None:
attribute_actions = {}
self.default_action = default_action
self.attribute_actions = attribute_actions
attr.validate(self)
self.__attrs_post_init__()
def __attrs_post_init__(self):
# () -> None
"""Determine if any actions should ever be taken with this configuration and record that for reference."""
for attribute in ReservedAttributes:
if attribute.value in self.attribute_actions:
raise ValueError('No override behavior can be set for reserved attribute "{}"'.format(attribute.value))
# Enums are not hashable, but their names are unique
_unique_actions = {self.default_action.name}
_unique_actions.update({action.name for action in self.attribute_actions.values()})
no_actions = _unique_actions == {CryptoAction.DO_NOTHING.name}
self.take_no_actions = no_actions # attrs confuses pylint: disable=attribute-defined-outside-init
def action(self, attribute_name):
# (text) -> CryptoAction
"""Determine the correct :class:`CryptoAction` to apply to a supplied attribute based
on this config.
:param str attribute_name: Attribute for which to determine action
"""
return self.attribute_actions.get(attribute_name, self.default_action)
def copy(self):
# () -> AttributeActions
"""Return a new copy of this object."""
return AttributeActions(default_action=self.default_action, attribute_actions=self.attribute_actions.copy())
def set_index_keys(self, *keys):
"""Set the appropriate action for the specified indexed attribute names.
.. warning::
If you have already set a custom action for any of these attributes, this will
raise an error.
.. code::
Default Action -> Index Key Action
DO_NOTHING -> DO_NOTHING
SIGN_ONLY -> SIGN_ONLY
ENCRYPT_AND_SIGN -> SIGN_ONLY
:param str *keys: Attribute names to treat as indexed
:raises InvalidArgumentError: if a custom action was previously set for any specified
attributes
"""
for key in keys:
index_action = min(self.action(key), CryptoAction.SIGN_ONLY)
try:
if self.attribute_actions[key] is not index_action:
raise InvalidArgumentError(
'Cannot overwrite a previously requested action on indexed attribute: "{}"'.format(key)
)
except KeyError:
self.attribute_actions[key] = index_action
def contains_action(self, action):
# (CryptoAction) -> bool
"""Determine if the specified action is a possible action from this configuration.
:param CryptoAction action: Action to look for
"""
return action is self.default_action or action in self.attribute_actions.values()
def __add__(self, other):
# (AttributeActions) -> AttributeActions
"""Merge two AttributeActions objects into a new instance, applying the dominant
action in each discovered case.
"""
default_action = self.default_action + other.default_action
all_attributes = set(self.attribute_actions.keys()).union(set(other.attribute_actions.keys()))
attribute_actions = {}
for attribute in all_attributes:
attribute_actions[attribute] = max(self.action(attribute), other.action(attribute))
return AttributeActions(default_action=default_action, attribute_actions=attribute_actions)
@attr.s(init=False)
class TableIndex(object):
# pylint: disable=too-few-public-methods
"""Describes a table index.
:param str partition: Name of the partition attribute
:param str sort: Name of the sort attribute (optional)
"""
partition = attr.ib(validator=attr.validators.instance_of(six.string_types))
sort = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(six.string_types)), default=None)
def __init__(self, partition, sort=None): # noqa=D107
# type: (Text, Optional[Text]) -> None
# Workaround pending resolution of attrs/mypy interaction.
# https://github.com/python/mypy/issues/2088
# https://github.com/python-attrs/attrs/issues/215
self.partition = partition
self.sort = sort
attr.validate(self)
self.__attrs_post_init__()
def __attrs_post_init__(self):
"""Set the ``attributes`` attribute for ease of access later."""
self.attributes = set([self.partition]) # attrs confuses pylint: disable=attribute-defined-outside-init
if self.sort is not None:
self.attributes.add(self.sort)
@classmethod
def from_key_schema(cls, key_schema):
# type: (Iterable[Dict[Text, Text]]) -> TableIndex
"""Build a TableIndex from the key schema returned by DescribeTable.
.. code::
[
{
"KeyType": "HASH"|"RANGE",
"AttributeName": ""
},
]
:param list key_schema: KeySchema from DescribeTable response
:returns: New TableIndex that describes the provided schema
:rtype: TableIndex
"""
index = {key["KeyType"]: key["AttributeName"] for key in key_schema}
return cls(partition=index["HASH"], sort=index.get("RANGE", None))
@attr.s(init=False)
class TableInfo(object):
"""Describes a DynamoDB table.
:param str name: Table name
:param bool all_encrypting_secondary_indexes: Should we allow secondary index attributes to be encrypted?
:param TableIndex primary_index: Description of primary index
:param secondary_indexes: Set of TableIndex objects describing any secondary indexes
:type secondary_indexes: list(TableIndex)
"""
name = attr.ib(validator=attr.validators.instance_of(six.string_types))
_primary_index = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(TableIndex)), default=None)
_secondary_indexes = attr.ib(validator=attr.validators.optional(iterable_validator(list, TableIndex)), default=None)
def __init__(
self,
name, # type: Text
primary_index=None, # type: Optional[TableIndex]
secondary_indexes=None, # type: Optional[List[TableIndex]]
): # noqa=D107
# type: (...) -> None
# Workaround pending resolution of attrs/mypy interaction.
# https://github.com/python/mypy/issues/2088
# https://github.com/python-attrs/attrs/issues/215
self.name = name
self._primary_index = primary_index
self._secondary_indexes = secondary_indexes
attr.validate(self)
@property
def primary_index(self):
# type: () -> TableIndex
"""Return the primary TableIndex.
:returns: primary index description
:rtype: TableIndex
:raises AttributeError: if primary index is unknown
"""
if self._primary_index is None:
raise AttributeError("Indexes unknown. Run refresh_indexed_attributes")
return self._primary_index
@property
def secondary_indexes(self):
# type: () -> List[TableIndex]
"""Return the primary TableIndex.
:returns: secondary index descriptions
:rtype: TableIndex
:raises AttributeError: if secondary indexes are unknown
"""
if self._secondary_indexes is None:
raise AttributeError("Indexes unknown. Run refresh_indexed_attributes")
return self._secondary_indexes
def protected_index_keys(self):
# type: () -> Set[Text]
"""Provide a set containing the names of all indexed attributes that must not be encrypted."""
return self.primary_index.attributes
@property
def encryption_context_values(self):
# type: () -> Dict[Text, Text]
"""Build parameters needed to inform an EncryptionContext constructor about this table.
:rtype: dict
"""
values = {"table_name": self.name}
if self.primary_index is not None:
values.update(
{"partition_key_name": self.primary_index.partition, "sort_key_name": self.primary_index.sort}
)
return values
def refresh_indexed_attributes(self, client):
"""Use the provided boto3 DynamoDB client to determine all indexes for this table.
:param client: Pre-configured boto3 DynamoDB client
:type client: botocore.client.BaseClient
"""
table = client.describe_table(TableName=self.name)["Table"]
self._primary_index = TableIndex.from_key_schema(table["KeySchema"])
self._secondary_indexes = []
for group in ("LocalSecondaryIndexes", "GlobalSecondaryIndexes"):
try:
for index in table[group]:
self._secondary_indexes.append(TableIndex.from_key_schema(index["KeySchema"]))
except KeyError:
pass # Not all tables will have secondary indexes.
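# --- Hedged illustration (added by the editor, not part of the original SDK source) ---
# A small demonstration of the helpers defined above: building a TableIndex from a
# DescribeTable key schema and letting AttributeActions demote indexed attributes to
# SIGN_ONLY. The attribute names are made up; only names defined in this module are used.
if __name__ == "__main__":
    index = TableIndex.from_key_schema(
        [
            {"KeyType": "HASH", "AttributeName": "partition_attribute"},
            {"KeyType": "RANGE", "AttributeName": "sort_attribute"},
        ]
    )
    print(sorted(index.attributes))  # ['partition_attribute', 'sort_attribute']

    actions = AttributeActions()  # default action is ENCRYPT_AND_SIGN
    actions.set_index_keys(*index.attributes)
    print(actions.action("partition_attribute"))  # demoted to CryptoAction.SIGN_ONLY
    print(actions.action("any_other_attribute"))  # still CryptoAction.ENCRYPT_AND_SIGN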
| 40.608451
| 120
| 0.676332
|
283f63bbe2faed5ef8c161dc156746ce3e623287
| 1,169
|
py
|
Python
|
pyapprox/sys_utilities.py
|
ConnectedSystems/pyapprox
|
4f405654c707cba83d211f327c0f0fdbc95efa29
|
[
"MIT"
] | 26
|
2019-12-16T02:21:15.000Z
|
2022-03-17T09:59:18.000Z
|
pyapprox/sys_utilities.py
|
ConnectedSystems/pyapprox
|
4f405654c707cba83d211f327c0f0fdbc95efa29
|
[
"MIT"
] | 9
|
2020-03-03T03:04:55.000Z
|
2021-08-19T22:50:42.000Z
|
pyapprox/sys_utilities.py
|
ConnectedSystems/pyapprox
|
4f405654c707cba83d211f327c0f0fdbc95efa29
|
[
"MIT"
] | 7
|
2020-03-02T03:49:17.000Z
|
2021-02-17T02:07:53.000Z
|
import sys, os
import pkg_resources
import importlib
import numpy as np
def trace_error_with_msg(msg, e: Exception):
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(msg)
print(f'Failed with error: {e}')
details = f"""
Error type: {exc_type}
file/location: {fname} | {exc_tb.tb_lineno}
"""
print(details)
def hash_array(array, decimals=None):
r"""
Hash an array for dictionary or set based lookup
Parameters
----------
array : np.ndarray
The integer array to hash
Returns
-------
key : integer
The hash value of the array
"""
#assert array.ndim==1
#array = np.ascontiguousarray(array)
#array.flags.writeable = False
# return hash(array.data)
if decimals is not None:
array = np.around(array, decimals)
# return hash(array.tostring())
return hash(array.tobytes())
def package_available(name):
pkg_available = True
try:
mod = importlib.import_module(name)
except (ModuleNotFoundError, ImportError):
pkg_available = False
return pkg_available
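# --- Hedged illustration (added by the editor, not part of the original pyapprox source) ---
# hash_array() is meant for dictionary/set lookups keyed by array contents. Rounding via
# `decimals` lets nearly-identical arrays map to the same key, as sketched below.
if __name__ == "__main__":
    a = np.array([1.0, 2.0, 3.000001])
    b = np.array([1.0, 2.0, 3.0])
    cache = {hash_array(a, decimals=3): "cached value"}
    print(hash_array(b, decimals=3) in cache)   # True: both round to [1. 2. 3.]
    print(hash_array(a) == hash_array(b))       # False without rounding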
| 22.480769
| 64
| 0.644996
|
ce377857ff0550c9ba921f1ca78d79f68791948e
| 12,004
|
py
|
Python
|
python/ray/util/actor_pool.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 22
|
2018-05-08T05:52:34.000Z
|
2020-04-01T10:09:55.000Z
|
python/ray/util/actor_pool.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 51
|
2018-05-17T05:55:28.000Z
|
2020-03-18T06:49:49.000Z
|
python/ray/util/actor_pool.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 10
|
2018-04-27T10:50:59.000Z
|
2020-02-24T02:41:43.000Z
|
import ray
from ray.util.annotations import PublicAPI
@PublicAPI(stability="beta")
class ActorPool:
"""Utility class to operate on a fixed pool of actors.
Arguments:
actors (list): List of Ray actor handles to use in this pool.
Examples:
>>> import ray
>>> from ray.util.actor_pool import ActorPool
>>> @ray.remote # doctest: +SKIP
>>> class Actor: # doctest: +SKIP
... ... # doctest: +SKIP
>>> a1, a2 = Actor.remote(), Actor.remote() # doctest: +SKIP
>>> pool = ActorPool([a1, a2]) # doctest: +SKIP
>>> print(list(pool.map(lambda a, v: a.double.remote(v), # doctest: +SKIP
... [1, 2, 3, 4]))) # doctest: +SKIP
[2, 4, 6, 8]
"""
def __init__(self, actors):
# actors to be used
self._idle_actors = list(actors)
# get actor from future
self._future_to_actor = {}
# get future from index
self._index_to_future = {}
# next task to do
self._next_task_index = 0
# next task to return
self._next_return_index = 0
# work waiting to be submitted once an actor becomes free
self._pending_submits = []
def map(self, fn, values):
"""Apply the given function in parallel over the actors and values.
This returns an ordered iterator that will return results of the map
as they finish. Note that you must iterate over the iterator to force
the computation to finish.
Arguments:
fn (func): Function that takes (actor, value) as argument and
returns an ObjectRef computing the result over the value. The
actor will be considered busy until the ObjectRef completes.
values (list): List of values that fn(actor, value) should be
applied to.
Returns:
Iterator over results from applying fn to the actors and values.
Examples:
>>> from ray.util.actor_pool import ActorPool
>>> pool = ActorPool(...) # doctest: +SKIP
>>> print(list(pool.map(lambda a, v: a.double.remote(v),
... [1, 2, 3, 4]))) # doctest: +SKIP
[2, 4, 6, 8]
"""
# Ignore/Cancel all the previous submissions
# by calling `has_next` and `get_next` repeatedly.
while self.has_next():
try:
self.get_next(timeout=0)
except TimeoutError:
pass
for v in values:
self.submit(fn, v)
while self.has_next():
yield self.get_next()
def map_unordered(self, fn, values):
"""Similar to map(), but returning an unordered iterator.
This returns an unordered iterator that will return results of the map
as they finish. This can be more efficient than map() if some results
take longer to compute than others.
Arguments:
fn (func): Function that takes (actor, value) as argument and
returns an ObjectRef computing the result over the value. The
actor will be considered busy until the ObjectRef completes.
values (list): List of values that fn(actor, value) should be
applied to.
Returns:
Iterator over results from applying fn to the actors and values.
Examples:
>>> from ray.util.actor_pool import ActorPool
>>> pool = ActorPool(...) # doctest: +SKIP
>>> print(list(pool.map_unordered(lambda a, v: a.double.remote(v),
... [1, 2, 3, 4]))) # doctest: +SKIP
[6, 2, 4, 8]
"""
# Ignore/Cancel all the previous submissions
# by calling `has_next` and `get_next_unordered` repeatedly.
while self.has_next():
try:
self.get_next_unordered(timeout=0)
except TimeoutError:
pass
for v in values:
self.submit(fn, v)
while self.has_next():
yield self.get_next_unordered()
def submit(self, fn, value):
"""Schedule a single task to run in the pool.
This has the same argument semantics as map(), but takes on a single
value instead of a list of values. The result can be retrieved using
get_next() / get_next_unordered().
Arguments:
fn (func): Function that takes (actor, value) as argument and
returns an ObjectRef computing the result over the value. The
actor will be considered busy until the ObjectRef completes.
value (object): Value to compute a result for.
Examples:
>>> from ray.util.actor_pool import ActorPool
>>> pool = ActorPool(...) # doctest: +SKIP
>>> pool.submit(lambda a, v: a.double.remote(v), 1) # doctest: +SKIP
>>> pool.submit(lambda a, v: a.double.remote(v), 2) # doctest: +SKIP
>>> print(pool.get_next(), pool.get_next()) # doctest: +SKIP
2, 4
"""
if self._idle_actors:
actor = self._idle_actors.pop()
future = fn(actor, value)
future_key = tuple(future) if isinstance(future, list) else future
self._future_to_actor[future_key] = (self._next_task_index, actor)
self._index_to_future[self._next_task_index] = future
self._next_task_index += 1
else:
self._pending_submits.append((fn, value))
def has_next(self):
"""Returns whether there are any pending results to return.
Returns:
True if there are any pending results not yet returned.
Examples:
>>> from ray.util.actor_pool import ActorPool
>>> pool = ActorPool(...) # doctest: +SKIP
>>> pool.submit(lambda a, v: a.double.remote(v), 1) # doctest: +SKIP
>>> print(pool.has_next()) # doctest: +SKIP
True
>>> print(pool.get_next()) # doctest: +SKIP
2
>>> print(pool.has_next()) # doctest: +SKIP
False
"""
return bool(self._future_to_actor)
def get_next(self, timeout=None):
"""Returns the next pending result in order.
This returns the next result produced by submit(), blocking for up to
the specified timeout until it is available.
Returns:
The next result.
Raises:
TimeoutError if the timeout is reached.
Examples:
>>> from ray.util.actor_pool import ActorPool
>>> pool = ActorPool(...) # doctest: +SKIP
>>> pool.submit(lambda a, v: a.double.remote(v), 1) # doctest: +SKIP
>>> print(pool.get_next()) # doctest: +SKIP
2
"""
if not self.has_next():
raise StopIteration("No more results to get")
if self._next_return_index >= self._next_task_index:
raise ValueError(
"It is not allowed to call get_next() after get_next_unordered()."
)
future = self._index_to_future[self._next_return_index]
if timeout is not None:
res, _ = ray.wait([future], timeout=timeout)
if not res:
raise TimeoutError("Timed out waiting for result")
del self._index_to_future[self._next_return_index]
self._next_return_index += 1
future_key = tuple(future) if isinstance(future, list) else future
i, a = self._future_to_actor.pop(future_key)
self._return_actor(a)
return ray.get(future)
def get_next_unordered(self, timeout=None):
"""Returns any of the next pending results.
This returns some result produced by submit(), blocking for up to
the specified timeout until it is available. Unlike get_next(), the
results are not always returned in same order as submitted, which can
improve performance.
Returns:
The next result.
Raises:
TimeoutError if the timeout is reached.
Examples:
>>> from ray.util.actor_pool import ActorPool
>>> pool = ActorPool(...) # doctest: +SKIP
>>> pool.submit(lambda a, v: a.double.remote(v), 1) # doctest: +SKIP
>>> pool.submit(lambda a, v: a.double.remote(v), 2) # doctest: +SKIP
>>> print(pool.get_next_unordered()) # doctest: +SKIP
4
>>> print(pool.get_next_unordered()) # doctest: +SKIP
2
"""
if not self.has_next():
raise StopIteration("No more results to get")
# TODO(ekl) bulk wait for performance
res, _ = ray.wait(list(self._future_to_actor), num_returns=1, timeout=timeout)
if res:
[future] = res
else:
raise TimeoutError("Timed out waiting for result")
i, a = self._future_to_actor.pop(future)
self._return_actor(a)
del self._index_to_future[i]
self._next_return_index = max(self._next_return_index, i + 1)
return ray.get(future)
def _return_actor(self, actor):
self._idle_actors.append(actor)
if self._pending_submits:
self.submit(*self._pending_submits.pop(0))
def has_free(self):
"""Returns whether there are any idle actors available.
Returns:
True if there are any idle actors and no pending submits.
Examples:
>>> @ray.remote # doctest: +SKIP
>>> class Actor: # doctest: +SKIP
... ... # doctest: +SKIP
>>> a1 = Actor.remote() # doctest: +SKIP
>>> pool = ActorPool(a1) # doctest: +SKIP
>>> pool.submit(lambda a, v: a.double.remote(v), 1) # doctest: +SKIP
>>> print(pool.has_free()) # doctest: +SKIP
False
>>> print(pool.get_next()) # doctest: +SKIP
2
>>> print(pool.has_free()) # doctest: +SKIP
True
"""
return len(self._idle_actors) > 0 and len(self._pending_submits) == 0
def pop_idle(self):
"""Removes an idle actor from the pool.
Returns:
An idle actor if one is available.
None if no actor was free to be removed.
Examples:
>>> @ray.remote # doctest: +SKIP
>>> class Actor: # doctest: +SKIP
... ... # doctest: +SKIP
>>> a1 = Actor.remote() # doctest: +SKIP
>>> pool = ActorPool([a1]) # doctest: +SKIP
>>> pool.submit(lambda a, v: a.double.remote(v), 1) # doctest: +SKIP
>>> print(pool.pop_idle()) # doctest: +SKIP
None
>>> print(pool.get_next()) # doctest: +SKIP
2
>>> print(pool.pop_idle()) # doctest: +SKIP
<ptr to a1>
"""
if self.has_free():
return self._idle_actors.pop()
return None
def push(self, actor):
"""Pushes a new actor into the current list of idle actors.
Examples:
>>> @ray.remote # doctest: +SKIP
>>> class Actor: # doctest: +SKIP
... ... # doctest: +SKIP
>>> a1, b1 = Actor.remote(), Actor.remote() # doctest: +SKIP
>>> pool = ActorPool([a1]) # doctest: +SKIP
>>> pool.submit(lambda a, v: a.double.remote(v), 1) # doctest: +SKIP
>>> print(pool.get_next()) # doctest: +SKIP
2
>>> pool2 = ActorPool([b1]) # doctest: +SKIP
>>> pool2.push(pool.pop_idle()) # doctest: +SKIP
"""
busy_actors = []
if self._future_to_actor.values():
_, busy_actors = zip(*self._future_to_actor.values())
if actor in self._idle_actors or actor in busy_actors:
raise ValueError("Actor already belongs to current ActorPool")
else:
self._idle_actors.append(actor)
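# --- Hedged illustration (added by the editor, not part of the original Ray source) ---
# End-to-end usage sketch mirroring the doctests above. It assumes a working Ray
# installation and only runs when this file is executed directly.
if __name__ == "__main__":
    ray.init()

    @ray.remote
    class Doubler:
        def double(self, v):
            return 2 * v

    pool = ActorPool([Doubler.remote(), Doubler.remote()])
    # Ordered results: [2, 4, 6, 8]
    print(list(pool.map(lambda a, v: a.double.remote(v), [1, 2, 3, 4])))
    # Unordered results may arrive in any order, e.g. [4, 2, 8, 6]
    print(list(pool.map_unordered(lambda a, v: a.double.remote(v), [1, 2, 3, 4])))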
| 37.630094
| 86
| 0.559064
|
174942b28e48f699f26a4c51d2add9385d44e6c5
| 213
|
py
|
Python
|
dayrolling.py
|
ClownMonster/StockPrediction_MLmodel_python
|
ea5562ce377422f072b6907e7547a44483d1e81e
|
[
"MIT"
] | null | null | null |
dayrolling.py
|
ClownMonster/StockPrediction_MLmodel_python
|
ea5562ce377422f072b6907e7547a44483d1e81e
|
[
"MIT"
] | null | null | null |
dayrolling.py
|
ClownMonster/StockPrediction_MLmodel_python
|
ea5562ce377422f072b6907e7547a44483d1e81e
|
[
"MIT"
] | null | null | null |
'''
Returns the training data smoothed with a 7-day rolling mean, for visualizing the trailing week up to a given day.
'''
from ProcessedDataframe import trainData
def mean_data():
train_df = trainData()
d = train_df.rolling(7).mean()
return d
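# --- Hedged illustration (added by the editor; trainData() and its schema are not shown here) ---
# What `.rolling(7).mean()` produces on a toy pandas Series: each value becomes the
# average of the trailing 7 observations (NaN until a full window is available).
if __name__ == "__main__":
    import pandas as pd

    prices = pd.Series(range(1, 11))         # 1, 2, ..., 10
    print(prices.rolling(7).mean().tail(3))  # 5.0, 6.0, 7.0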
| 19.363636
| 69
| 0.704225
|
93f393308f448c848b1d173bb51bfb0997d32ef1
| 2,074
|
py
|
Python
|
{{cookiecutter.project_name}}/{{cookiecutter.project_name}}_api/api/schemas.py
|
frank2411/cookiecutter_flasktemplate
|
fc80827f0f7e7b87679790c8c1d9094518576b5b
|
[
"Apache-2.0"
] | null | null | null |
{{cookiecutter.project_name}}/{{cookiecutter.project_name}}_api/api/schemas.py
|
frank2411/cookiecutter_flasktemplate
|
fc80827f0f7e7b87679790c8c1d9094518576b5b
|
[
"Apache-2.0"
] | null | null | null |
{{cookiecutter.project_name}}/{{cookiecutter.project_name}}_api/api/schemas.py
|
frank2411/cookiecutter_flasktemplate
|
fc80827f0f7e7b87679790c8c1d9094518576b5b
|
[
"Apache-2.0"
] | null | null | null |
import uuid
from marshmallow_sqlalchemy.fields import Related
from sqlalchemy.orm.exc import NoResultFound
class FixedRelated(Related): # pragma: no cover
default_error_messages = {
"invalid": "Could not deserialize related value {value!r}; "
"expected a dictionary with keys {keys!r}",
"not_found": "Related Object doesn't exist in DB",
"invalid_uuid": "Not a valid UUID."
}
def _deserialize(self, value, *args, **kwargs):
"""Deserialize a serialized value to a model instance.
If the parent schema is transient, create a new (transient) instance.
Otherwise, attempt to find an existing instance in the database.
:param value: The value to deserialize.
"""
if not isinstance(value, dict):
if len(self.related_keys) != 1:
keys = [prop.key for prop in self.related_keys]
raise self.make_error("invalid", value=value, keys=keys)
value = {self.related_keys[0].key: value}
if self.transient:
return self.related_model(**value)
if self.related_model.id.type.__str__() == "UUID":
try:
uuid.UUID(value["id"])
except (ValueError, AttributeError, TypeError) as error:
raise self.make_error("invalid_uuid") from error
try:
result = self._get_existing_instance(
self.session.query(self.related_model), value
)
except NoResultFound:
# The related-object DNE in the DB, but we still want to deserialize it
# ...perhaps we want to add it to the DB later
raise self.make_error("not_found")
return result
def _serialize(self, value, attr, obj):
ret = {prop.key: getattr(value, prop.key, None) for prop in self.related_keys}
# Little hack to prevent errors in uuid deserialization
if isinstance(ret["id"], uuid.UUID):
ret["id"] = str(ret["id"])
return ret if len(ret) > 1 else list(ret.values())[0]
| 37.709091
| 86
| 0.613308
|
29e9f493ea18f72f4489a55151f43dbc9521b162
| 743
|
py
|
Python
|
257. Binary Tree Paths.py
|
patrick-luo/Leet-Code
|
989ec20c1069ce93e1d0e9ae4a4dfc59b1b1622a
|
[
"MIT"
] | null | null | null |
257. Binary Tree Paths.py
|
patrick-luo/Leet-Code
|
989ec20c1069ce93e1d0e9ae4a4dfc59b1b1622a
|
[
"MIT"
] | null | null | null |
257. Binary Tree Paths.py
|
patrick-luo/Leet-Code
|
989ec20c1069ce93e1d0e9ae4a4dfc59b1b1622a
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def binaryTreePaths(self, root):
"""
:type root: TreeNode
:rtype: List[str]
"""
def dfs(root, path, res):
if root is None:
return
path.append(str(root.val))
if root.left is None and root.right is None:
res.append('->'.join(path))
else:
dfs(root.left, path, res)
dfs(root.right, path, res)
path.pop()
res = list()
dfs(root, list(), res)
return res
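# --- Hedged illustration (added by the editor, not part of the original solution) ---
# Quick check on the tree [1, 2, 3, null, 5]: the expected root-to-leaf paths are
# '1->2->5' and '1->3'.
if __name__ == "__main__":
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.right = TreeNode(5)
    print(Solution().binaryTreePaths(root))  # ['1->2->5', '1->3']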
| 26.535714
| 56
| 0.475101
|
25409d1814f7cde0a2f0b58785c4d633be3a806e
| 557
|
py
|
Python
|
koroviev/utils.py
|
Egnod/koroviev
|
aea948c54177357ae7e2101541221d2d907f6aeb
|
[
"MIT"
] | null | null | null |
koroviev/utils.py
|
Egnod/koroviev
|
aea948c54177357ae7e2101541221d2d907f6aeb
|
[
"MIT"
] | 2
|
2020-09-13T21:12:27.000Z
|
2020-09-13T21:13:44.000Z
|
koroviev/utils.py
|
Egnod/koroviev
|
aea948c54177357ae7e2101541221d2d907f6aeb
|
[
"MIT"
] | null | null | null |
from functools import wraps
from typing import Callable
from termcolor import cprint
def config_file_required(func: Callable) -> Callable:
"""Decorator for cli methods with required exists config file in
project."""
@wraps(func)
def wrapper(self, *args, **kwargs) -> None:
if not self._cfg_exist:
cprint(
"Error: config file does not exists. It looks like the project is not initialized.",
"red",
)
else:
func(self, *args, **kwargs)
return wrapper
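# --- Hedged illustration (added by the editor, not part of the original koroviev source) ---
# A fake CLI object exercising both branches of the decorator: the error path when no
# config file exists, and the pass-through path when it does.
if __name__ == "__main__":
    class FakeCLI:
        def __init__(self, cfg_exist: bool):
            self._cfg_exist = cfg_exist

        @config_file_required
        def generate(self) -> None:
            print("generating templates...")

    FakeCLI(cfg_exist=False).generate()  # prints the red error message
    FakeCLI(cfg_exist=True).generate()   # prints "generating templates..."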
| 25.318182
| 100
| 0.610413
|
568b4976bd8436dcb87f814d5247ce80a282e67a
| 838
|
py
|
Python
|
tiers/helpers.py
|
appsembler/django-tiers
|
7c59be2a31a767e1917bc4296c1c986427e35b8a
|
[
"MIT"
] | 2
|
2017-04-10T19:50:35.000Z
|
2021-08-13T09:00:07.000Z
|
tiers/helpers.py
|
appsembler/django-tiers
|
7c59be2a31a767e1917bc4296c1c986427e35b8a
|
[
"MIT"
] | 28
|
2017-03-07T19:47:15.000Z
|
2022-03-30T13:12:26.000Z
|
tiers/helpers.py
|
appsembler/django-tiers
|
7c59be2a31a767e1917bc4296c1c986427e35b8a
|
[
"MIT"
] | null | null | null |
from .app_settings import settings
def is_equal_or_sub_url(request_url, checked_url):
"""Stupidly simple method to check for URLs equality"""
if request_url == checked_url:
return True
request_url = request_url.rstrip('/')
checked_url = checked_url.rstrip('/')
return request_url.startswith(checked_url)
def is_white_listed_url(url):
"""Checks if the URL is whitelisted for non-redirect."""
if url == '/':
# Homepage is not whitelisted.
return False
white_listed_urls = settings.redirect_white_list()
if settings.expired_redirect_url():
white_listed_urls.append(settings.expired_redirect_url())
for white_listed_url in white_listed_urls:
if is_equal_or_sub_url(request_url=url, checked_url=white_listed_url):
return True
return False
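# --- Hedged illustration (added by the editor, not part of the original django-tiers source) ---
# is_equal_or_sub_url() does simple prefix matching after stripping trailing slashes.
# is_white_listed_url() is not exercised here because it depends on Django settings.
# (The relative import above means this block only runs inside the package, e.g. via
# `python -m tiers.helpers`.)
if __name__ == "__main__":
    print(is_equal_or_sub_url("/dashboard/reports/", "/dashboard"))  # True: sub-URL
    print(is_equal_or_sub_url("/dashboard", "/dashboard/"))          # True: equal after rstrip
    print(is_equal_or_sub_url("/dash", "/dashboard"))                # False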
| 28.896552
| 78
| 0.711217
|
6c31da8ae5a2a72ce531b9e961a8b1da09e37fd5
| 893
|
py
|
Python
|
examples/frameworks/fire/fire_grouping_cmd.py
|
thepycoder/clearml
|
717edba8c2b39fb7486bd2aba9ca0294f309b4c3
|
[
"Apache-2.0"
] | 2,097
|
2019-06-11T14:36:25.000Z
|
2020-12-21T03:52:59.000Z
|
examples/frameworks/fire/fire_grouping_cmd.py
|
thepycoder/clearml
|
717edba8c2b39fb7486bd2aba9ca0294f309b4c3
|
[
"Apache-2.0"
] | 247
|
2019-06-11T15:10:26.000Z
|
2020-12-21T17:34:32.000Z
|
examples/frameworks/fire/fire_grouping_cmd.py
|
thepycoder/clearml
|
717edba8c2b39fb7486bd2aba9ca0294f309b4c3
|
[
"Apache-2.0"
] | 256
|
2019-06-11T14:36:28.000Z
|
2020-12-18T08:32:47.000Z
|
# ClearML - Example of Python Fire integration, with commands grouped inside classes
#
from clearml import Task
import fire
class Other(object):
def status(self):
return "Other"
class IngestionStage(object):
def __init__(self):
self.other = Other()
def run(self):
return "Ingesting! Nom nom nom..."
def hello(self, hello_str):
return hello_str
class DigestionStage(object):
def run(self, volume=1):
return " ".join(["Burp!"] * volume)
def status(self):
return "Satiated."
class Pipeline(object):
def __init__(self):
self.ingestion = IngestionStage()
self.digestion = DigestionStage()
def run(self):
self.ingestion.run()
self.digestion.run()
if __name__ == "__main__":
Task.init(project_name="examples", task_name="Fire grouping command")
fire.Fire(Pipeline)
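# --- Hedged usage notes (added by the editor, not part of the original ClearML example) ---
# With fire.Fire(Pipeline), each attribute of Pipeline becomes a command group, e.g.:
#   python fire_grouping_cmd.py run                        # Pipeline.run()
#   python fire_grouping_cmd.py ingestion run              # IngestionStage.run()
#   python fire_grouping_cmd.py ingestion hello --hello_str="hi"
#   python fire_grouping_cmd.py digestion run --volume=3   # "Burp! Burp! Burp!"
#   python fire_grouping_cmd.py ingestion other status     # nested group -> "Other"
# Exact flag spelling follows Python Fire's conventions (dashes and underscores are
# both accepted); the file name above is assumed from this example's path.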
| 19.844444
| 84
| 0.641657
|
9ce54c4c1c026777bee52e1b27565a7d0d969d1a
| 186
|
py
|
Python
|
suave/urls.py
|
radiosilence/django-suave
|
19eb23de0589bdce68f91d580c53da179835ed90
|
[
"MIT"
] | null | null | null |
suave/urls.py
|
radiosilence/django-suave
|
19eb23de0589bdce68f91d580c53da179835ed90
|
[
"MIT"
] | 1
|
2020-09-25T07:00:39.000Z
|
2020-09-28T06:51:09.000Z
|
suave/urls.py
|
radiosilence/django-suave
|
19eb23de0589bdce68f91d580c53da179835ed90
|
[
"MIT"
] | null | null | null |
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('suave.views',
url(r'^(?P<url>[-\w\/]+)/$', 'page', name='page'),
url(r'^$', 'page', name='page'),
)
| 23.25
| 54
| 0.586022
|
c2906c8a3833780f8269a7eabf7eda1e474622f8
| 5,694
|
py
|
Python
|
pywikibot/families/wiktionary_family.py
|
xqt/pwb
|
9a4fe27138f32952e533256195849d05855df0b0
|
[
"MIT"
] | null | null | null |
pywikibot/families/wiktionary_family.py
|
xqt/pwb
|
9a4fe27138f32952e533256195849d05855df0b0
|
[
"MIT"
] | 1
|
2021-12-08T16:29:41.000Z
|
2021-12-08T16:29:41.000Z
|
pywikibot/families/wiktionary_family.py
|
xqt/pwb
|
9a4fe27138f32952e533256195849d05855df0b0
|
[
"MIT"
] | 2
|
2022-01-04T04:10:38.000Z
|
2022-01-04T04:18:18.000Z
|
"""Family module for Wiktionary."""
#
# (C) Pywikibot team, 2005-2022
#
# Distributed under the terms of the MIT license.
#
from pywikibot import family
from pywikibot.tools import classproperty
# The Wikimedia family that is known as Wiktionary
class Family(family.SubdomainFamily, family.WikimediaFamily):
"""Family class for Wiktionary."""
name = 'wiktionary'
closed_wikis = [
# https://noc.wikimedia.org/conf/highlight.php?file=dblists/closed.dblist
'aa', 'ab', 'ak', 'as', 'av', 'bh', 'bi', 'bm', 'bo', 'ch', 'cr', 'dz',
'ik', 'mh', 'pi', 'rm', 'rn', 'sc', 'sn', 'to', 'tw', 'xh', 'yo', 'za',
]
removed_wikis = [
# https://noc.wikimedia.org/conf/highlight.php?file=dblists/deleted.dblist
'als', 'ba', 'dk', 'mo', 'tlh', 'tokipona',
]
languages_by_size = [
'en', 'fr', 'mg', 'zh', 'ru', 'de', 'es', 'sh', 'sv', 'nl', 'el', 'pl',
'ku', 'lt', 'it', 'ca', 'fi', 'ta', 'hu', 'tr', 'io', 'hy', 'ko', 'ja',
'pt', 'kn', 'vi', 'sr', 'th', 'hi', 'ro', 'no', 'et', 'id', 'cs', 'ml',
'my', 'uz', 'li', 'or', 'eo', 'te', 'fa', 'gl', 'skr', 'ar', 'oc',
'jv', 'az', 'eu', 'uk', 'br', 'ast', 'da', 'is', 'lo', 'simple', 'bn',
'la', 'hr', 'fj', 'tg', 'ky', 'sk', 'bg', 'wa', 'sg', 'ur', 'shn',
'ps', 'cy', 'vo', 'sl', 'om', 'he', 'af', 'zh-min-nan', 'mnw', 'scn',
'tl', 'pa', 'sw', 'fy', 'lmo', 'nn', 'ka', 'lv', 'ms', 'min', 'sq',
'nds', 'co', 'mn', 'pnb', 'lb', 'bs', 'nah', 'yue', 'sa', 'kk', 'km',
'diq', 'vec', 'be', 'tk', 'mk', 'sm', 'nia', 'hsb', 'ks', 'shy', 'su',
'gd', 'ga', 'bcl', 'mr', 'gom', 'an', 'wo', 'mni', 'ia', 'ang', 'mt',
'fo', 'sd', 'tt', 'gn', 'ie', 'so', 'csb', 'ug', 'si', 'st', 'roa-rup',
'hif', 'tpi', 'kl', 'zu', 'ha', 'mi', 'ay', 'jbo', 'yi', 'ln', 'gu',
'na', 'gv', 'kw', 'am', 'ne', 'rw', 'ts', 'qu', 'ss', 'iu', 'chr',
'dv', 'ti', 'tn',
]
category_redirect_templates = {
'_default': (),
'ar': ('تحويل تصنيف',),
'zh': ('分类重定向',),
}
# Global bot allowed languages on
# https://meta.wikimedia.org/wiki/BPI#Current_implementation
# & https://meta.wikimedia.org/wiki/Special:WikiSets/2
cross_allowed = [
'af', 'am', 'an', 'ang', 'ar', 'ast', 'ay', 'az', 'be', 'bg', 'bn',
'br', 'bs', 'ca', 'chr', 'co', 'cs', 'csb', 'cy', 'da', 'dv', 'el',
'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fj', 'fo', 'fy', 'ga', 'gd', 'gl',
'gn', 'gu', 'gv', 'ha', 'hsb', 'hu', 'hy', 'ia', 'id', 'ie', 'io',
'iu', 'jbo', 'jv', 'ka', 'kk', 'kl', 'km', 'kn', 'ko', 'ks', 'ku',
'kw', 'ky', 'la', 'lb', 'ln', 'lo', 'lt', 'lv', 'mg', 'mi', 'mk', 'ml',
'mn', 'ms', 'mt', 'my', 'na', 'nah', 'nds', 'ne', 'nl', 'nn', 'no',
'oc', 'om', 'or', 'pa', 'pnb', 'ps', 'pt', 'qu', 'roa-rup', 'rw', 'sa',
'scn', 'sd', 'sg', 'sh', 'si', 'simple', 'sk', 'sl', 'sm', 'so', 'sq',
'sr', 'ss', 'st', 'su', 'sv', 'sw', 'ta', 'te', 'tg', 'th', 'ti', 'tk',
'tl', 'tn', 'tpi', 'tr', 'ts', 'tt', 'ug', 'uk', 'ur', 'uz', 'vec',
'vi', 'vo', 'wa', 'wo', 'yi', 'zh', 'zh-min-nan', 'zu',
]
# Which languages have a special order for putting interlanguage links,
# and what order is it? If a language is not in interwiki_putfirst,
# alphabetical order on language code is used. For languages that are in
# interwiki_putfirst, interwiki_putfirst is checked first, and
# languages are put in the order given there. All other languages are
# put after those, in code-alphabetical order.
alphabetic_sv = [
'aa', 'af', 'ak', 'als', 'an', 'roa-rup', 'ast', 'gn', 'ay', 'az',
'id', 'ms', 'bm', 'zh-min-nan', 'jv', 'su', 'mt', 'bi', 'bo', 'bs',
'br', 'ca', 'cs', 'ch', 'sn', 'co', 'za', 'cy', 'da', 'de', 'na', 'mh',
'et', 'ang', 'en', 'es', 'eo', 'eu', 'to', 'fr', 'fy', 'fo', 'ga',
'gv', 'sm', 'gd', 'gl', 'hr', 'io', 'ia', 'ie', 'ik', 'xh', 'is', 'zu',
'it', 'kl', 'csb', 'kw', 'rw', 'rn', 'sw', 'ky', 'ku', 'la', 'lv',
'lb', 'lt', 'li', 'ln', 'jbo', 'hu', 'mg', 'mi', 'mo', 'my', 'fj',
'nah', 'nl', 'cr', 'no', 'nn', 'hsb', 'oc', 'om', 'ug', 'uz', 'nds',
'pl', 'pt', 'ro', 'rm', 'qu', 'sg', 'sc', 'st', 'tn', 'sq', 'scn',
'simple', 'ss', 'sk', 'sl', 'so', 'sh', 'fi', 'sv', 'tl', 'tt', 'vi',
'tpi', 'tr', 'tw', 'vo', 'wa', 'wo', 'ts', 'yo', 'el', 'av', 'ab',
'ba', 'be', 'bg', 'mk', 'mn', 'ru', 'sr', 'tg', 'uk', 'kk', 'hy', 'yi',
'he', 'ur', 'ar', 'tk', 'sd', 'fa', 'ha', 'ps', 'dv', 'ks', 'ne', 'pi',
'bh', 'mr', 'sa', 'hi', 'as', 'bn', 'pa', 'pnb', 'gu', 'or', 'ta',
'te', 'kn', 'ml', 'si', 'th', 'lo', 'dz', 'ka', 'ti', 'am', 'chr',
'iu', 'km', 'zh', 'ja', 'ko', 'shn',
]
@classproperty
def interwiki_putfirst(cls):
cls.interwiki_putfirst = {
'da': cls.alphabetic,
'en': cls.alphabetic,
'et': cls.alphabetic,
'fi': cls.alphabetic,
'fy': cls.fyinterwiki,
'he': ['en'],
'hu': ['en'],
'ms': cls.alphabetic_revised,
'pl': cls.alphabetic_revised,
'sv': cls.alphabetic_sv,
'simple': cls.alphabetic,
}
return cls.interwiki_putfirst
interwiki_on_one_line = ['pl']
interwiki_attop = ['pl']
# Subpages for documentation.
# TODO: List is incomplete, to be completed for missing languages.
doc_subpages = {
'_default': (('/doc', ),
['en']
),
'ar': ('/شرح', '/doc'),
'sr': ('/док', ),
}
| 44.834646
| 82
| 0.429575
|
b8e7cd240993cbbcd8fa96eba4a15e7823668348
| 218
|
py
|
Python
|
Curso Python/Mundo 1/Modulo2/Desafios/Desafios 2/des006.py
|
catabimbas/Curso-Python
|
72549952db77fa9b0ea3746b83f94592e3fdeb30
|
[
"MIT"
] | null | null | null |
Curso Python/Mundo 1/Modulo2/Desafios/Desafios 2/des006.py
|
catabimbas/Curso-Python
|
72549952db77fa9b0ea3746b83f94592e3fdeb30
|
[
"MIT"
] | null | null | null |
Curso Python/Mundo 1/Modulo2/Desafios/Desafios 2/des006.py
|
catabimbas/Curso-Python
|
72549952db77fa9b0ea3746b83f94592e3fdeb30
|
[
"MIT"
] | null | null | null |
numbase = int(input('Enter a value: '))
duble = numbase * 2
triple = numbase * 3
rq = numbase ** 0.5
print('Double of the value: {} \nTriple of the value: {} \nSquare root of the value: {:.2f}'.format(duble, triple, rq))
| 36.333333
| 114
| 0.646789
|
e9b4672409bab981632228bb65e76e1ec77b7c67
| 2,254
|
py
|
Python
|
Solutions/Problem_061.py
|
PraneethJain/Project-Euler
|
54fe34da444803ea55c49e4a4cda3ad6d4bca3b8
|
[
"MIT"
] | 2
|
2022-03-11T21:31:52.000Z
|
2022-03-11T21:37:14.000Z
|
Solutions/Problem_061.py
|
PraneethJain/Project-Euler-100
|
54fe34da444803ea55c49e4a4cda3ad6d4bca3b8
|
[
"MIT"
] | null | null | null |
Solutions/Problem_061.py
|
PraneethJain/Project-Euler-100
|
54fe34da444803ea55c49e4a4cda3ad6d4bca3b8
|
[
"MIT"
] | 1
|
2022-03-07T12:55:36.000Z
|
2022-03-07T12:55:36.000Z
|
from time import time
def triangular_check(t: int) -> bool:
n = ((8 * t + 1) ** 0.5 - 1) / 2
return n == int(n)
def square_check(s: int) -> bool:
n = s**0.5
return n == int(n)
def pentagonal_check(p: int) -> bool:
n = (1 + (24 * p + 1) ** 0.5) / 6
return n == int(n)
def hexagonal_check(h: int) -> bool:
n = (1 + (8 * h + 1) ** 0.5) / 4
return n == int(n)
def heptagonal_check(h: int) -> bool:
n = (3 + (40 * h + 9) ** 0.5) / 10
return n == int(n)
def octagonal_check(o: int) -> bool:
n = (2 + (12 * o + 4) ** 0.5) / 6
return n == int(n)
t1 = time()
octagonal_nums = [i for i in range(10**3, 10**4) if octagonal_check(i)]
all_set = set(
[
i
for i in range(10**3, 10**4)
if any(
[
triangular_check(i),
square_check(i),
pentagonal_check(i),
hexagonal_check(i),
heptagonal_check(i),
octagonal_check(i),
]
)
]
)
listoflists = []
for n1 in octagonal_nums:
for n2 in all_set:
if str(n1)[-2:] == str(n2)[:2]:
for n3 in all_set:
if str(n2)[-2:] == str(n3)[:2]:
for n4 in all_set:
if str(n3)[-2:] == str(n4)[:2]:
for n5 in all_set:
if str(n4)[-2:] == str(n5)[:2] and str(n5)[2] != "0":
n6 = int(str(n5)[-2:] + str(n1)[:2])
listoflists.append([n1, n2, n3, n4, n5, n6])
for L in listoflists:
ans = 0
for ele in L:
if heptagonal_check(ele):
ans += ele
L.remove(ele)
for ele in L:
if hexagonal_check(ele):
ans += ele
L.remove(ele)
for ele in L:
if pentagonal_check(ele):
ans += ele
L.remove(ele)
for ele in L:
if square_check(ele):
ans += ele
L.remove(ele)
for ele in L:
if triangular_check(ele):
ans += ele
L.remove(ele)
if len(L) == 1:
print(ans + L[0])
print(f"Process completed in {time()-t1}s")
raise SystemExit
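# --- Hedged worked check (added by the editor, not part of the original solution) ---
# The *_check helpers invert the closed forms of the polygonal numbers. For example,
# triangular numbers satisfy t = n(n+1)/2, so n = (sqrt(8t + 1) - 1) / 2. Taking
# t = 8128: 8*8128 + 1 = 65025 = 255**2, so n = (255 - 1)/2 = 127 (an integer) and
# triangular_check(8128) is True. The same square root drives the hexagonal test,
# n = (1 + 255)/4 = 64, so 8128 is hexagonal as well, while square_check(8128) is
# False because sqrt(8128) is not an integer.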
| 25.044444
| 85
| 0.43567
|
dea183ba25f7dec8fcde5b8cc82c1e977ac4e87f
| 19,134
|
py
|
Python
|
python/ccxt/coinone.py
|
KaceyBolman/ccxt
|
d34a0651b209ac77453f05c4ce31883f0cd2d6b8
|
[
"MIT"
] | 1
|
2018-07-31T12:27:28.000Z
|
2018-07-31T12:27:28.000Z
|
python/ccxt/coinone.py
|
rerefreshing/ccxt
|
7c50f338dcb282c0aee4d69a1ac4ca47255fdf15
|
[
"MIT"
] | null | null | null |
python/ccxt/coinone.py
|
rerefreshing/ccxt
|
7c50f338dcb282c0aee4d69a1ac4ca47255fdf15
|
[
"MIT"
] | 2
|
2019-03-14T15:17:46.000Z
|
2019-09-08T19:26:04.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import base64
import hashlib
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import ExchangeNotAvailable
class coinone (Exchange):
def describe(self):
return self.deep_extend(super(coinone, self).describe(), {
'id': 'coinone',
'name': 'CoinOne',
'countries': ['KR'], # Korea
'rateLimit': 667,
'version': 'v2',
'has': {
'CORS': False,
'createMarketOrder': False,
'fetchTickers': True,
'fetchOrder': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/38003300-adc12fba-323f-11e8-8525-725f53c4a659.jpg',
'api': 'https://api.coinone.co.kr',
'www': 'https://coinone.co.kr',
'doc': 'https://doc.coinone.co.kr',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'api': {
'public': {
'get': [
'orderbook/',
'trades/',
'ticker/',
],
},
'private': {
'post': [
'account/btc_deposit_address/',
'account/balance/',
'account/daily_balance/',
'account/user_info/',
'account/virtual_account/',
'order/cancel_all/',
'order/cancel/',
'order/limit_buy/',
'order/limit_sell/',
'order/complete_orders/',
'order/limit_orders/',
'order/order_info/',
'transaction/auth_number/',
'transaction/history/',
'transaction/krw/history/',
'transaction/btc/',
'transaction/coin/',
],
},
},
'markets': {
'BCH/KRW': {'id': 'bch', 'symbol': 'BCH/KRW', 'base': 'BCH', 'quote': 'KRW', 'baseId': 'bch', 'quoteId': 'krw'},
'BTC/KRW': {'id': 'btc', 'symbol': 'BTC/KRW', 'base': 'BTC', 'quote': 'KRW', 'baseId': 'btc', 'quoteId': 'krw'},
'BTG/KRW': {'id': 'btg', 'symbol': 'BTG/KRW', 'base': 'BTG', 'quote': 'KRW', 'baseId': 'btg', 'quoteId': 'krw'},
'ETC/KRW': {'id': 'etc', 'symbol': 'ETC/KRW', 'base': 'ETC', 'quote': 'KRW', 'baseId': 'etc', 'quoteId': 'krw'},
'ETH/KRW': {'id': 'eth', 'symbol': 'ETH/KRW', 'base': 'ETH', 'quote': 'KRW', 'baseId': 'eth', 'quoteId': 'krw'},
'IOTA/KRW': {'id': 'iota', 'symbol': 'IOTA/KRW', 'base': 'IOTA', 'quote': 'KRW', 'baseId': 'iota', 'quoteId': 'krw'},
'LTC/KRW': {'id': 'ltc', 'symbol': 'LTC/KRW', 'base': 'LTC', 'quote': 'KRW', 'baseId': 'ltc', 'quoteId': 'krw'},
'OMG/KRW': {'id': 'omg', 'symbol': 'OMG/KRW', 'base': 'OMG', 'quote': 'KRW', 'baseId': 'omg', 'quoteId': 'krw'},
'QTUM/KRW': {'id': 'qtum', 'symbol': 'QTUM/KRW', 'base': 'QTUM', 'quote': 'KRW', 'baseId': 'qtum', 'quoteId': 'krw'},
'XRP/KRW': {'id': 'xrp', 'symbol': 'XRP/KRW', 'base': 'XRP', 'quote': 'KRW', 'baseId': 'xrp', 'quoteId': 'krw'},
'EOS/KRW': {'id': 'eos', 'symbol': 'EOS/KRW', 'base': 'EOS', 'quote': 'KRW', 'baseId': 'eos', 'quoteId': 'krw'},
'DATA/KRW': {'id': 'data', 'symbol': 'DATA/KRW', 'base': 'DATA', 'quote': 'KRW', 'baseId': 'data', 'quoteId': 'krw'},
'ZIL/KRW': {'id': 'zil', 'symbol': 'ZIL/KRW', 'base': 'ZIL', 'quote': 'KRW', 'baseId': 'zil', 'quoteId': 'krw'},
'KNC/KRW': {'id': 'knc', 'symbol': 'KNC/KRW', 'base': 'KNC', 'quote': 'KRW', 'baseId': 'knc', 'quoteId': 'krw'},
'ZRX/KRW': {'id': 'zrx', 'symbol': 'ZRX/KRW', 'base': 'ZRX', 'quote': 'KRW', 'baseId': 'zrx', 'quoteId': 'krw'},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.001,
'maker': 0.001,
'tiers': {
'taker': [
[0, 0.001],
[100000000, 0.0009],
[1000000000, 0.0008],
[5000000000, 0.0007],
[10000000000, 0.0006],
[20000000000, 0.0005],
[30000000000, 0.0004],
[40000000000, 0.0003],
[50000000000, 0.0002],
],
'maker': [
[0, 0.001],
[100000000, 0.0008],
[1000000000, 0.0006],
[5000000000, 0.0004],
[10000000000, 0.0002],
[20000000000, 0],
[30000000000, 0],
[40000000000, 0],
[50000000000, 0],
],
},
},
},
'exceptions': {
'405': ExchangeNotAvailable,
'104': OrderNotFound,
},
})
def fetch_balance(self, params={}):
response = self.privatePostAccountBalance()
result = {'info': response}
balances = self.omit(response, [
'errorCode',
'result',
'normalWallets',
])
ids = list(balances.keys())
for i in range(0, len(ids)):
id = ids[i]
balance = balances[id]
code = id.upper()
if id in self.currencies_by_id:
code = self.currencies_by_id[id]['code']
free = float(balance['avail'])
total = float(balance['balance'])
used = total - free
account = {
'free': free,
'used': used,
'total': total,
}
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
market = self.market(symbol)
response = self.publicGetOrderbook(self.extend({
'currency': market['id'],
'format': 'json',
}, params))
return self.parse_order_book(response, None, 'bid', 'ask', 'price', 'qty')
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTicker(self.extend({
'currency': 'all',
'format': 'json',
}, params))
result = {}
tickers = response
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
symbol = id
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
market = self.market(symbol)
response = self.publicGetTicker(self.extend({
'currency': market['id'],
'format': 'json',
}, params))
return self.parse_ticker(response, market)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
last = self.safe_float(ticker, 'last')
previousClose = self.safe_float(ticker, 'yesterday_last')
change = None
if last is not None and previousClose is not None:
change = previousClose - last
symbol = market['symbol'] if (market is not None) else None
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': self.safe_float(ticker, 'first'),
'close': last,
'last': last,
'previousClose': previousClose,
'change': change,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': None,
'info': ticker,
}
def parse_trade(self, trade, market=None):
timestamp = int(trade['timestamp']) * 1000
symbol = market['symbol'] if (market is not None) else None
return {
'id': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': None,
'symbol': symbol,
'type': None,
'side': None,
'price': self.safe_float(trade, 'price'),
'amount': self.safe_float(trade, 'qty'),
'fee': None,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
market = self.market(symbol)
response = self.publicGetTrades(self.extend({
'currency': market['id'],
'period': 'hour',
'format': 'json',
}, params))
return self.parse_trades(response['completeOrders'], market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
if type != 'limit':
raise ExchangeError(self.id + ' allows limit orders only')
self.load_markets()
request = {
'price': price,
'currency': self.market_id(symbol),
'qty': amount,
}
method = 'privatePostOrder' + self.capitalize(type) + self.capitalize(side)
response = getattr(self, method)(self.extend(request, params))
id = self.safe_string(response, 'orderId')
timestamp = self.milliseconds()
cost = price * amount
order = {
'info': response,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'average': None,
'amount': amount,
'filled': None,
'remaining': amount,
'status': 'open',
'fee': None,
}
self.orders[id] = order
return order
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
result = None
market = None
if symbol is None:
if id in self.orders:
market = self.market(self.orders[id]['symbol'])
else:
raise ExchangeError(self.id + ' fetchOrder() requires a symbol argument for order ids missing in the .orders cache(the order was created with a different instance of self class or within a different run of self code).')
else:
market = self.market(symbol)
try:
response = self.privatePostOrderOrderInfo(self.extend({
'order_id': id,
'currency': market['id'],
}, params))
result = self.parse_order(response)
self.orders[id] = result
except Exception as e:
if isinstance(e, OrderNotFound):
if id in self.orders:
self.orders[id]['status'] = 'canceled'
result = self.orders[id]
else:
raise e
else:
raise e
return result
def parse_order_status(self, status):
statuses = {
'live': 'open',
'partially_filled': 'open',
'filled': 'closed',
}
if status in statuses:
return statuses[status]
return status
def parse_order(self, order, market=None):
info = self.safe_value(order, 'info')
id = self.safe_string(info, 'orderId')
timestamp = int(info['timestamp']) * 1000
status = self.safe_string(order, 'status')
status = self.parse_order_status(status)
cost = None
side = self.safe_string(info, 'type')
if side.find('ask') >= 0:
side = 'sell'
else:
side = 'buy'
price = self.safe_float(info, 'price')
amount = self.safe_float(info, 'qty')
remaining = self.safe_float(info, 'remainQty')
filled = None
if amount is not None:
if remaining is not None:
filled = amount - remaining
if price is not None:
cost = price * amount
currency = self.safe_string(info, 'currency')
fee = {
'currency': currency,
'cost': self.safe_float(info, 'fee'),
'rate': self.safe_float(info, 'feeRate'),
}
symbol = None
if market is None:
marketId = currency.lower()
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
result = {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
}
return result
def cancel_order(self, id, symbol=None, params={}):
order = self.safe_value(self.orders, id)
amount = None
price = None
side = None
if order is None:
if symbol is None:
# eslint-disable-next-line quotes
raise InvalidOrder(self.id + " cancelOrder could not find the order id " + id + " in orders cache. The order was probably created with a different instance of self class earlier. The `symbol` argument is missing. To cancel the order, pass a symbol argument and {'price': 12345, 'qty': 1.2345, 'is_ask': 0} in the params argument of cancelOrder.")
price = self.safe_float(params, 'price')
if price is None:
# eslint-disable-next-line quotes
raise InvalidOrder(self.id + " cancelOrder could not find the order id " + id + " in orders cache. The order was probably created with a different instance of self class earlier. The `price` parameter is missing. To cancel the order, pass a symbol argument and {'price': 12345, 'qty': 1.2345, 'is_ask': 0} in the params argument of cancelOrder.")
amount = self.safe_float(params, 'qty')
if amount is None:
# eslint-disable-next-line quotes
raise InvalidOrder(self.id + " cancelOrder could not find the order id " + id + " in orders cache. The order was probably created with a different instance of self class earlier. The `qty`(amount) parameter is missing. To cancel the order, pass a symbol argument and {'price': 12345, 'qty': 1.2345, 'is_ask': 0} in the params argument of cancelOrder.")
side = self.safe_float(params, 'is_ask')
if side is None:
# eslint-disable-next-line quotes
raise InvalidOrder(self.id + " cancelOrder could not find the order id " + id + " in orders cache. The order was probably created with a different instance of self class earlier. The `is_ask`(side) parameter is missing. To cancel the order, pass a symbol argument and {'price': 12345, 'qty': 1.2345, 'is_ask': 0} in the params argument of cancelOrder.")
else:
price = order['price']
amount = order['amount']
side = 0 if (order['side'] == 'buy') else 1
symbol = order['symbol']
request = {
'order_id': id,
'price': price,
'qty': amount,
'is_ask': side,
'currency': self.market_id(symbol),
}
self.orders[id]['status'] = 'canceled'
return self.privatePostOrderCancel(self.extend(request, params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
url = self.urls['api'] + '/'
if api == 'public':
url += request
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
url += self.version + '/' + request
nonce = str(self.nonce())
json = self.json(self.extend({
'access_token': self.apiKey,
'nonce': nonce,
}, params))
payload = base64.b64encode(self.encode(json))
body = self.decode(payload)
secret = self.secret.upper()
signature = self.hmac(payload, self.encode(secret), hashlib.sha512)
headers = {
'content-type': 'application/json',
'X-COINONE-PAYLOAD': payload,
'X-COINONE-SIGNATURE': signature,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body):
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
if 'result' in response:
result = response['result']
if result != 'success':
#
# { "errorCode": "405", "status": "maintenance", "result": "error"}
#
code = self.safe_string(response, 'errorCode')
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if code in exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
else:
raise ExchangeError(self.id + ' ' + body)
| 42.238411
| 369
| 0.485001
|
d1a1a77ce30f26ad3b65e414739e76dcb4c53333
| 7,824
|
py
|
Python
|
api/src/opentrons/drivers/utils.py
|
anuwrag/opentrons
|
28c8d76a19e367c6bd38f5290faaa32abf378715
|
[
"Apache-2.0"
] | null | null | null |
api/src/opentrons/drivers/utils.py
|
anuwrag/opentrons
|
28c8d76a19e367c6bd38f5290faaa32abf378715
|
[
"Apache-2.0"
] | null | null | null |
api/src/opentrons/drivers/utils.py
|
anuwrag/opentrons
|
28c8d76a19e367c6bd38f5290faaa32abf378715
|
[
"Apache-2.0"
] | null | null | null |
import binascii
import logging
import time
from typing import Dict, Optional, Mapping, Iterable, Sequence
import re
from opentrons.drivers.types import (
Temperature,
PlateTemperature,
RPM,
HeaterShakerLabwareLatchStatus,
)
log = logging.getLogger(__name__)
# Number of digits after the decimal point for temperatures being sent
# to/from Temp-Deck
TEMPDECK_GCODE_ROUNDING_PRECISION = 0
TC_GCODE_ROUNDING_PRECISION = 2
HS_GCODE_ROUNDING_PRECISION = 2
KEY_VALUE_REGEX = re.compile(r"((?P<key>\S+):(?P<value>\S+))")
class ParseError(Exception):
def __init__(self, error_message: str, parse_source: str) -> None:
self.error_message = error_message
self.parse_source = parse_source
super().__init__(
f"ParseError(error_message={error_message}, parse_source={parse_source})"
)
def parse_string_value_from_substring(substring: str) -> str:
"""
    Returns the ASCII value in the expected string "N:aa11bb22", where "N" is
    the key, and "aa11bb22" is the string value to be returned
"""
try:
value = substring.split(":")[1]
return str(value)
except (ValueError, IndexError, TypeError, AttributeError):
log.exception("Unexpected arg to parse_string_value_from_substring:")
raise ParseError(
error_message="Unexpected arg to parse_string_value_from_substring",
parse_source=substring,
)
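def _parse_string_value_example() -> None:
    # Illustrative usage sketch added for clarity; the substring below is a
    # hypothetical value, not taken from real device output.
    assert parse_string_value_from_substring("N:aa11bb22") == "aa11bb22"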
def parse_temperature_response(
temperature_string: str, rounding_val: int, zero_target_is_unset: bool = False
) -> Temperature:
"""Parse a standard temperature response from a module
temperature_string: The string from the module after decoding
rounding_val: A value to round to
zero_target_is_unset: Whether or not to treat a 0 target temperature
as indicating that the module is regulating around the target temperature
0C (which the tempdeck and thermocycler are capable of) or that the module
does not currently have a target temperature set and is not regulating
(as the heater/shaker does - it has a resistive heater rather than a
thermoelectric cooler, and therefore cannot regulate on a temperature below
ambient).
Example input: "T:none C:25"""
data = parse_key_values(temperature_string)
try:
target = parse_optional_number(data["T"], rounding_val)
if zero_target_is_unset and target == 0.0:
target = None
return Temperature(current=parse_number(data["C"], rounding_val), target=target)
except KeyError:
raise ParseError(
error_message="Unexpected argument to parse_temperature_response",
parse_source=temperature_string,
)
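def _parse_temperature_response_example() -> None:
    # Illustrative usage sketch added for clarity; the response strings are
    # hypothetical examples of the documented "T:... C:..." format.
    temp = parse_temperature_response("T:none C:25", TEMPDECK_GCODE_ROUNDING_PRECISION)
    assert temp.current == 25.0 and temp.target is None
    # With zero_target_is_unset=True a reported target of 0 is treated as "no target set".
    temp = parse_temperature_response("T:0 C:25", 0, zero_target_is_unset=True)
    assert temp.target is None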
def parse_rpm_response(rpm_string: str) -> RPM:
"""Example input: T:1233 C:212"""
data = parse_key_values(rpm_string)
try:
target: Optional[int] = int(parse_number(data["T"], 0))
if target == 0:
target = None
return RPM(
current=int(parse_number(data["C"], 0)),
target=target,
)
except KeyError:
raise ParseError(
error_message="Unexpected argument to parse_rpm_response",
parse_source=rpm_string,
)
def parse_labware_latch_status_response(
status_string: str,
) -> HeaterShakerLabwareLatchStatus:
"""Example format: STATUS:IDLE_OPEN"""
status_vals = parse_key_values(status_string)
try:
return HeaterShakerLabwareLatchStatus[status_vals["STATUS"]]
except KeyError:
raise ParseError(
error_message="Unexpected argument to parse_labware_latch_status_response",
parse_source=status_string,
)
def parse_plate_temperature_response(
temperature_string: str, rounding_val: int
) -> PlateTemperature:
"""Example input: "T:none C:25 H:123"""
data = parse_key_values(temperature_string)
try:
return PlateTemperature(
current=parse_number(data["C"], rounding_val),
target=parse_optional_number(data["T"], rounding_val),
hold=parse_optional_number(data["H"], rounding_val),
)
except KeyError:
raise ParseError(
error_message="Unexpected argument to parse_plate_temperature_response",
parse_source=temperature_string,
)
def parse_hs_device_information(device_info_string: str) -> Dict[str, str]:
"""Parse the device information block from a heater/shaker, which
has a slightly different set of keys for its entries
Example: "HW:A FW:21.2.1 SerialNo:TCA020B"
"""
res = parse_key_values(device_info_string)
keymap = {"HW": "model", "FW": "version", "SerialNo": "serial"}
try:
return {keymap[key]: res[key] for key in keymap.keys()}
except KeyError as e:
raise ParseError(
error_message=f"Missing key '{str(e)} in parse_hs_device_information",
parse_source=device_info_string,
)
def parse_device_information(device_info_string: str) -> Dict[str, str]:
"""
Parse the modules's device information response.
Example response from temp-deck: "serial:aa11 model:bb22 version:cc33"
"""
res = parse_key_values(device_info_string)
try:
return {key: res[key] for key in ["model", "version", "serial"]}
except KeyError as e:
raise ParseError(
error_message=f"Missing key '{str(e)}' in parse_device_information",
parse_source=device_info_string,
)
def parse_key_values(value: str) -> Dict[str, str]:
"""Convert string in the format:
'key1:value1 key2:value2'
to dict
{'key1': 'value1', 'key2': 'value2'}
"""
res = {
g.groupdict()["key"]: g.groupdict()["value"]
for g in KEY_VALUE_REGEX.finditer(value)
}
return res
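def _parse_key_values_example() -> None:
    # Illustrative usage sketch added for clarity; the input is a hypothetical
    # "key:value" response of the kind the parsers above consume.
    assert parse_key_values("T:86.5 C:25.0") == {"T": "86.5", "C": "25.0"}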
def parse_optional_number(value: str, rounding_val: int) -> Optional[float]:
"""Convert number to float. 'none' will be converted to None"""
return None if value == "none" else parse_number(value, rounding_val)
def parse_number(value: str, rounding_val: int) -> float:
"""Convert string to float."""
try:
return round(float(value), rounding_val)
except ValueError:
raise ParseError(
error_message="Unexpected argument to parse_number", parse_source=value
)
class AxisMoveTimestamp:
"""Keeps track of the last time axes were known to move"""
def __init__(self, axis_iter: Sequence[str]):
self._moved_at: Dict[str, Optional[float]] = {ax: None for ax in axis_iter}
def mark_moved(self, axis_iter: Sequence[str]) -> None:
"""Indicate that a set of axes just moved"""
now = time.monotonic()
self._moved_at.update({ax: now for ax in axis_iter})
def time_since_moved(self) -> Mapping[str, Optional[float]]:
"""Get a mapping of the time since each known axis moved"""
now = time.monotonic()
return {ax: now - val if val else None for ax, val, in self._moved_at.items()}
def reset_moved(self, axis_iter: Iterable[str]) -> None:
"""Reset the clocks for a set of axes"""
self._moved_at.update({ax: None for ax in axis_iter})
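def _axis_move_timestamp_example() -> None:
    # Illustrative usage sketch added for clarity; the axis names are hypothetical.
    tracker = AxisMoveTimestamp(["X", "Y"])
    tracker.mark_moved(["X"])
    since = tracker.time_since_moved()
    # since["X"] is the elapsed time in seconds; "Y" has not moved, so it stays None.
    assert since["Y"] is None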
def string_to_hex(val: str, min_length: int = 0) -> str:
"""
Create a hex representation of val. The end of the result will be padded
with "0" until min_length is reached.
Args:
val: The string to convert.
min_length: The minimum length of result. "0" will be used as
padding. Default is no minimum length and no padding.
Returns:
Hex string
"""
hex_string = binascii.hexlify(val.encode()).decode()
hex_string_length = len(hex_string)
if hex_string_length < min_length:
return hex_string + "0" * (min_length - hex_string_length)
return hex_string
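def _string_to_hex_example() -> None:
    # Illustrative usage sketch added for clarity; the input and minimum length
    # are hypothetical values.
    assert string_to_hex("ab") == "6162"
    assert string_to_hex("ab", min_length=8) == "61620000"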
| 34.017391
| 88
| 0.672163
|
e1c3ffda62a818e29b5e89cd39616b7dc04db044
| 631
|
py
|
Python
|
Suanfa/01_abc.py
|
ivitan/LearnPython
|
f7c1c8f450f5cbcbd8cabe03711c5e0d81dfdee3
|
[
"MIT"
] | 1
|
2020-02-05T12:13:31.000Z
|
2020-02-05T12:13:31.000Z
|
Suanfa/01_abc.py
|
ivitan/LearnPython
|
f7c1c8f450f5cbcbd8cabe03711c5e0d81dfdee3
|
[
"MIT"
] | null | null | null |
Suanfa/01_abc.py
|
ivitan/LearnPython
|
f7c1c8f450f5cbcbd8cabe03711c5e0d81dfdee3
|
[
"MIT"
] | null | null | null |
# a+b+c=1000, a**2+b**2=c**2, where a, b, c are natural numbers; find a, b, c
# The total run time differs from machine to machine, but the number of basic operations is roughly the same
# T(n) = n^3 * 2
import time
start_time = time.time()
# Enumerate a, b, c
# Time complexity T = 1000 * 1000 * 1000 * 2
# for a in range(0,1001):
# for b in range(0,1001):
# for c in range(0,1001):
# if a+b+c==1000 and a**2+b**2==c**2:
# print("a,b,c:%d,%d,%d" % (a,b,c))
# Enumerate a, b
# Time complexity T = 1000 * 1000 * 2
for a in range(0,1001):
for b in range(0,1001):
        c = 1000 - a - b
if a+b+c==1000 and a**2+b**2==c**2:
print("a,b,c:%d,%d,%d" % (a,b,c))
end_time = time.time()
print("time:%d" % (end_time - start_time))
| 26.291667
| 51
| 0.502377
|
fed675391e037a1554b29fb2cdba756a27ac6bee
| 2,358
|
py
|
Python
|
tests/neighbors/test_nng.py
|
cthoyt/kiez
|
25f9f103ed51d4084e10f7ac532bb24183fe3894
|
[
"BSD-3-Clause"
] | 13
|
2021-07-22T12:35:07.000Z
|
2022-02-15T04:35:17.000Z
|
tests/neighbors/test_nng.py
|
cthoyt/kiez
|
25f9f103ed51d4084e10f7ac532bb24183fe3894
|
[
"BSD-3-Clause"
] | 10
|
2021-07-23T11:20:32.000Z
|
2022-02-06T12:59:06.000Z
|
tests/neighbors/test_nng.py
|
cthoyt/kiez
|
25f9f103ed51d4084e10f7ac532bb24183fe3894
|
[
"BSD-3-Clause"
] | 2
|
2021-07-23T10:53:57.000Z
|
2021-09-01T01:14:37.000Z
|
import numpy as np
import pytest
from kiez.neighbors import NNG
from numpy.testing import assert_array_equal
rng = np.random.RandomState(2)
def test_wrong_metric():
with pytest.raises(ValueError) as exc_info:
NNG(metric="jibberish")
assert "Unknown" in exc_info
def test_wrong_dir(n_samples=20, n_features=5):
source = rng.rand(n_samples, n_features)
with pytest.raises(TypeError) as exc_info:
nng = NNG(index_dir=1)
nng.fit(source)
assert "NNG requires" in exc_info
def test_right_dir(tmp_path, n_samples=20, n_features=5):
source = rng.rand(n_samples, n_features)
target = rng.rand(n_samples, n_features)
nng = NNG(index_dir=str(tmp_path))
nng.fit(source, target)
assert nng is not None
def test_none_dir(n_samples=20, n_features=5):
source = rng.rand(n_samples, n_features)
target = rng.rand(n_samples, n_features)
nng = NNG(index_dir=None)
nng.fit(source, target)
assert nng is not None
def test_self_query(n_samples=20, n_features=5, n_neighbors=5):
source = rng.rand(n_samples, n_features)
nng = NNG(index_dir=None, n_candidates=n_neighbors, epsilon=0.00001)
nng.fit(source, source)
d, i = nng.kneighbors()
i2 = nng.kneighbors(return_distance=False)
assert_array_equal(i, i2)
def test_query(n_samples=20, n_features=5, n_neighbors=5):
source = rng.rand(n_samples, n_features)
target = rng.rand(n_samples, n_features)
nng = NNG(index_dir=None, n_candidates=n_neighbors, epsilon=0.00001)
nng.fit(source, target)
d, i = nng.kneighbors(
query=source[
:5,
]
)
i2 = nng.kneighbors(
query=source[
:5,
],
return_distance=False,
)
assert_array_equal(i, i2)
def test_sqeuclidean(n_samples=20, n_features=5, n_neighbors=5):
source = rng.rand(n_samples, n_features)
target = rng.rand(n_samples, n_features)
nng1 = NNG(index_dir=None, n_candidates=n_neighbors, metric="sqeuclidean")
nng1.fit(source, target)
d, i = nng1.kneighbors(
query=source[
:5,
]
)
nng2 = NNG(index_dir=None, n_candidates=n_neighbors)
nng2.fit(source, target)
i2 = nng2.kneighbors(
query=source[
:5,
],
return_distance=False,
)
assert_array_equal(i, i2)
| 27.418605
| 78
| 0.664122
|
084f71e11e3585460d164349e7b713c8e9ce8313
| 1,468
|
py
|
Python
|
src/programy/triggers/excepter.py
|
cdoebler1/AIML2
|
ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a
|
[
"MIT"
] | 345
|
2016-11-23T22:37:04.000Z
|
2022-03-30T20:44:44.000Z
|
src/programy/triggers/excepter.py
|
MikeyBeez/program-y
|
00d7a0c7d50062f18f0ab6f4a041068e119ef7f0
|
[
"MIT"
] | 275
|
2016-12-07T10:30:28.000Z
|
2022-02-08T21:28:33.000Z
|
src/programy/triggers/excepter.py
|
VProgramMist/modified-program-y
|
f32efcafafd773683b3fe30054d5485fe9002b7d
|
[
"MIT"
] | 159
|
2016-11-28T18:59:30.000Z
|
2022-03-20T18:02:44.000Z
|
"""
Copyright (c) 2016-2020 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.triggers.trigger import Trigger
from programy.context import ClientContext
from programy.utils.console.console import outputLog
class ExceptionTrigger(Trigger):
def __init__(self):
Trigger.__init__(self)
def trigger(self, client_context: ClientContext = None, additional=None):
raise Exception("This trigger also exceptions")
| 50.62069
| 120
| 0.792916
|
210d1a32ae551d82397d81b39c2b980ce2da5647
| 792
|
py
|
Python
|
tests/functional/manifests/test_manifest.py
|
miohtama/ape
|
622deb25076d33de0edb3a23449ccdc04c3288cd
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/manifests/test_manifest.py
|
miohtama/ape
|
622deb25076d33de0edb3a23449ccdc04c3288cd
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/manifests/test_manifest.py
|
miohtama/ape
|
622deb25076d33de0edb3a23449ccdc04c3288cd
|
[
"Apache-2.0"
] | null | null | null |
import pytest # type: ignore
import requests
from hypothesis import HealthCheck, given, settings
from hypothesis_jsonschema import from_schema # type: ignore
from ape.types.manifest import PackageManifest
ETHPM_MANIFEST_SCHEMA_URI = (
"https://raw.githubusercontent.com/ethpm/ethpm-spec/master/spec/v3.spec.json"
)
@pytest.mark.xfail(reason="Schema is poorly formed")
@pytest.mark.fuzzing
@given(manifest_dict=from_schema(requests.get(ETHPM_MANIFEST_SCHEMA_URI).json()))
@settings(suppress_health_check=(HealthCheck.too_slow,))
def test_manifest_parsing(manifest_dict):
manifest = PackageManifest.from_dict(manifest_dict)
assert manifest.to_dict() == manifest_dict
def test_example_manifests(manifest):
assert PackageManifest.from_dict(manifest).to_dict() == manifest
| 33
| 81
| 0.804293
|
e6c4eb2aa0f85e7fdc96c30c22d55fdf2284c58d
| 22,597
|
py
|
Python
|
evennia/commands/default/general.py
|
FreeDelete-Software/ALPACAS-evennia
|
dd95de145ea31391238dc03d61b14b6b31a5b715
|
[
"BSD-3-Clause"
] | null | null | null |
evennia/commands/default/general.py
|
FreeDelete-Software/ALPACAS-evennia
|
dd95de145ea31391238dc03d61b14b6b31a5b715
|
[
"BSD-3-Clause"
] | null | null | null |
evennia/commands/default/general.py
|
FreeDelete-Software/ALPACAS-evennia
|
dd95de145ea31391238dc03d61b14b6b31a5b715
|
[
"BSD-3-Clause"
] | null | null | null |
"""
General Character commands usually available to all characters
"""
import re
from django.conf import settings
from evennia.utils import utils
from evennia.typeclasses.attributes import NickTemplateInvalid
COMMAND_DEFAULT_CLASS = utils.class_from_module(settings.COMMAND_DEFAULT_CLASS)
# limit symbol import for API
__all__ = (
"CmdHome",
"CmdLook",
"CmdNick",
"CmdInventory",
"CmdSetDesc",
"CmdGet",
"CmdDrop",
"CmdGive",
"CmdSay",
"CmdWhisper",
"CmdPose",
"CmdAccess",
)
class CmdHome(COMMAND_DEFAULT_CLASS):
"""
move to your character's home location
Usage:
home
Teleports you to your home location.
"""
key = "home"
locks = "cmd:perm(home) or perm(Builder)"
arg_regex = r"$"
def func(self):
"""Implement the command"""
caller = self.caller
home = caller.home
if not home:
caller.msg("You have no home!")
elif home == caller.location:
caller.msg("You are already home!")
else:
caller.msg("There's no place like home ...")
caller.move_to(home)
class CmdLook(COMMAND_DEFAULT_CLASS):
"""
look at location or object
Usage:
look
look <obj>
look *<account>
Observes your location or objects in your vicinity.
"""
key = "look"
aliases = ["l", "ls"]
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"""
Handle the looking.
"""
caller = self.caller
if not self.args:
target = caller.location
if not target:
caller.msg("You have no location to look at!")
return
else:
target = caller.search(self.args)
if not target:
return
desc = caller.at_look(target)
# add the type=look to the outputfunc to make it
# easy to separate this output in client.
self.msg(text=(desc, {"type": "look"}), options=None)
class CmdNick(COMMAND_DEFAULT_CLASS):
"""
define a personal alias/nick by defining a string to
match and replace it with another on the fly
Usage:
nick[/switches] <string> [= [replacement_string]]
nick[/switches] <template> = <replacement_template>
nick/delete <string> or number
nicks
Switches:
inputline - replace on the inputline (default)
object - replace on object-lookup
account - replace on account-lookup
list - show all defined aliases (also "nicks" works)
delete - remove nick by index in /list
clearall - clear all nicks
Examples:
nick hi = say Hello, I'm Sarah!
nick/object tom = the tall man
nick build $1 $2 = create/drop $1;$2
nick tell $1 $2=page $1=$2
nick tm?$1=page tallman=$1
nick tm\=$1=page tallman=$1
A 'nick' is a personal string replacement. Use $1, $2, ... to catch arguments.
Put the last $-marker without an ending space to catch all remaining text. You
can also use unix-glob matching for the left-hand side <string>:
* - matches everything
? - matches 0 or 1 single characters
[abcd] - matches these chars in any order
[!abcd] - matches everything not among these chars
\= - escape literal '=' you want in your <string>
Note that no objects are actually renamed or changed by this command - your nicks
are only available to you. If you want to permanently add keywords to an object
for everyone to use, you need build privileges and the alias command.
"""
key = "nick"
switch_options = ("inputline", "object", "account", "list", "delete", "clearall")
aliases = ["nickname", "nicks"]
locks = "cmd:all()"
def parse(self):
"""
Support escaping of = with \=
"""
super(CmdNick, self).parse()
args = (self.lhs or "") + (" = %s" % self.rhs if self.rhs else "")
parts = re.split(r"(?<!\\)=", args, 1)
self.rhs = None
if len(parts) < 2:
self.lhs = parts[0].strip()
else:
self.lhs, self.rhs = [part.strip() for part in parts]
self.lhs = self.lhs.replace("\=", "=")
def func(self):
"""Create the nickname"""
def _cy(string):
"add color to the special markers"
return re.sub(r"(\$[0-9]+|\*|\?|\[.+?\])", r"|Y\1|n", string)
caller = self.caller
switches = self.switches
nicktypes = [switch for switch in switches if switch in ("object", "account", "inputline")]
specified_nicktype = bool(nicktypes)
nicktypes = nicktypes if specified_nicktype else ["inputline"]
nicklist = (
utils.make_iter(caller.nicks.get(category="inputline", return_obj=True) or [])
+ utils.make_iter(caller.nicks.get(category="object", return_obj=True) or [])
+ utils.make_iter(caller.nicks.get(category="account", return_obj=True) or [])
)
if "list" in switches or self.cmdstring in ("nicks",):
if not nicklist:
string = "|wNo nicks defined.|n"
else:
table = self.styled_table("#", "Type", "Nick match", "Replacement")
for inum, nickobj in enumerate(nicklist):
_, _, nickvalue, replacement = nickobj.value
table.add_row(
str(inum + 1), nickobj.db_category, _cy(nickvalue), _cy(replacement)
)
string = "|wDefined Nicks:|n\n%s" % table
caller.msg(string)
return
if "clearall" in switches:
caller.nicks.clear()
caller.account.nicks.clear()
caller.msg("Cleared all nicks.")
return
if "delete" in switches or "del" in switches:
if not self.args or not self.lhs:
caller.msg("usage nick/delete <nick> or <#num> ('nicks' for list)")
return
# see if a number was given
arg = self.args.lstrip("#")
oldnicks = []
if arg.isdigit():
# we are given a index in nicklist
delindex = int(arg)
if 0 < delindex <= len(nicklist):
oldnicks.append(nicklist[delindex - 1])
else:
caller.msg("Not a valid nick index. See 'nicks' for a list.")
return
else:
if not specified_nicktype:
nicktypes = ("object", "account", "inputline")
for nicktype in nicktypes:
oldnicks.append(caller.nicks.get(arg, category=nicktype, return_obj=True))
oldnicks = [oldnick for oldnick in oldnicks if oldnick]
if oldnicks:
for oldnick in oldnicks:
nicktype = oldnick.category
nicktypestr = "%s-nick" % nicktype.capitalize()
_, _, old_nickstring, old_replstring = oldnick.value
caller.nicks.remove(old_nickstring, category=nicktype)
caller.msg(
"%s removed: '|w%s|n' -> |w%s|n."
% (nicktypestr, old_nickstring, old_replstring)
)
else:
caller.msg("No matching nicks to remove.")
return
if not self.rhs and self.lhs:
# check what a nick is set to
strings = []
if not specified_nicktype:
nicktypes = ("object", "account", "inputline")
for nicktype in nicktypes:
nicks = [
nick
for nick in utils.make_iter(
caller.nicks.get(category=nicktype, return_obj=True)
)
if nick
]
for nick in nicks:
_, _, nick, repl = nick.value
if nick.startswith(self.lhs):
strings.append(
"{}-nick: '{}' -> '{}'".format(nicktype.capitalize(), nick, repl)
)
if strings:
caller.msg("\n".join(strings))
else:
caller.msg("No nicks found matching '{}'".format(self.lhs))
return
if not self.args or not self.lhs:
caller.msg("Usage: nick[/switches] nickname = [realname]")
return
# setting new nicks
nickstring = self.lhs
replstring = self.rhs
if replstring == nickstring:
caller.msg("No point in setting nick same as the string to replace...")
return
# check so we have a suitable nick type
errstring = ""
string = ""
for nicktype in nicktypes:
nicktypestr = "%s-nick" % nicktype.capitalize()
old_nickstring = None
old_replstring = None
oldnick = caller.nicks.get(key=nickstring, category=nicktype, return_obj=True)
if oldnick:
_, _, old_nickstring, old_replstring = oldnick.value
if replstring:
# creating new nick
errstring = ""
if oldnick:
if replstring == old_replstring:
string += "\nIdentical %s already set." % nicktypestr.lower()
else:
string += "\n%s '|w%s|n' updated to map to '|w%s|n'." % (
nicktypestr,
old_nickstring,
replstring,
)
else:
string += "\n%s '|w%s|n' mapped to '|w%s|n'." % (
nicktypestr,
nickstring,
replstring,
)
try:
caller.nicks.add(nickstring, replstring, category=nicktype)
except NickTemplateInvalid:
caller.msg(
"You must use the same $-markers both in the nick and in the replacement."
)
return
elif old_nickstring and old_replstring:
# just looking at the nick
string += "\n%s '|w%s|n' maps to '|w%s|n'." % (
nicktypestr,
old_nickstring,
old_replstring,
)
errstring = ""
string = errstring if errstring else string
caller.msg(_cy(string))
class CmdInventory(COMMAND_DEFAULT_CLASS):
"""
view inventory
Usage:
inventory
inv
Shows your inventory.
"""
key = "inventory"
aliases = ["inv", "i"]
locks = "cmd:all()"
arg_regex = r"$"
def func(self):
"""check inventory"""
items = self.caller.contents
if not items:
string = "You are not carrying anything."
else:
from evennia.utils.ansi import raw as raw_ansi
table = self.styled_table(border="header")
for item in items:
table.add_row(f"|C{item.name}|n",
"{}|n".format(utils.crop(raw_ansi(item.db.desc or ""), width=50) or ""))
string = f"|wYou are carrying:\n{table}"
self.caller.msg(string)
class CmdGet(COMMAND_DEFAULT_CLASS):
"""
pick up something
Usage:
get <obj>
Picks up an object from your location and puts it in
your inventory.
"""
key = "get"
aliases = "grab"
locks = "cmd:all();view:perm(Developer);read:perm(Developer)"
arg_regex = r"\s|$"
def func(self):
"""implements the command."""
caller = self.caller
if not self.args:
caller.msg("Get what?")
return
obj = caller.search(self.args, location=caller.location)
if not obj:
return
if caller == obj:
caller.msg("You can't get yourself.")
return
if not obj.access(caller, "get"):
if obj.db.get_err_msg:
caller.msg(obj.db.get_err_msg)
else:
caller.msg("You can't get that.")
return
# calling at_before_get hook method
if not obj.at_before_get(caller):
return
success = obj.move_to(caller, quiet=True)
if not success:
caller.msg("This can't be picked up.")
else:
caller.msg("You pick up %s." % obj.name)
caller.location.msg_contents(
"%s picks up %s." % (caller.name, obj.name), exclude=caller
)
# calling at_get hook method
obj.at_get(caller)
class CmdDrop(COMMAND_DEFAULT_CLASS):
"""
drop something
Usage:
drop <obj>
Lets you drop an object from your inventory into the
location you are currently in.
"""
key = "drop"
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"""Implement command"""
caller = self.caller
if not self.args:
caller.msg("Drop what?")
return
# Because the DROP command by definition looks for items
# in inventory, call the search function using location = caller
obj = caller.search(
self.args,
location=caller,
nofound_string="You aren't carrying %s." % self.args,
multimatch_string="You carry more than one %s:" % self.args,
)
if not obj:
return
# Call the object script's at_before_drop() method.
if not obj.at_before_drop(caller):
return
success = obj.move_to(caller.location, quiet=True)
if not success:
caller.msg("This couldn't be dropped.")
else:
caller.msg("You drop %s." % (obj.name,))
caller.location.msg_contents("%s drops %s." % (caller.name, obj.name), exclude=caller)
# Call the object script's at_drop() method.
obj.at_drop(caller)
class CmdGive(COMMAND_DEFAULT_CLASS):
"""
give away something to someone
Usage:
give <inventory obj> <to||=> <target>
Gives an items from your inventory to another character,
placing it in their inventory.
"""
key = "give"
rhs_split = ("=", " to ") # Prefer = delimiter, but allow " to " usage.
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"""Implement give"""
caller = self.caller
if not self.args or not self.rhs:
caller.msg("Usage: give <inventory object> = <target>")
return
to_give = caller.search(
self.lhs,
location=caller,
nofound_string="You aren't carrying %s." % self.lhs,
multimatch_string="You carry more than one %s:" % self.lhs,
)
target = caller.search(self.rhs)
if not (to_give and target):
return
if target == caller:
caller.msg("You keep %s to yourself." % to_give.key)
return
if not to_give.location == caller:
caller.msg("You are not holding %s." % to_give.key)
return
# calling at_before_give hook method
if not to_give.at_before_give(caller, target):
return
# give object
success = to_give.move_to(target, quiet=True)
if not success:
caller.msg("This could not be given.")
else:
caller.msg("You give %s to %s." % (to_give.key, target.key))
target.msg("%s gives you %s." % (caller.key, to_give.key))
# Call the object script's at_give() method.
to_give.at_give(caller, target)
class CmdSetDesc(COMMAND_DEFAULT_CLASS):
"""
describe yourself
Usage:
setdesc <description>
Add a description to yourself. This
will be visible to people when they
look at you.
"""
key = "setdesc"
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"""add the description"""
if not self.args:
self.caller.msg("You must add a description.")
return
self.caller.db.desc = self.args.strip()
self.caller.msg("You set your description.")
class CmdSay(COMMAND_DEFAULT_CLASS):
"""
speak as your character
Usage:
say <message>
Talk to those in your current location.
"""
key = "say"
aliases = ['"', "'"]
locks = "cmd:all()"
def func(self):
"""Run the say command"""
caller = self.caller
if not self.args:
caller.msg("Say what?")
return
speech = self.args
# Calling the at_before_say hook on the character
speech = caller.at_before_say(speech)
# If speech is empty, stop here
if not speech:
return
# Call the at_after_say hook on the character
caller.at_say(speech, msg_self=True)
class CmdWhisper(COMMAND_DEFAULT_CLASS):
"""
Speak privately as your character to another
Usage:
whisper <character> = <message>
whisper <char1>, <char2> = <message>
Talk privately to one or more characters in your current location, without
others in the room being informed.
"""
key = "whisper"
locks = "cmd:all()"
def func(self):
"""Run the whisper command"""
caller = self.caller
if not self.lhs or not self.rhs:
caller.msg("Usage: whisper <character> = <message>")
return
receivers = [recv.strip() for recv in self.lhs.split(",")]
receivers = [caller.search(receiver) for receiver in set(receivers)]
receivers = [recv for recv in receivers if recv]
speech = self.rhs
# If the speech is empty, abort the command
if not speech or not receivers:
return
# Call a hook to change the speech before whispering
speech = caller.at_before_say(speech, whisper=True, receivers=receivers)
# no need for self-message if we are whispering to ourselves (for some reason)
msg_self = None if caller in receivers else True
caller.at_say(speech, msg_self=msg_self, receivers=receivers, whisper=True)
class CmdPose(COMMAND_DEFAULT_CLASS):
"""
strike a pose
Usage:
pose <pose text>
pose's <pose text>
Example:
pose is standing by the wall, smiling.
-> others will see:
Tom is standing by the wall, smiling.
Describe an action being taken. The pose text will
automatically begin with your name.
"""
key = "pose"
aliases = [":", "emote"]
locks = "cmd:all()"
def parse(self):
"""
Custom parse the cases where the emote
starts with some special letter, such
as 's, at which we don't want to separate
the caller's name and the emote with a
space.
"""
args = self.args
if args and not args[0] in ["'", ",", ":"]:
args = " %s" % args.strip()
self.args = args
def func(self):
"""Hook function"""
if not self.args:
msg = "What do you want to do?"
self.caller.msg(msg)
else:
msg = "%s%s" % (self.caller.name, self.args)
self.caller.location.msg_contents(text=(msg, {"type": "pose"}), from_obj=self.caller)
class CmdAccess(COMMAND_DEFAULT_CLASS):
"""
show your current game access
Usage:
access
This command shows you the permission hierarchy and
which permission groups you are a member of.
"""
key = "access"
aliases = ["groups", "hierarchy"]
locks = "cmd:all()"
arg_regex = r"$"
def func(self):
"""Load the permission groups"""
caller = self.caller
hierarchy_full = settings.PERMISSION_HIERARCHY
string = "\n|wPermission Hierarchy|n (climbing):\n %s" % ", ".join(hierarchy_full)
if self.caller.account.is_superuser:
cperms = "<Superuser>"
pperms = "<Superuser>"
else:
cperms = ", ".join(caller.permissions.all())
pperms = ", ".join(caller.account.permissions.all())
string += "\n|wYour access|n:"
string += "\nCharacter |c%s|n: %s" % (caller.key, cperms)
if hasattr(caller, "account"):
string += "\nAccount |c%s|n: %s" % (caller.account.key, pperms)
caller.msg(string)
| 30.870219
| 102
| 0.531575
|
5a3c1b9dfdee1fe3834941cb5507d1dd51fd40c9
| 2,670
|
py
|
Python
|
vendor/github.com/google/go-jsonnet/cpp-jsonnet/case_studies/micromanage/cmds.py
|
BenHall/kubeless
|
398d0ee779d848655cf34dd28739f68a2f7bac8a
|
[
"Apache-2.0"
] | null | null | null |
vendor/github.com/google/go-jsonnet/cpp-jsonnet/case_studies/micromanage/cmds.py
|
BenHall/kubeless
|
398d0ee779d848655cf34dd28739f68a2f7bac8a
|
[
"Apache-2.0"
] | null | null | null |
vendor/github.com/google/go-jsonnet/cpp-jsonnet/case_studies/micromanage/cmds.py
|
BenHall/kubeless
|
398d0ee779d848655cf34dd28739f68a2f7bac8a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
# E.g. replace Simon's cat with 'Simon'"'"'s cat'.
def escape(s):
return "'%s'" % s.replace("'", "'\"'\"'")
def file_glob(given_glob, to, prefix):
dirs = []
files = []
lp = len(prefix)
for f in glob.glob(given_glob):
if os.path.isdir(f):
more_files = file_glob('%s/*' % f, to, prefix)
files += more_files
else:
files.append((f, to + f[lp:]))
return files
def compile_command_to_bash(cmd):
if isinstance(cmd, basestring):
return [cmd]
elif cmd['kind'] == 'LiteralFile':
return [
'echo -n %s > %s' % (escape(cmd['content']), escape(cmd['to'])),
'chmod -v %s %s' % (cmd['filePermissions'], escape(cmd['to'])),
'chown -v %s.%s %s' % (cmd['owner'], cmd['group'], escape(cmd['to'])),
]
elif cmd['kind'] == 'CopyFile':
files = file_glob(cmd['from'], cmd['to'], os.path.dirname(cmd['from']))
dirs = set([os.path.dirname(f[1]) for f in files]) - {cmd['to']}
lines = []
for d in dirs:
lines += [
'mkdir -v -p %s' % escape(d),
'chmod -v %s %s' % (cmd['dirPermissions'], escape(d)),
'chown -v %s.%s %s' % (cmd['owner'], cmd['group'], escape(d)),
]
for f in files:
with open (f[0], "r") as stream:
content = stream.read()
lines += [
'echo -n %s > %s' % (escape(content), escape(f[1])),
'chmod -v %s %s' % (cmd['filePermissions'], escape(f[1])),
'chown -v %s.%s %s' % (cmd['owner'], cmd['group'], escape(f[1])),
]
return lines
elif cmd['kind'] == 'EnsureDir':
return [
'mkdir -v -p %s' % escape(cmd['dir']),
'chmod -v %s %s' % (cmd['dirPermissions'], escape(cmd['dir'])),
'chown -v %s.%s %s' % (cmd['owner'], cmd['group'], escape(cmd['dir'])),
]
else:
raise RuntimeError('Did not recognize image command kind: ' + cmd['kind'])
| 37.605634
| 83
| 0.535581
|
f672e10e77cb7db9e9f6da86f0db261d28e178f1
| 3,428
|
py
|
Python
|
app/gws/plugin/ows_provider/wfs/provider.py
|
gbd-consult/gbd-websuite
|
7212f41081c04614fdb4641e902d4de3424da8c5
|
[
"Apache-2.0"
] | 3
|
2020-07-24T10:10:18.000Z
|
2022-03-16T10:22:04.000Z
|
app/gws/plugin/ows_provider/wfs/provider.py
|
gbd-consult/gbd-websuite
|
7212f41081c04614fdb4641e902d4de3424da8c5
|
[
"Apache-2.0"
] | 28
|
2020-03-03T17:35:58.000Z
|
2021-07-12T12:05:47.000Z
|
app/gws/plugin/ows_provider/wfs/provider.py
|
gbd-consult/gbd-websuite
|
7212f41081c04614fdb4641e902d4de3424da8c5
|
[
"Apache-2.0"
] | 1
|
2021-02-22T14:32:10.000Z
|
2021-02-22T14:32:10.000Z
|
"""WFS provider."""
import gws
import gws.base.metadata
import gws.base.ows
import gws.lib.extent
import gws.lib.gis
import gws.lib.ows
import gws.lib.shape
import gws.types as t
from . import caps
"""
References
wfs 1.0.0: http://portal.opengeospatial.org/files/?artifact_id=7176 Sec 13.7.3
wfs 1.1.0: http://portal.opengeospatial.org/files/?artifact_id=8339 Sec 14.7.3
wfs 2.0.0: http://docs.opengeospatial.org/is/09-025r2/09-025r2.html Sec 11.1.3
see also https://docs.geoserver.org/latest/en/user/services/wfs/basics.html
"""
class Config(gws.base.ows.provider.Config):
pass
class Object(gws.base.ows.provider.Object):
protocol = gws.OwsProtocol.WFS
def configure(self):
cc = caps.parse(self.get_capabilities())
self.metadata = self.require_child(gws.base.metadata.Object, cc.metadata)
self.version = cc.version
self.operations = cc.operations
self.source_layers = cc.source_layers
self.supported_crs = cc.supported_crs
def find_features(self, args):
# first, find features within the bounds of given shapes,
# then, filter features precisely
# this is more performant than WFS spatial ops (at least for qgis)
# and also works without spatial ops support on the provider side
bounds = args.bounds
shape = None
if args.shapes:
map_tolerance = 0
if args.tolerance:
n, u = args.tolerance
map_tolerance = n * (args.resolution or 1) if u == 'px' else n
shape = gws.lib.shape.union(args.shapes).tolerance_polygon(map_tolerance)
bounds = shape.bounds
our_crs = bounds.crs
source_crs = self.source_crs or gws.lib.gis.best_crs(our_crs, self.supported_crs)
bbox = gws.lib.extent.transform(bounds.extent, our_crs, source_crs)
axis = gws.lib.gis.best_axis(source_crs, self.invert_axis_crs, gws.OwsProtocol.WFS, self.version)
invert_axis = axis == 'yx'
params = {}
if invert_axis:
bbox = gws.lib.gis.invert_bbox(bbox)
params['BBOX'] = bbox
if args.source_layer_names:
params['TYPENAMES' if self.version >= '2.0.0' else 'TYPENAME'] = args.source_layer_names
if args.limit:
params['COUNT' if self.version >= '2.0.0' else 'MAXFEATURES'] = args.limit
params['SRSNAME'] = source_crs
params['VERSION'] = self.version
params = gws.merge(params, args.get('params'))
text = gws.lib.ows.request.get_text(**self.operation_args(gws.OwsVerb.GetFeature, params=params))
features = gws.lib.ows.formats.read(text, crs=source_crs, invert_axis=invert_axis)
if features is None:
gws.log.error(f'WFS response not parsed, params={params!r}')
return []
if not shape:
return features
flt = []
for f in features:
if not f.shape:
continue
f.transform_to(our_crs)
if f.shape.intersects(shape):
flt.append(f)
if len(flt) != len(features):
gws.log.debug(f'WFS filter before={len(features)} after={len(flt)}')
return flt
##
def create(root: gws.IRoot, cfg: gws.Config, parent: gws.Node = None, shared: bool = False) -> Object:
return root.create_object(Object, cfg, parent, shared)
| 30.882883
| 105
| 0.630105
|
269d367974809d79cdaf648e542db28d3c0ae879
| 6,612
|
py
|
Python
|
yosys_spde_flow/postprocess_yosys_edif.py
|
antmicro/yosys-SpDE-flow
|
8c337d736b19e5927811dcada7b9aea9b31fe4c6
|
[
"Apache-2.0"
] | null | null | null |
yosys_spde_flow/postprocess_yosys_edif.py
|
antmicro/yosys-SpDE-flow
|
8c337d736b19e5927811dcada7b9aea9b31fe4c6
|
[
"Apache-2.0"
] | null | null | null |
yosys_spde_flow/postprocess_yosys_edif.py
|
antmicro/yosys-SpDE-flow
|
8c337d736b19e5927811dcada7b9aea9b31fe4c6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import argparse
from pathlib import Path
import re
def convert_lut_init_to_hex(val: str) -> str:
"""Converts EDIF decimal and hexadecimal notation to hexadecimal for SpDE.
Args:
val (str): value in decimal or hexadecimal notation (i.e. ``16'hABCD``)
Returns:
str: string containing only hexadecimal number, without ``0x`` prefix
(i.e. "ABCD")
"""
if "'" not in val:
return str(format(int(val), 'x')).upper()
else:
return str(val.split("'h")[1]).upper()
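def _convert_lut_init_to_hex_example() -> None:
    # Illustrative usage sketch added for clarity; both INIT notations are
    # hypothetical values of the kind found in Yosys EDIF output.
    assert convert_lut_init_to_hex("43690") == 'AAAA'
    assert convert_lut_init_to_hex("16'habcd") == 'ABCD'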
def find_closing_bracket(line: str, openbracketid: int) -> int:
"""Returns the index of the closing bracket for a given opening bracket.
Looks for the closing bracket in string for an opening bracket that is
pointed by the ``openbracketid``.
Args:
line (str) : a single line from the EDIF file
openbracketid (int): the index of the opening bracket for which the
closing bracket should be found
Returns:
int: index for the closing bracket or -1 if not found
"""
opencount = 0
finid = openbracketid
for c in line[openbracketid:]:
if c == '(':
opencount += 1
elif c == ')':
opencount -= 1
if opencount == 0:
return finid
finid += 1
return -1
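def _find_closing_bracket_example() -> None:
    # Illustrative usage sketch added for clarity; the line is a made-up
    # EDIF-like fragment.
    line = '(port (array X 4) (direction INPUT))'
    # The bracket opened at index 6 ("(array ...") closes at index 16.
    assert find_closing_bracket(line, 6) == 16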
def fix_array_line(line: str, arraysizes: dict) -> str:
"""Converts array notation from Yosys EDIF to notation acceptable by SpDE.
Arrays in EDIF file from Yosys are declared in a form::
(array (rename EDIF_ARR_NAME "verilog_name(maxid:minid)")") WIDTH)
and the members of array are accessed with::
(member EDIF_ARR_NAME MEMBER_ID)
This format is unacceptable for SpDE - it accepts only wires, so this
function converts every declaration and member access with wire-based
implementation.
Args:
line (str) : a single line from the EDIF file
arraysizes (dict): a dict mapping array name to its size. It is a
helper argument that stores the sizes from arrays from declaration
so they can be used in index recalculation in converting member
accesses
Yields:
str: Function yields lines that are produced during conversion of
declarations and accesses
"""
arrayregex = r'\(array\s*\(rename\s*(?P<name>[A-Za-z_$][A-Za-z0-9_$]*)\s*\"(?P<verilogname>[A-Za-z_$][A-Za-z0-9_$]*)\s*\((?P<left>[0-9]+)\s*:\s*(?P<right>[0-9]+)\s*\)\"\)\s*(?P<edifsize>[0-9]+)\s*\)' # noqa: E501
arrayid = line.find('(array ')
if arrayid != -1:
# extract whole array declaration
closing = find_closing_bracket(line, arrayid) + 1
tocut = line[arrayid:closing]
arraydef = re.match(arrayregex, tocut)
if not arraydef:
raise Exception(
                'Array declaration format not supported: {}'.format(tocut))
left = int(arraydef.group('left'))
right = int(arraydef.group('right'))
numelements = (left if left > right else right) + 1
variable_base = arraydef.group('name')
orig_var = arraydef.group('verilogname')
if variable_base in arraysizes:
raise Exception(
'There is already an array with name "{}" declared'.format(
variable_base))
arraysizes[variable_base] = numelements
if left == right == 0:
entrydef = '(rename {} "{}({})")'.format(
variable_base,
orig_var,
0)
newline = line.replace(tocut, entrydef)
yield newline
else:
for i in range(numelements):
entrydef = '(rename {}_{}_ "{}({})")'.format(
variable_base,
i,
orig_var,
i)
newline = line.replace(tocut, entrydef)
yield newline
else:
memberid = line.find('(member ')
if memberid != -1:
closing = find_closing_bracket(line, memberid) + 1
tocut = line[memberid:closing]
tokens = tocut.split(' ')
variable_base = tokens[1]
index = int(tokens[2][:-1])
entrydef = '{}_{}_'.format(
variable_base,
arraysizes[variable_base] - index - 1)
newline = line.replace(tocut, entrydef)
yield newline
else:
yield line
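def _fix_array_line_example() -> None:
    # Illustrative usage sketch added for clarity; the EDIF fragment is hypothetical.
    sizes = {}
    line = '(port (array (rename data "data(3:0)") 4) (direction INPUT))'
    expanded = list(fix_array_line(line, sizes))
    # The 4-bit array declaration is rewritten into one renamed wire per bit.
    assert expanded[0] == '(port (rename data_0_ "data(0)") (direction INPUT))'
    assert len(expanded) == 4 and sizes['data'] == 4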
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("input",
help="EDIF file containing the design",
type=Path)
parser.add_argument("output",
help="Output EDIF file",
type=Path)
args = parser.parse_args()
luttype = -1
lutlines = []
with open(args.input, 'r') as infile:
# since definition of the LUT cells are multi-line, this needs to
# be handled this way
for line in infile:
if '(instance ' in line: # new instance in EDIF
luttype = -1
elif '(cellRef LUT' in line:
s = '(cellRef LUT' # the new instance is LUT
numloc = line.find(s) + len(s)
luttype = int(line[numloc:].split(' ')[0])
elif '(property INIT' in line and luttype > 0:
intpre = '(integer ' # look for integer field for INIT
initdef = line.find(intpre)
if initdef == -1: # otherwise look for string field
intpre = '(string "'
initdef = line.find(intpre)
# remove the ending characters for field
initdefdel = '")' if intpre == '(string "' else ')'
initdefend = line.find(initdefdel, initdef)
# extract the number in decimal or hexadecimal notation
num = line[initdef + len(intpre):initdefend]
# compute pure hexadecimal notation
newval = convert_lut_init_to_hex(num)
# add updated LUT INIT value
line = line.replace(
line[initdef:initdefend + len(initdefdel)],
'(string "{}")'.format(newval))
lutlines.append(line)
lines = []
arraysizes = {}
for line in lutlines:
for newline in fix_array_line(line, arraysizes):
lines.append(newline)
with open(args.output, 'w') as outfile:
outfile.writelines(lines)
| 35.934783
| 217
| 0.553085
|
e86a83da2e2e4e6aed077e21c19f23f2e56d8b0a
| 11,340
|
py
|
Python
|
built-in/TensorFlow/Official/cv/image_classification/ResNet50_ID0360_for_TensorFlow2.X/tensorflow/tf2_common/training/utils.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 12
|
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
built-in/TensorFlow/Official/cv/image_classification/ResNet50_ID0360_for_TensorFlow2.X/tensorflow/tf2_common/training/utils.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 1
|
2022-01-20T03:11:05.000Z
|
2022-01-20T06:53:39.000Z
|
built-in/TensorFlow/Official/cv/image_classification/ResNet50_ID0360_for_TensorFlow2.X/tensorflow/tf2_common/training/utils.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 2
|
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Some layered modules/functions to help users writing custom training loop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import inspect
import six
import tensorflow as tf
def create_loop_fn(step_fn):
"""Creates a multiple steps function driven by the python while loop.
Args:
step_fn: A function which takes `iterator` as input.
Returns:
    A callable defined as the `loop_fn` definition below.
"""
def loop_fn(iterator, num_steps, state=None, reduce_fn=None):
"""A loop function with multiple steps.
Args:
iterator: A nested structure of tf.data `Iterator` or
`DistributedIterator`.
num_steps: The number of steps in the loop. If `num_steps==-1`, will
        iterate until exhausting the iterator.
state: An optional initial state before running the loop.
reduce_fn: a callable defined as `def reduce_fn(state, value)`, where
`value` is the outputs from `step_fn`.
Returns:
The updated state.
"""
try:
step = 0
while (num_steps == -1 or step < num_steps):
outputs = step_fn(iterator)
if reduce_fn is not None:
state = reduce_fn(state, outputs)
step += 1
return state
except (StopIteration, tf.errors.OutOfRangeError):
return state
return loop_fn
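# --- Hedged usage sketch (added for illustration; not part of the original
# module): drive `create_loop_fn` with a tiny in-memory dataset and a
# hypothetical step function that simply returns the next element.
def _example_create_loop_fn():
  """Minimal sketch: run until the iterator is exhausted, collecting outputs."""
  iterator = iter(tf.data.Dataset.range(5))
  def step_fn(it):
    return next(it).numpy()
  loop_fn = create_loop_fn(step_fn)
  # With `num_steps=-1` the loop stops when the iterator raises
  # StopIteration/OutOfRangeError; expected result: [0, 1, 2, 3, 4].
  return loop_fn(iterator, num_steps=-1, state=[],
                 reduce_fn=lambda state, value: state + [value])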
def create_tf_while_loop_fn(step_fn):
"""Create a multiple steps function driven by tf.while_loop on the host.
Args:
step_fn: A function which takes `iterator` as input.
Returns:
    A callable defined as the `loop_fn` definition below.
"""
@tf.function
def loop_fn(iterator, num_steps):
"""A loop function with multiple steps.
Args:
iterator: A nested structure of tf.data `Iterator` or
`DistributedIterator`.
num_steps: The number of steps in the loop. Must be a tf.Tensor.
"""
if not isinstance(num_steps, tf.Tensor):
raise ValueError("`num_steps` should be an `tf.Tensor`. Python object "
"may cause retracing.")
for _ in tf.range(num_steps):
step_fn(iterator)
return loop_fn
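# --- Hedged usage sketch (added for illustration; not part of the original
# module): `num_steps` is passed as a `tf.Tensor`, as required above, so the
# tf.function does not retrace for every new step count.
def _example_create_tf_while_loop_fn():
  """Minimal sketch: advance a counter for a fixed, tensor-valued step count."""
  counter = tf.Variable(0, dtype=tf.int64)
  iterator = iter(tf.data.Dataset.range(100))
  def step_fn(it):
    it.get_next()  # consume one element of the dataset
    counter.assign_add(1)
  loop_fn = create_tf_while_loop_fn(step_fn)
  loop_fn(iterator, tf.constant(5))
  return int(counter.numpy())  # expected: 5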
def make_distributed_dataset(strategy, dataset_or_fn, *args, **kwargs):
"""A helper function to create distributed dataset.
Args:
strategy: An instance of `tf.distribute.Strategy`.
    dataset_or_fn: An instance of `tf.data.Dataset` or a function which takes an
`tf.distribute.InputContext` as input and returns a `tf.data.Dataset`. If
it is a function, it could optionally have an argument named
`input_context` which is `tf.distribute.InputContext` argument type.
*args: The list of arguments to be passed to dataset_or_fn.
**kwargs: Any keyword arguments to be passed.
Returns:
A distributed Dataset.
"""
if strategy is None:
strategy = tf.distribute.get_strategy()
if isinstance(dataset_or_fn, tf.data.Dataset):
return strategy.experimental_distribute_dataset(dataset_or_fn)
if not callable(dataset_or_fn):
raise ValueError("`dataset_or_fn` should be either callable or an instance "
"of `tf.data.Dataset`")
def dataset_fn(ctx):
"""Wrapped dataset function for creating distributed dataset.."""
# If `dataset_or_fn` is a function and has `input_context` as argument
# names, pass `ctx` as the value of `input_context` when calling
# `dataset_or_fn`. Otherwise `ctx` will not be used when calling
# `dataset_or_fn`.
if six.PY3:
argspec = inspect.getfullargspec(dataset_or_fn)
else:
argspec = inspect.getargspec(dataset_or_fn)
args_names = argspec.args
if "input_context" in args_names:
kwargs["input_context"] = ctx
ds = dataset_or_fn(*args, **kwargs)
return ds
return strategy.experimental_distribute_datasets_from_function(dataset_fn)
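# --- Hedged usage sketch (added for illustration; not part of the original
# module): the dataset function accepts the optional `input_context` argument
# so each input pipeline only reads its own shard; the batch size of 8 is an
# arbitrary positional argument forwarded by `make_distributed_dataset`.
def _example_make_distributed_dataset(strategy=None):
  """Minimal sketch: distribute a dataset built from a callable."""
  def build_dataset(batch_size, input_context=None):
    ds = tf.data.Dataset.range(64)
    if input_context is not None:
      ds = ds.shard(input_context.num_input_pipelines,
                    input_context.input_pipeline_id)
    return ds.batch(batch_size)
  return make_distributed_dataset(strategy, build_dataset, 8)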
class SummaryManager(object):
"""A class manages writing summaries."""
def __init__(self,
summary_writer,
summary_fn,
global_step=None,
summary_interval=None):
"""Construct a summary manager object.
Args:
summary_writer: A `tf.summary.SummaryWriter` instance for writing
summaries.
summary_fn: A callable defined as `def summary_fn(name, tensor,
step=None)`, which describes the summary operation.
global_step: A `tf.Variable` instance for checking the current global step
value, in case users want to save summaries every N steps.
      summary_interval: An integer indicating the minimum step interval between
two summaries.
"""
if summary_writer is not None:
self._summary_writer = summary_writer
self._enabled = True
else:
self._summary_writer = tf.summary.create_noop_writer()
self._enabled = False
self._summary_fn = summary_fn
if global_step is None:
self._global_step = tf.summary.experimental.get_step()
else:
self._global_step = global_step
if summary_interval is not None:
if self._global_step is None:
raise ValueError("`summary_interval` is not None, but no `global_step` "
"can be obtained ")
self._last_summary_step = self._global_step.numpy()
self._summary_interval = summary_interval
@property
def summary_interval(self):
return self._summary_interval
@property
def summary_writer(self):
"""Returns the underlying summary writer."""
return self._summary_writer
def write_summaries(self, items, always_write=True):
"""Write a bulk of summaries.
Args:
items: a dictionary of `Tensors` for writing summaries.
always_write: An optional boolean. If `True`, the manager will always
write summaries unless the summaries have been written for the same
step. Otherwise the manager will only write the summaries if the
        interval between summaries is larger than `summary_interval`.
Returns:
      A boolean indicating whether the summaries were written.
"""
# TODO(rxsang): Support writing summaries with nested structure, so users
# can split the summaries into different directories for nicer visualization
# in Tensorboard, like train and eval metrics.
if not self._enabled:
return False
if self._summary_interval is not None:
current_step = self._global_step.numpy()
if current_step == self._last_summary_step:
return False
if not always_write and current_step < (self._last_summary_step +
self._summary_interval):
return False
self._last_summary_step = current_step
with self._summary_writer.as_default():
for name, tensor in items.items():
self._summary_fn(name, tensor, step=self._global_step)
return True
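# --- Hedged usage sketch (added for illustration; not part of the original
# module): the log directory below is made up and `tf.summary.scalar` is used
# as the `summary_fn`.
def _example_summary_manager(logdir="/tmp/tf2_common_summaries"):
  """Minimal sketch: write scalar summaries at most every 10 steps."""
  global_step = tf.Variable(0, dtype=tf.int64)
  manager = SummaryManager(tf.summary.create_file_writer(logdir),
                           tf.summary.scalar,
                           global_step=global_step,
                           summary_interval=10)
  global_step.assign_add(10)
  # Returns True: the step advanced by a full interval since construction.
  return manager.write_summaries({"loss": tf.constant(0.5)})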
@six.add_metaclass(abc.ABCMeta)
class Trigger(object):
"""An abstract class representing a "trigger" for some event."""
@abc.abstractmethod
def __call__(self, value: float, force_trigger=False):
"""Maybe trigger the event based on the given value.
Args:
value: the value for triggering.
      force_trigger: Whether the trigger is force-triggered.
Returns:
`True` if the trigger is triggered on the given `value`, and
`False` otherwise.
"""
@abc.abstractmethod
def reset(self):
"""Reset states in the trigger."""
class IntervalTrigger(Trigger):
"""Triggers on every fixed interval."""
def __init__(self, interval, start=0):
"""Constructs the IntervalTrigger.
Args:
interval: The triggering interval.
start: An initial value for the trigger.
"""
self._interval = interval
self._last_trigger_value = start
def __call__(self, value, force_trigger=False):
"""Maybe trigger the event based on the given value.
Args:
value: the value for triggering.
      force_trigger: If True, the trigger will be force-triggered unless the
last trigger value is equal to `value`.
Returns:
`True` if the trigger is triggered on the given `value`, and
`False` otherwise.
"""
if force_trigger and value != self._last_trigger_value:
self._last_trigger_value = value
return True
if self._interval and self._interval > 0:
if value >= self._last_trigger_value + self._interval:
self._last_trigger_value = value
return True
return False
def reset(self):
"""See base class."""
self._last_trigger_value = 0
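# --- Hedged usage sketch (added for illustration; not part of the original
# module).
def _example_interval_trigger():
  """Minimal sketch: the trigger fires once per 100 units of `value`."""
  trigger = IntervalTrigger(interval=100)
  # Expected result: [100, 200, 300, 400, 500].
  return [value for value in range(0, 501, 50) if trigger(value)]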
class EpochHelper(object):
"""A Helper class to handle epochs in Customized Training Loop."""
def __init__(self, epoch_steps, global_step):
"""Constructs the EpochHelper.
Args:
      epoch_steps: An integer indicating how many steps are in an epoch.
      global_step: A `tf.Variable` instance indicating the current global step.
"""
self._epoch_steps = epoch_steps
self._global_step = global_step
self._current_epoch = None
self._epoch_start_step = None
self._in_epoch = False
def epoch_begin(self):
"""Returns whether a new epoch should begin."""
if self._in_epoch:
return False
current_step = self._global_step.numpy()
self._epoch_start_step = current_step
self._current_epoch = current_step // self._epoch_steps
self._in_epoch = True
return True
def epoch_end(self):
"""Returns whether the current epoch should end."""
if not self._in_epoch:
raise ValueError("`epoch_end` can only be called inside an epoch")
current_step = self._global_step.numpy()
epoch = current_step // self._epoch_steps
if epoch > self._current_epoch:
self._in_epoch = False
return True
return False
@property
def batch_index(self):
"""Index of the next batch within the current epoch."""
return self._global_step.numpy() - self._epoch_start_step
@property
def current_epoch(self):
return self._current_epoch
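# --- Hedged usage sketch (added for illustration; not part of the original
# module): a toy loop of 10 steps with 5 steps per epoch.
def _example_epoch_helper():
  """Minimal sketch: EpochHelper reports exactly two epoch beginnings."""
  global_step = tf.Variable(0, dtype=tf.int64)
  helper = EpochHelper(epoch_steps=5, global_step=global_step)
  epochs_started = 0
  for _ in range(10):
    if helper.epoch_begin():
      epochs_started += 1
    global_step.assign_add(1)
    helper.epoch_end()
  return epochs_started  # expected: 2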
| 32.4
| 80
| 0.686684
|
dfc6fcd167a6937d6eb27e24f74ee74f81447e32
| 655
|
py
|
Python
|
tests/regression_test/markdown_snippets.py
|
plaflamme/mdbook-plantuml
|
f6814b44cbddf856ef2120557a4ff3c1cf72f19f
|
[
"MIT"
] | null | null | null |
tests/regression_test/markdown_snippets.py
|
plaflamme/mdbook-plantuml
|
f6814b44cbddf856ef2120557a4ff3c1cf72f19f
|
[
"MIT"
] | null | null | null |
tests/regression_test/markdown_snippets.py
|
plaflamme/mdbook-plantuml
|
f6814b44cbddf856ef2120557a4ff3c1cf72f19f
|
[
"MIT"
] | null | null | null |
class Snippet:
def __init__(self, code):
self.plantuml_code = code.strip()
self.markdown = "```plantuml\n{}\n```".format(self.plantuml_code)
ab_class_diagram = Snippet("""\
@startuml
A --|> B
@enduml
""")
cd_class_diagram = Snippet("""\
@startuml
C --|> D
@enduml
""")
ditaa = Snippet("""\
@startditaa
+--------+ +-------+ +-------+
| +---+ ditaa +--> | |
| Text | +-------+ |diagram|
|Document| |!magic!| | |
| {d}| | | | |
+---+----+ +-------+ +-------+
: ^
| Lots of work |
+-------------------------+
@endditaa
""")
| 19.264706
| 73
| 0.381679
|
7fe2bbaa096002ac6b37c5634f64f7e10284e0ab
| 3,810
|
py
|
Python
|
fastai/collab.py
|
fish5421/fastai_update
|
c3dbdfba59512b5004093119f7676f224eb1d15c
|
[
"Apache-2.0"
] | 1
|
2019-12-18T22:49:21.000Z
|
2019-12-18T22:49:21.000Z
|
fastai/collab.py
|
fish5421/fastai_update
|
c3dbdfba59512b5004093119f7676f224eb1d15c
|
[
"Apache-2.0"
] | null | null | null |
fastai/collab.py
|
fish5421/fastai_update
|
c3dbdfba59512b5004093119f7676f224eb1d15c
|
[
"Apache-2.0"
] | 1
|
2019-01-12T17:43:19.000Z
|
2019-01-12T17:43:19.000Z
|
"Module support for Collaborative Filtering"
from .torch_core import *
from .basic_train import *
from .data import *
from .layers import *
__all__ = ['CollabFilteringDataset', 'EmbeddingDotBias', 'get_collab_learner']
@dataclass
class CollabFilteringDataset(DatasetBase):
"Base dataset for collaborative filtering."
user:Series
item:Series
ratings:np.ndarray
def __post_init__(self):
self.user_ids = np.array(self.user.cat.codes, dtype=np.int64)
self.item_ids = np.array(self.item.cat.codes, dtype=np.int64)
def __len__(self)->int: return len(self.ratings)
def __getitem__(self, idx:int)->Tuple[Tuple[int,int],float]:
return (self.user_ids[idx],self.item_ids[idx]), self.ratings[idx]
@property
def c(self) -> int: return 1
@property
def n_user(self)->int: return len(self.user.cat.categories)
@property
def n_item(self)->int: return len(self.item.cat.categories)
@classmethod
def from_df(cls, rating_df:DataFrame, pct_val:float=0.2, user_name:Optional[str]=None, item_name:Optional[str]=None,
                rating_name:Optional[str]=None) -> Tuple['CollabFilteringDataset','CollabFilteringDataset']:
        "Split a given dataframe into a training and a validation set."
if user_name is None: user_name = rating_df.columns[0]
if item_name is None: item_name = rating_df.columns[1]
if rating_name is None: rating_name = rating_df.columns[2]
user = rating_df[user_name]
item = rating_df[item_name]
ratings = np.array(rating_df[rating_name], dtype=np.float32)
idx = np.random.permutation(len(ratings))
if pct_val is None: return cls(user, item, ratings)
cut = int(pct_val * len(ratings))
return (cls(user[idx[cut:]], item[idx[cut:]], ratings[idx[cut:]]),
cls(user[idx[:cut]], item[idx[:cut]], ratings[idx[:cut]]))
@classmethod
    def from_csv(cls, csv_name:str, **kwargs) -> Tuple['CollabFilteringDataset','CollabFilteringDataset']:
        "Split a given table in a csv file into a training and a validation set."
df = pd.read_csv(csv_name)
return cls.from_df(df, **kwargs)
class EmbeddingDotBias(nn.Module):
"Base model for callaborative filtering."
def __init__(self, n_factors:int, n_users:int, n_items:int, min_score:float=None, max_score:float=None):
super().__init__()
self.min_score,self.max_score = min_score,max_score
(self.u_weight, self.i_weight, self.u_bias, self.i_bias) = [get_embedding(*o) for o in [
(n_users, n_factors), (n_items, n_factors), (n_users,1), (n_items,1)
]]
def forward(self, users:LongTensor, items:LongTensor) -> Tensor:
dot = self.u_weight(users)* self.i_weight(items)
res = dot.sum(1) + self.u_bias(users).squeeze() + self.i_bias(items).squeeze()
if self.min_score is None: return res
return torch.sigmoid(res) * (self.max_score-self.min_score) + self.min_score
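# --- Hedged usage sketch (added for illustration; not part of the original
# module): the index values are arbitrary, and with `min_score`/`max_score`
# set the predictions are squashed into the [0, 5] range.
def _example_embedding_dot_bias():
    "Minimal sketch of a forward pass through EmbeddingDotBias."
    model = EmbeddingDotBias(n_factors=8, n_users=10, n_items=20,
                             min_score=0., max_score=5.)
    users = torch.tensor([0, 1, 2], dtype=torch.long)
    items = torch.tensor([3, 4, 5], dtype=torch.long)
    return model(users, items)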
def get_collab_learner(ratings:DataFrame, n_factors:int, pct_val:float=0.2, user_name:Optional[str]=None,
item_name:Optional[str]=None, rating_name:Optional[str]=None, test:DataFrame=None, metrics=None,
min_score:float=None, max_score:float=None, loss_fn:LossFunction=F.mse_loss, **kwargs) -> Learner:
"Create a Learner for collaborative filtering."
datasets = list(CollabFilteringDataset.from_df(ratings, pct_val, user_name, item_name, rating_name))
if test is not None:
datasets.append(CollabFilteringDataset.from_df(test, None, user_name, item_name, rating_name))
data = DataBunch.create(*datasets, **kwargs)
model = EmbeddingDotBias(n_factors, datasets[0].n_user, datasets[0].n_item, min_score, max_score)
return Learner(data, model, loss_fn=loss_fn, metrics=metrics)
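# --- Hedged usage sketch (added for illustration; not part of the original
# module): the column names and values are made up, and the user/item columns
# are cast to pandas categoricals because the dataset reads `.cat.codes`;
# extra keyword arguments such as `bs` are forwarded to DataBunch.create.
def _example_get_collab_learner():
    "Minimal sketch of building a collaborative filtering Learner."
    ratings = pd.DataFrame({'user':   ['u1', 'u1', 'u2', 'u2', 'u3'],
                            'item':   ['i1', 'i2', 'i1', 'i3', 'i2'],
                            'rating': [4.0, 3.0, 5.0, 2.0, 4.5]})
    for col in ('user', 'item'):
        ratings[col] = ratings[col].astype('category')
    return get_collab_learner(ratings, n_factors=8, pct_val=0.2,
                              min_score=0., max_score=5., bs=4)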
| 47.625
| 120
| 0.693176
|
40ade410d871b79f8a7d9178ba86c69eff8a674c
| 4,377
|
py
|
Python
|
gunicorn/app/django_wsgi.py
|
chalkchisel/gunicorn
|
4d87f1696202fcf1f54dbaee1d86bb2638865f34
|
[
"MIT"
] | null | null | null |
gunicorn/app/django_wsgi.py
|
chalkchisel/gunicorn
|
4d87f1696202fcf1f54dbaee1d86bb2638865f34
|
[
"MIT"
] | null | null | null |
gunicorn/app/django_wsgi.py
|
chalkchisel/gunicorn
|
4d87f1696202fcf1f54dbaee1d86bb2638865f34
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
""" module used to build the django wsgi application """
import os
import re
import sys
import time
try:
from io import StringIO
from imp import reload
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.management.validation import get_validation_errors
from django.utils import translation
try:
from django.core.servers.basehttp import get_internal_wsgi_application
django14 = True
except ImportError:
from django.core.handlers.wsgi import WSGIHandler
django14 = False
from gunicorn import util
def make_wsgi_application():
# validate models
s = StringIO()
if not getattr(settings, "DISABLE_GUNICORN_VALIDATION", False) and get_validation_errors(s):
s.seek(0)
error = s.read()
sys.stderr.write("One or more models did not validate:\n%s" % error)
sys.stderr.flush()
sys.exit(1)
translation.activate(settings.LANGUAGE_CODE)
if django14:
return get_internal_wsgi_application()
return WSGIHandler()
def reload_django_settings():
mod = util.import_module(os.environ['DJANGO_SETTINGS_MODULE'])
# reload module
reload(mod)
# reload settings.
    # Use code from the django.conf Settings class.
# Settings that should be converted into tuples if they're mistakenly entered
# as strings.
tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
for setting in dir(mod):
if setting == setting.upper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and type(setting_value) == str:
setting_value = (setting_value,) # In case the user forgot the comma.
setattr(settings, setting, setting_value)
# Expand entries in INSTALLED_APPS like "django.contrib.*" to a list
# of all those apps.
new_installed_apps = []
for app in settings.INSTALLED_APPS:
if app.endswith('.*'):
app_mod = util.import_module(app[:-2])
appdir = os.path.dirname(app_mod.__file__)
app_subdirs = os.listdir(appdir)
name_pattern = re.compile(r'[a-zA-Z]\w*')
for d in sorted(app_subdirs):
if (name_pattern.match(d) and
os.path.isdir(os.path.join(appdir, d))):
new_installed_apps.append('%s.%s' % (app[:-2], d))
else:
new_installed_apps.append(app)
setattr(settings, "INSTALLED_APPS", new_installed_apps)
if hasattr(time, 'tzset') and settings.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root,
*(settings.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" %
settings.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = settings.TIME_ZONE
time.tzset()
# Settings are configured, so we can set up the logger if required
if getattr(settings, 'LOGGING_CONFIG', False):
# First find the logging configuration function ...
logging_config_path, logging_config_func_name = settings.LOGGING_CONFIG.rsplit('.', 1)
logging_config_module = util.import_module(logging_config_path)
logging_config_func = getattr(logging_config_module, logging_config_func_name)
# ... then invoke it with the logging settings
logging_config_func(settings.LOGGING)
def make_command_wsgi_application(admin_mediapath):
reload_django_settings()
try:
from django.core.servers.basehttp import AdminMediaHandler
return AdminMediaHandler(make_wsgi_application(), admin_mediapath)
except ImportError:
return make_wsgi_application()
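# --- Hedged usage sketch (added for illustration; not part of the original
# module): the settings module name below is hypothetical and must be
# importable before the WSGI application can be built.
def _example_build_application(settings_module="myproject.settings"):
    """ minimal sketch: point Django at a settings module, then build the app """
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_module)
    return make_wsgi_application()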
| 36.475
| 98
| 0.64085
|
2848ad32094cfe2ff7340ab3e1ecb3055657b743
| 2,566
|
py
|
Python
|
neo/rawio/tests/test_neuralynxrawio.py
|
lkoelman/python-neo
|
6b0454519b4ead6605d3ce4100a07c33f57df830
|
[
"BSD-3-Clause"
] | 1
|
2020-01-13T16:06:56.000Z
|
2020-01-13T16:06:56.000Z
|
neo/rawio/tests/test_neuralynxrawio.py
|
lkoelman/python-neo
|
6b0454519b4ead6605d3ce4100a07c33f57df830
|
[
"BSD-3-Clause"
] | 8
|
2018-06-02T11:46:10.000Z
|
2018-09-04T15:51:45.000Z
|
src/neo/neo/rawio/tests/test_neuralynxrawio.py
|
grg2rsr/SeqPeelSort
|
58a207976fb33a50ea8e42b70d7da73b03474f42
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# needed for python 3 compatibility
from __future__ import unicode_literals, print_function, division, absolute_import
import unittest
from neo.rawio.neuralynxrawio import NeuralynxRawIO
from neo.rawio.tests.common_rawio_test import BaseTestRawIO
import logging
logging.getLogger().setLevel(logging.INFO)
class TestNeuralynxRawIO(BaseTestRawIO, unittest.TestCase, ):
rawioclass = NeuralynxRawIO
entities_to_test = [
'Cheetah_v5.5.1/original_data',
'Cheetah_v5.6.3/original_data',
'Cheetah_v5.7.4/original_data',
]
files_to_download = [
'Cheetah_v5.5.1/original_data/CheetahLogFile.txt',
'Cheetah_v5.5.1/original_data/CheetahLostADRecords.txt',
'Cheetah_v5.5.1/original_data/Events.nev',
'Cheetah_v5.5.1/original_data/STet3a.nse',
'Cheetah_v5.5.1/original_data/STet3b.nse',
'Cheetah_v5.5.1/original_data/Tet3a.ncs',
'Cheetah_v5.5.1/original_data/Tet3b.ncs',
'Cheetah_v5.5.1/plain_data/STet3a.txt',
'Cheetah_v5.5.1/plain_data/STet3b.txt',
'Cheetah_v5.5.1/plain_data/Tet3a.txt',
'Cheetah_v5.5.1/plain_data/Tet3b.txt',
'Cheetah_v5.5.1/plain_data/Events.txt',
'Cheetah_v5.5.1/README.txt',
'Cheetah_v5.6.3/original_data/CheetahLogFile.txt',
'Cheetah_v5.6.3/original_data/CheetahLostADRecords.txt',
'Cheetah_v5.6.3/original_data/Events.nev',
'Cheetah_v5.6.3/original_data/CSC1.ncs',
'Cheetah_v5.6.3/original_data/CSC2.ncs',
'Cheetah_v5.6.3/original_data/TT1.ntt',
'Cheetah_v5.6.3/original_data/TT2.ntt',
'Cheetah_v5.6.3/original_data/VT1.nvt',
'Cheetah_v5.6.3/plain_data/Events.txt',
'Cheetah_v5.6.3/plain_data/CSC1.txt',
'Cheetah_v5.6.3/plain_data/CSC2.txt',
'Cheetah_v5.6.3/plain_data/TT1.txt',
'Cheetah_v5.6.3/plain_data/TT2.txt',
'Cheetah_v5.7.4/original_data/CSC1.ncs',
'Cheetah_v5.7.4/original_data/CSC2.ncs',
'Cheetah_v5.7.4/original_data/CSC3.ncs',
'Cheetah_v5.7.4/original_data/CSC4.ncs',
'Cheetah_v5.7.4/original_data/CSC5.ncs',
'Cheetah_v5.7.4/original_data/Events.nev',
'Cheetah_v5.7.4/plain_data/CSC1.txt',
'Cheetah_v5.7.4/plain_data/CSC2.txt',
'Cheetah_v5.7.4/plain_data/CSC3.txt',
'Cheetah_v5.7.4/plain_data/CSC4.txt',
'Cheetah_v5.7.4/plain_data/CSC5.txt',
'Cheetah_v5.7.4/plain_data/Events.txt',
'Cheetah_v5.7.4/README.txt']
if __name__ == "__main__":
unittest.main()
| 38.298507
| 82
| 0.676539
|
e9be373f642f905e409db864c3453e849257f2ed
| 19,928
|
py
|
Python
|
loewieec_sync_hk/bk20170427/sale.py
|
lester-lees/extra_addons_hk
|
edd2c2595146bc9c99b75a2d0831a93f940fa55c
|
[
"Apache-2.0"
] | null | null | null |
loewieec_sync_hk/bk20170427/sale.py
|
lester-lees/extra_addons_hk
|
edd2c2595146bc9c99b75a2d0831a93f940fa55c
|
[
"Apache-2.0"
] | null | null | null |
loewieec_sync_hk/bk20170427/sale.py
|
lester-lees/extra_addons_hk
|
edd2c2595146bc9c99b75a2d0831a93f940fa55c
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
from openerp.osv import fields,osv
import datetime
from openpyxl.reader.excel import load_workbook
import os
import re
from openerp import tools
class product_tmalljd(osv.osv):
_name = "product.tmalljd"
#_inherits = {'product.product': 'product_id'}
def _get_ean13(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for line in self.pool.get('product.tmalljd').browse(cr, uid, ids, context=context):
if line.erp_product_id : result[line.id] = line.erp_product_id.ean13 or line.erp_product_id.default_code
return result
def _get_stock(self, cr, uid, ids, field_name, arg, context=None):
result = {}
domain_products = [('location_id','=',38)]
quants = self.pool.get('stock.quant').read_group(cr, uid, domain_products, ['product_id', 'qty'], ['product_id'], context=context)
quants = dict(map(lambda x: (x['product_id'][0], x['qty']), quants))
for line in self.pool.get('product.tmalljd').browse(cr, uid, ids, context=context):
id = line.id
if line.erp_product_id :
pid = line.erp_product_id.id
result[id] = quants.get(pid, 0.0)
else:
result[id] = 0
return result
_columns = {
'erp_product_id': fields.many2one('product.product','ERP Name'),
'erp_ean13': fields.char('ERP_EAN13'), #fields.function(_get_ean13,type='char',string='ERP_EAN13'),
'erp_stock': fields.float('ERP_Stock'),#fields.function(_get_stock,type='float',string='ERP库存'),
'ec_shop_id': fields.many2one('loewieec.shop', u'店铺'),
'ec_num_iid': fields.char(u'电商数字编码'),
'ec_sku_id': fields.char(u'SKU编码'),
'ec_title':fields.char(u'商品标题'),
'ec_price':fields.float(u'售价'),
'ec_color':fields.char(u'颜色'),
'ec_ean13': fields.char(u'条形码'),
'ec_brand': fields.char(u'品牌'),
'ec_qty': fields.integer(u'EC数量'),
'ec_outer_code': fields.char(u'商家外部编码'),
'ec_product_name': fields.char(u'产品名称'),
'ec_product_id': fields.char(u'EC产品ID'),
'ec_num_custom':fields.char(u'海关代码'),
}
class loewieec_error(osv.osv):
_name = "loewieec.error"
_columns = {
'shop_id': fields.many2one('loewieec.shop', u'店铺'),
'name': fields.char(u'错误信息'),
}
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
_columns = {
'logistic_sent': fields.related('coe_no', 'logistic_sent', type='boolean', string=u'已同步运单?',readonly=True),
'coe_no': fields.many2one('sale.coe',string=u'COE单号'),
'tmi_jdi_no': fields.char(string=u'电商单号'),
'buyer_nick': fields.char(u'买家昵称'),
'pay_time': fields.datetime(u'EC支付时间'),
'create_time_tmjd': fields.datetime(u'EC创建时间'),
}
def copy_sale_order_line(self, cr, uid, ids, context=None):
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
line.copy()
class sale_order(osv.osv):
_name = "sale.order"
_inherit = "sale.order"
_columns = {
'express_ids': fields.related('order_line', 'coe_no', type='many2one', relation='sale.coe', string=u'TMI_JDI收货人'),
'tmi_jdi_nos': fields.related('order_line', 'tmi_jdi_no', type='char', string='TMI_JDI_NO'),
'selected': fields.boolean('Selected'),
'shop_id': fields.many2one('loewieec.shop', string=u"EC店铺名", readonly=True),
'sale_code': fields.char(u'EC单号', readonly=True),
'tid': fields.char(u'交易单号', readonly=True),
'buyer_nick': fields.char(u'买家昵称'),
'order_state': fields.selection([
('WAIT_SELLER_SEND_GOODS', u'等待卖家发货'),
('WAIT_BUYER_CONFIRM_GOODS', u'等待买家确认收货'),
('TRADE_FINISHED', u'交易成功'),
('TRADE_CLOSED', u'交易关闭'),
], u'订单状态'),
}
def update_orders_seller_memo(self, cr, uid, ids, context=None):
sale_order_obj = self.pool.get('sale.order').browse(cr,uid,ids[0],context=context)
shop = sale_order_obj.shop_id
if not shop : return False
if shop.code == 'JDI' :
raise osv.except_osv(u'错误',u'''JDI京东国际订单无需更新备注''')
return False
statement = "select tmi_jdi_no from sale_order_line where order_id=%d group by tmi_jdi_no" % ids[0]
cr.execute(statement)
tids = [item[0] for item in cr.fetchall()]
if not tids : return False
return shop.update_orders_seller_memo(context=context, tids=tids)
    def delete_lines_of_tmijdi_no(self, cr, uid, ids, context=None): # completely delete the Tmall/JD order lines
sale_order_obj = self.pool.get('sale.order').browse(cr,uid,ids[0],context=context)
note = sale_order_obj.note or ''
tmijdi_nos = note.strip().split(',')
tmijdi_no_list = []
for tmijdi_no in tmijdi_nos:
if tmijdi_no.strip() != '': tmijdi_no_list.append( tmijdi_no.strip() )
statement = "delete from sale_order_line where order_id=%d and tmi_jdi_no in (%s)" % ( ids[0], ("'" + """','""".join(tmijdi_no_list) + "'") )
cr.execute(statement)
val = val1 = 0.0
cur = sale_order_obj.pricelist_id.currency_id
for line in sale_order_obj.order_line:
val1 += line.price_subtotal
val += self._amount_line_tax(cr, uid, line, context=context)
cur_obj = self.pool.get('res.currency')
amount_tax = cur_obj.round(cr, uid, cur, val)
amount_untaxed = cur_obj.round(cr, uid, cur, val1)
amount_total = amount_untaxed + amount_tax
sale_order_obj.write({'amount_tax':amount_tax, 'amount_untaxed': amount_untaxed,'amount_total':amount_total})
def delete_multi_gift_lines(self, cr, uid, ids, context=None):
sale_order_obj = self.pool.get('sale.order').browse(cr,uid,ids[0],context=context)
gift_product_id = sale_order_obj.shop_id.gift_product_id.id
coe_list = []
delete_list = []
for line in sale_order_obj.order_line.filtered(lambda r: r.product_id.id == gift_product_id):
if line.coe_no.name in coe_list :
delete_list.append( line.coe_no.name )
line.unlink()
else:
coe_list.append(line.coe_no.name)
if delete_list :
log = sale_order_obj.note or ''
sale_order_obj.note = u"删除了以下运单号的重复赠品行:" + chr(10) + ','.join(delete_list) + chr(10) + log
def delete_no_coeno_lines(self, cr, uid, ids, context=None):
statement = "(select s.id from sale_order_line s left join sale_coe c on s.coe_no=c.id where s.order_id=%d and trim(c.name) not like 'EL%sHK') union (select id from sale_order_line where order_id=%d and coe_no is Null)" % (ids[0], '%', ids[0])
cr.execute(statement)
line_ids = [ item[0] for item in cr.fetchall() ]
sale_line_obj = self.pool.get('sale.order.line').unlink(cr,uid, line_ids,context=context)
def update_waybill_no(self, cr, uid, ids, context=None):
sale_order_obj = self.pool.get('sale.order').browse(cr,uid,ids[0],context=context)
shop = sale_order_obj.shop_id
if not shop : return False
if shop.code == 'JDI' :
return shop.jdi_order_delivery(salesorder=sale_order_obj, context=context)
return shop.update_tmall_waybill(context=context, salesorder=sale_order_obj)
def view_express_data(self, cr, uid, ids, context=None):
sale_order_obj = self.pool.get('sale.order').browse(cr,uid,ids[0],context=context)
if not sale_order_obj :
raise osv.except_osv(u'Sale order 错误',u'''请先保存销售单草稿''')
return False
sale_order_line_ids = self.pool.get('sale.order.line').search(cr,uid,[('order_id','=',ids[0])],context=context)
if len(sale_order_line_ids)< 1: return False
eids = self.pool.get('sale.order.line').read(cr,uid,sale_order_line_ids,['coe_no'],context=context)
express_ids = [ eid['coe_no'] and eid['coe_no'][0] for eid in eids ]
customer_id = sale_order_obj.partner_id.id
sale_coe_obj = self.pool.get('sale.coe')
platform = sale_order_obj.shop_id.code
if len(express_ids)>0:
for express_obj in sale_coe_obj.browse(cr,uid,express_ids,context=context):
express_obj.sale_id = ids[0]
express_obj.customer = customer_id
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'loewieec_sync_hk', 'action_loewieec_salecoe')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
result['domain'] = [('id','in',express_ids)]
result['res_id'] = express_ids
result['context'] = {'default_sale_id':ids[0],'default_customer':customer_id}
return result
class sale_coe(osv.osv):
_name = "sale.coe"
_columns = {
'logistic_sent': fields.boolean(u'已同步运单?',default=False, readonly=True, copy=False),
'sale_id': fields.many2one('sale.order', string='Sales Order', readonly=True, states={'draft': [('readonly', False)]} , copy=False),
        'picking_id': fields.many2one('stock.picking',string='Picking Order', readonly=True, states={'draft': [('readonly', False)]}, copy=False),
'customer': fields.many2one('res.partner',string=u'客户', readonly=True, states={'draft': [('readonly', False)]}, copy=False),
'tmi_jdi_no': fields.char(string='TMI JDI NO', readonly=True, states={'draft': [('readonly', False)]}),
'name':fields.char(string='COE NO', readonly=True, states={'draft': [('readonly', False)]}),
'receive_name': fields.char(string='Receive Name', readonly=True, states={'draft': [('readonly', False)]}),
'tel': fields.char(string='Cell Phone', readonly=True, states={'draft': [('readonly', False)]}),
'telephone': fields.char(string='Telephone', readonly=True, states={'draft': [('readonly', False)]}),
'province': fields.char(string='Province', readonly=True, states={'draft': [('readonly', False)]}),
'city': fields.char(string='City', readonly=True, states={'draft': [('readonly', False)]}),
'county': fields.char(string='County', readonly=True, states={'draft': [('readonly', False)]}),
'address': fields.char(string='Address', readonly=True, states={'draft': [('readonly', False)]}),
'zip': fields.char(string='Zip', readonly=True, states={'draft': [('readonly', False)]}),
'class_desc': fields.char(string='Desc',default=u'None', readonly=True, states={'draft': [('readonly', False)]}),
'qty': fields.integer(string='Quantity', default=1, readonly=True, states={'draft': [('readonly', False)]}),
'price': fields.float(string='Fee',default=50, readonly=True, states={'draft': [('readonly', False)]}),
'weight': fields.float(string='Weight',default=0.2, readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection([('draft',u'草稿'),('done',u'完成')],string='State',default='draft'),
}
class stock_move(osv.osv):
_inherit = "stock.move"
def _get_coe_no(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for move in self.pool.get('stock.move').browse(cr, uid, ids, context=context):
result[move.id] = move.procurement_id.sale_line_id.coe_no.id
return result
_columns = {
#'sale_order_line': fields.function(_get_sale_order_line, type='char',string='Sales Line'),
'coe_no': fields.function(_get_coe_no,type='many2one',relation='sale.coe',string='COE NO'),
}
class stock_picking(osv.osv):
_inherit = "stock.picking"
def get_full_path(self, cr, uid, path):
        # sanitize path
path = re.sub('[.]', '', path)
path = path.strip('/\\')
return os.path.join(tools.config.filestore(cr.dbname), path)
def import_moves_from_excel(self, cr, uid, ids, context=None):
attachment_obj = self.pool.get('ir.attachment')
attachment_id = attachment_obj.search(cr,uid,[('res_id', '=', ids[0])], context=context)
if len(attachment_id)<1: return False
attach = attachment_obj.browse(cr,uid,attachment_id[0],context=context)
fname = attach.store_fname
display_name = attach.name
if not fname : return False
fname = self.get_full_path(cr, uid, fname)
wb = load_workbook(filename=fname)
#ws = wb.get_sheet_by_name("Sheet1")
ws = wb.get_sheet_by_name(wb.get_sheet_names()[0])
highest_row = ws.get_highest_row()
highest_col = ws.get_highest_column()
title_name = ws.cell(row = 0,column = 0).value
title_quantity = ws.cell(row = 0,column = 1).value
if highest_col < 2 or title_name != "name" or title_quantity != "quantity":
raise osv.except_osv(u'Excel错误',u'''文件:%s 格式不正确.''' % display_name)
row_start = 1
lines = []
product_obj = self.pool.get('product.product')
while row_start < highest_row :
name = ws.cell(row=row_start,column=0).value
name = name.strip()
qty_tmp = ws.cell(row=row_start,column=1)
quantity = qty_tmp.get_original_value() or 1
product_ids = product_obj.search(cr, uid, [('name_template','=',name)], context=context)
if not product_ids : raise osv.except_osv(u'产品名错误',u'''没有产品: %s 。''' % name)
lines.append((product_ids[0],quantity))
row_start += 1
picking_obj = self.pool.get('stock.picking').browse(cr,uid,ids[0],context=context)
picking_type = picking_obj.picking_type_id
vals = {
'product_id': 0,
'product_uom_qty':1,
'location_dest_id':picking_type.default_location_dest_id.id,
'location_id': picking_type.default_location_src_id.id,
'company_id': picking_obj.company_id.id,
'date':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'date_expected':(datetime.datetime.now() + datetime.timedelta(3)).strftime("%Y-%m-%d %H:%M:%S"),
'invoice_state':'none',
'name':'-',
'procure_method':'make_to_stock',
'state':'draft',
'product_uom':1,
'weight_uom_id':1,
'picking_id': ids[0],
}
move_obj = self.pool.get('stock.move')
for line in lines :
vals_move = vals.copy()
vals_move.update({'product_id':line[0], 'product_uom_qty':line[1]})
move_obj.create(cr, uid, vals_move, context=context)
def view_express_data(self, cr, uid, ids, context=None):
stock_picking_obj = self.pool.get('stock.picking').browse(cr,uid,ids[0],context=context)
if not stock_picking_obj.sale_id :
raise osv.except_osv(u'stock.picking 错误',u'''没有销售单与此仓库单有关联''')
return False
order_id = stock_picking_obj.sale_id.id
partner_id = stock_picking_obj.partner_id.id
sale_order_line_ids = self.pool.get('sale.order.line').search(cr,uid,[('order_id','=',order_id)],context=context)
if len(sale_order_line_ids)< 1: return False
eids = self.pool.get('sale.order.line').read(cr,uid,sale_order_line_ids,['coe_no'],context=context)
express_ids = [ eid['coe_no'] and eid['coe_no'][0] for eid in eids ]
if len(express_ids) < 1:
raise osv.except_osv(u'stock.picking 错误',u'''没有快递信息''')
return False
sale_coe_obj = self.pool.get('sale.coe')
for express_obj in sale_coe_obj.browse(cr,uid,express_ids,context=context):
if not express_obj.picking_id:
express_obj.picking_id = ids[0]
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'loewieec_sync_hk', 'action_loewieec_salecoe')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
result['domain'] = [('id','in', express_ids)]
result['res_id'] = express_ids
result['context'] = {'default_sale_id':order_id,'default_customer':partner_id,'default_picking_id':ids[0]}
return result
def create_return_lines_from_coe_no(self, cr, uid, ids, context=None):
picking = self.browse(cr, uid, ids[0], context=context)
coenos = picking.note or ''
if not coenos : return
coenos = coenos.strip().split(',')
coe_list = []
for coe in coenos:
coe = coe.strip()
if coe != '' : coe_list.append( coe )
statement = "select s.product_id, s.product_uom_qty, c.name from sale_order_line s left join sale_coe c on s.coe_no=c.id where s.state='done' and s.coe_no in (select id from sale_coe where name in (%s))" % ("'" + """','""".join(coe_list) + "'")
cr.execute(statement)
res = cr.fetchall()
vals_move = {
'create_uid':uid,
'product_id': 0, #,
'product_uom_qty':0,
'location_dest_id':12,
'location_id':9,
'company_id':1,
'date':datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'date_expected':(datetime.datetime.now() + datetime.timedelta(3)).strftime("%Y-%m-%d %H:%M:%S"),
'invoice_state':'none',
'name':'-',
'procure_method':'make_to_stock',
'state':'draft',
'product_uom':1,
'weight_uom_id':1,
'picking_id': ids[0],
}
move_obj = self.pool.get('stock.move')
for line in res:
val = vals_move.copy()
val.update({'product_id':line[0],'product_uom_qty':line[1],'name':line[2]})
move_obj.create(cr,uid,val,context=context)
return True
def do_unreserve_no_coe_lines(self, cr, uid, ids, context=None):
picking = self.browse(cr, uid, ids[0], context=context)
if picking.state != 'partially_available': return
quant_obj = self.pool.get("stock.quant")
move_obj = self.pool.get("stock.move")
#waiting_ids = move_obj.search(cr,uid,[('picking_id','=',ids[0]),('state','=','confirmed')],context=context)
coe_list = []
#for move_unreserved in move_obj.browse(cr,uid,waiting_ids,context=context):
for move_unreserved in picking.move_lines.filtered(lambda r: r.state == 'confirmed'):
if move_unreserved.coe_no not in coe_list :
coe_list.append(move_unreserved.coe_no)
#assigned_ids = move_obj.search(cr,uid,[('picking_id','=',ids[0]),('state','=','assigned')],context=context)
#for move in move_obj.browse(cr,uid,assigned_ids,context=context) :
for move in picking.move_lines.filtered(lambda r: r.state == 'assigned'):
if move.coe_no not in coe_list : continue
quant_obj.quants_unreserve(cr, uid, move, context=context)
ancestors = []
move2 = move
while move2:
ancestors += [x.id for x in move2.move_orig_ids]
move2 = not move2.move_orig_ids and move2.split_from or False
if ancestors:
move.write({'state': 'waiting'})
else:
move.write({'state': 'confirmed'})
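# --- Hedged usage sketch (added for illustration; not part of the original
# module): builds the spreadsheet layout expected by
# stock_picking.import_moves_from_excel above -- a header row with the columns
# "name" and "quantity", followed by one row per product. The product names
# and output path are made up, and a reasonably recent openpyxl Workbook API
# is assumed.
def _example_build_moves_workbook(path='/tmp/moves.xlsx'):
    from openpyxl import Workbook
    wb = Workbook()
    ws = wb.worksheets[0]
    for row in (('name', 'quantity'),
                ('Sample Product A', 3),
                ('Sample Product B', 1)):
        ws.append(row)
    wb.save(path)
    return path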
| 46.344186
| 255
| 0.601666
|
bc4163dbbfffdf72c7aabe4a148a133b86b79c2c
| 96,001
|
py
|
Python
|
tlux/plot.py
|
tchlux/tlux
|
873cf3b1cf1466863f0fb95f23afe149ff89ad79
|
[
"MIT"
] | 1
|
2022-03-30T18:43:25.000Z
|
2022-03-30T18:43:25.000Z
|
tlux/plot.py
|
tchlux/tlux
|
873cf3b1cf1466863f0fb95f23afe149ff89ad79
|
[
"MIT"
] | null | null | null |
tlux/plot.py
|
tchlux/tlux
|
873cf3b1cf1466863f0fb95f23afe149ff89ad79
|
[
"MIT"
] | null | null | null |
# This module serves to provide a simplified interface to *offline*
# python plotly plotting. The user can produce plots without ever
# interacting directly with the dictionary objects that plotly
# expects. This module currently supports 2D and 3D scatter plots with
# numerical axes, histograms, subplots (with varying numbers of plots
# in each row), animations, box-plots, and plot annotations.
#
# Required packages:
# random, numbers, os, webbrowser, sys, re, tempfile
# numpy
# scipy
#
# Imports nested in appropriate functions:
# import plotly
# from scipy.spatial import ConvexHull
# from scipy.spatial import Delaunay
#
# INSTALLATION:
#
# Installation requires a SPECIFIC VERSION OF PLOTLY. Here is the
# standard set of packages required for usage:
#
# pip install scipy
# pip install numpy
# pip install plotly==2.0.15
#
# This package will not work with newer versions of plotly because
# they changed the underlying storage data types for figures.
# Any plotly update for this body of code is unlikely.
#
#
# USAGE:
#
# The available (user accessible) functions are:
#
# plot.Plot -- The primary class for holding / creating plots.
# plot.multiplot -- A mechanism for plotting multiple Plot
# objects in the same window.
# plot.create_html -- A function for generating a local HTML file
# from a figure object (in Plotly terms).
# plot.iplot -- A convenience wrapper for generating
# interactive plots in a Jupyter notebook with
# multiplot functionality as well.
#
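# EXAMPLE (a hedged sketch, not part of the original comments; the exact
# keyword arguments accepted by "plot" are documented on the method itself):
#
#   import numpy as np
#   from tlux.plot import Plot
#   p = Plot(title="Demo", x_title="x", y_title="sin(x)")
#   p.add_function("sine", lambda x: float(np.sin(x)), [0, 6.3])
#   p.plot()   # produce (and typically open) a local HTML file
#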
# --------------------------------------------------------------------
# DEVELOPER COMMENTS
#
# TODO: Plot.add_zero_line(func)
# TODO: Plot.add_frame(..., persist=True)
# TODO: Adding multiple frames where the first has no edges and the
# rest have edges causes all frames to look like first.
#
# --------------------------------------------------------------------
import random, numbers, os, webbrowser, sys, re, tempfile
import numpy as np
NOTEBOOK_MODE = False # Jupyter notebook mode
PLOT_MARGIN = 50 # In pixels
PLOT_POINTS = 1000 # Number of samples
BRIGHTNESS_RANGE = 0.6 # For default shading of points
RANDOM_SEED = 0 # Seed used for new color generation
MIN_PALETTE_COLORS = 40 # Number of palette entries to create
PREVIOUS_FILE_NAMES = [] # <- for tracking auto-append.
DEFAULT_CAMERA_POSITION = dict(
up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0),
eye=dict(x=-1.0, y=-2.0, z=0.7)
) # ^^ When viewing 3D plots.
# Save the color palette for plotting a gradient
# PALETTE SOURCE: colorlover as cl
# PALETTE SOURCE: np.array(cl.to_numeric(cl.scales['11']['div']['Spectral']))[::-1]
DEFAULT_GRADIENT = np.array([[ 94., 79., 162.],
[ 50., 136., 189.],
[ 102., 194., 165.],
[ 171., 221., 164.],
[ 230., 245., 152.],
[ 255., 255., 191.],
[ 254., 224., 139.],
[ 253., 174., 97.],
[ 244., 109., 67.],
[ 213., 62., 79.],
[ 158., 1., 66.]])
# PALETTE SOURCE: colorlover as cl
# PALETTE SOURCE: np.array(cl.to_numeric(cl.scales['5']['qual']['Set2']))
PALETTE = np.array([[ 102., 194., 165.],
[ 252., 141., 98.],
[ 141., 160., 203.],
[ 231., 138., 195.],
[ 166., 216., 84.]])
PALETTE = PALETTE**2
PALETTE = PALETTE / np.max(PALETTE) * 255
# Re-order the palette so that the colors appear better
PALETTE = np.concatenate((PALETTE[1:], [PALETTE[0]]))
# Expand the palette using random combinations of existing colors
random.seed(RANDOM_SEED)
palette_size = len(PALETTE)
for i in range(MIN_PALETTE_COLORS - palette_size):
# Create lots of extra colors
c = np.array([random.choice(PALETTE[:palette_size,0]),
random.choice(PALETTE[:palette_size,1]),
random.choice(PALETTE[:palette_size,2])])
# Add this new random color to the palette
PALETTE = np.concatenate( (PALETTE, [c]), axis=0 )
# Re-seed the random number generator so that it is not tainted
random.seed()
# ==================================================================
# SameAs Decorator
#
# Decorator that copies the documentation and arguments of another
# function (specified as input). Useful for making decorators (:P)
# Optional "mention_usage" updates documentation when True to disclose
# the name of the function being wrapped (and the reuse of signature).
#
# USAGE:
#
# @same_as(<func_to_copy>)
# def <function_to_decorate>(...):
# ...
#
# OR
#
# <function> = same_as(<func_to_copy>)(<function_to_decorate>)
#
def same_as(to_copy, mention_usage=False):
import inspect
# Create a function that takes one argument, a function to be
# decorated. This will be called by python when decorating.
def decorator_handler(func):
if hasattr(func, "__name__"): original_name = func.__name__
else: original_name = str(func)
# Set the documentation string for this new function
documentation = inspect.getdoc(to_copy)
if documentation == None:
documentation = inspect.getcomments(to_copy)
# Store the documentation and signature into the wrapped function
if hasattr(to_copy, "__name__"):
func.__name__ = to_copy.__name__
if mention_usage:
documentation = (
"\nThe function '%s' has been decorated with the signature "+
"of '%s'. (likely for aliasing / decoration)\n\n")%(
original_name, to_copy.__name__) + documentation
# Try copying the signature if possible
try: func.__signature__ = inspect.signature(to_copy)
except ValueError: pass
# Finalize by copying documentation
func.__doc__ = documentation
return func
# Return the decorator handler
return decorator_handler
# Class that serves as an interface to the standard "data & layout"
# containers that need to be managed in order to produce Plotly plots.
# This class uses the offline modes of plotly to produce local HTML
# files rather than the standard web-based ones. This class also
# attempts to strip web-related features (such as the Plotly logo)
# from the upper-right hand corner plot interface.
#
# All functionality is encapsulated in the "Plot.add" command, which
# allows for all standard plotly options to be controlled in the
# construction of data, along with the "Plot.plot" command, which
# allows for all standard plotly options that control layout.
#
# Additional methods that are effectively decorated versions of the
# "add" command include:
# add_histogram -- For quickly creating vertically oriented or
# horizontally oriented histograms.
# add_function -- For passing a function and automatically
# sampling it across a meshgrid and plotting.
# add_region -- For drawing convex regions in 2D by providing
# a boolean function that is True inside the
# region and False outside of the region.
# add_annotation -- For adding text descriptions with arrows over
# points of interest in an existing plot.
#
# The "plot" function is also capable of appending to existing HTML
# files by setting the keyword argument "append=True". This is nice
# for producing a single scrollable multi-page HTML report of plots.
#
# The "multiplot" function, provided in this module (not part of the
# 'Plot' class), allows for the production of single pages that
# contain multiple plots. See documentation of "multiplot" for more
# details.
#
#
# Initialization controls for a Plot can be changed at any point by
# setting the named attribute of the Plot class instantiation. They are:
#
# AXIS CONTROL
# title -- The title of this plot.
# x_title -- The x-axis title for this plot.
# y_title -- The y-axis title for this plot.
# z_title -- The z-axis title for this plot.
#
# PLOT CONTROL
# mode -- The default plotly plot mode to be used.
# palette -- A numpy array (N rows, 3 columns) of ordered plot
# series colors.
#
# FONT CONTROL
# font_family -- The family of font used for axes.
# font_color -- The color of the font used for axes.
# font_size -- The size of the font used for axes.
class Plot:
def __init__(self, title="", x_title="x", y_title="y",
z_title="z", mode="markers", palette=PALETTE,
font_family=None, font_color=None, font_size=None):
self.title = title
self.x_title = x_title
self.y_title = y_title
self.z_title = z_title
self.x_min_max = [float('inf'), -float('inf')]
self.y_min_max = [float('inf'), -float('inf')]
self.z_min_max = [float('inf'), -float('inf')]
# Specific booleans for tracking internal state
self.is_3d = False
self.to_reverse = []
# Data for tracking default plot settings
self.color_num = -1
self.data = list()
self.annotations = list()
self.mode = mode
self.palette = palette
self.palette_size = len(palette)
# Font settings
self.font_family = font_family
self.font_color = font_color
self.font_size = font_size
# Return an appropriate face color of a simplex given the simplex,
# data z values, and either (color index and opaicty, or a list of
# colors associated with each data value.
def _simp_color(self, simp, z, color_ind=None, opacity=1.0, colors=None):
shift = max(z)
scale = shift - min(z)
has_none = type(None) in (type(v) for v in z[simp])
if (scale > 0) and (not has_none):
# If colors were provided, then average them to produce out color
if type(colors) != type(None):
# Return the color if there is only one.
if (type(colors) == str): return colors
# Get the color of each node in the simplex as a numpy array
colors = [colors[i] for i in simp]
# colors = [(colors if type(colors) == str else colors[i]) for i in simp]
colors = [c[c.index('(')+1:c.index(')')].split(',')
for c in colors]
colors = np.array([list(map(float,c)) for c in colors])
if colors.shape[1] != 4:
colors = np.concatenate((
colors,np.ones(shape=(colors.shape[0],1))),
axis=1)
# return the average color of points in the simplex
return 'rgba(%f,%f,%f,%f)'%tuple(np.sum(colors,axis=0) / len(simp))
else:
simp_avg = sum(z[simp]) / len(simp)
brightness = (1.0-BRIGHTNESS_RANGE/2) + ((simp_avg - shift) / scale) * BRIGHTNESS_RANGE
else:
brightness = 1.0
return self.color(color_ind, brightness, opacity)
# Prepare all annotations for the type of plot being presented.
def _clean_annotations(self, annotations):
if not self.is_3d:
for a in annotations:
a.pop('z', '')
else:
for a in annotations:
if type(a['z']) == type(None):
a['z'] = 0
a.pop("axref","")
a.pop("ayref","")
return annotations
# Prepares all the data sets to be plotted in whatever dimension
# is highest (2 or 3). Creates 3D meshes for all surfaces. Should
# be stable if called multiple times, but this code is still in
# development stage.
def _clean_data(self, data):
from scipy.spatial import Delaunay
from scipy.spatial.qhull import QhullError
# Remove the extra color attribute stored for easy access
# any_heatmaps = any(d.get("type","") == "heatmap" for d in data)
for d in data:
d.pop("color","")
if d["type"] == "heatmap":
d.pop("marker","")
d.pop("mode","")
if d["type"] == "box":
d.pop("text","")
# d.pop("line","")
# d.pop("fill","")
# d.pop("fillcolor","")
# if any_heatmaps:
# pass
# Remove all references to 3D layout if this is a 2D plot
if not self.is_3d:
# 2D PLOT SETUP
for d in data:
d.pop('z','')
# WARNING: I COMMENTED THESE, NOT SURE WHY THEY'RE THERE
# d.pop('hoverinfo','')
# d.pop('text','')
# Special case for plotting histograms
if d['type'] == 'histogram':
if type(d.get('y','')) == type(None):
d.pop('y','')
if type(d.get('x','')) == type(None):
d.pop('x','')
d.pop('line','')
d.pop('mode','')
d.pop('fill','')
d['opacity'] = d['marker'].pop('opacity','')
d['marker'].pop('symbol','')
d['marker'].pop('size','')
d['marker']['color'] = d.pop('fillcolor','')
if d['type'] == 'box':
d['line'].pop('dash','')
d.pop('mode','')
d.pop('fill','')
d.pop('layout','')
else:
# 3D PLOT SETUP
for ind,d in enumerate(data):
# Add z values to all scatters that may have been added
if d['type'] == 'scatter':
d['z'] = np.zeros(len(d['x']))
d['type'] = 'scatter3d'
if d['marker']['size'] == None:
d['marker']['size'] = 5
# Convert fill and / or lines into surfaces
conv_2d = (not self.is_3d) and ('lines' in d['mode'])
if (d.get('fill','') == 'toself') or conv_2d:
print("WARNING: Converting 2D to 3D automatically.")
d['type'] = 'surface'
# Get the opacity of the surface
if d.get('fill','') != None:
d['opacity'] = float(d['fillcolor'].split(',')[-1].strip(')'))
else:
d['opacity'] = float(d['line']['color'].split(',')[-1].strip(')'))
# If user wants a surface, construct one!
# (plotly default surfaces are not very cooperative)
if ('surface' in d['type']):
points_2D = np.vstack([d['x'], d['y']]).T
try:
mesh = Delaunay(points_2D)
simps = mesh.simplices
# Compute the diameter of all simplices.
simp_diameters = {}
for i in range(len(simps)):
simp_pts = mesh.points[simps[i]]
center = simp_pts.mean(axis=0)
diameter = np.linalg.norm(center-simp_pts, axis=1).max()
simp_diameters[i] = diameter
diameters = list(simp_diameters.values())
hull_indices = set(np.unique(mesh.convex_hull))
# Filter out simplices with irregularly large diameter
# that are on the convex hull of the data.
diameter_50, diameter_95 = np.percentile(diameters, [50,95])
max_diameter = diameter_50 + 2*(diameter_95-diameter_50)
for i in range(len(simps)):
if ((simp_diameters[i] > max_diameter) and
(len(set(simps[i]) & hull_indices) > 0)):
simp_diameters.pop(i)
simps = simps[sorted(simp_diameters)]
# Add the plotly expected values.
d['type'] = 'mesh3d'
d['i'] = simps[:,0]
d['j'] = simps[:,1]
d['k'] = simps[:,2]
# Generate face colors with average simplex z-coordinate
d['facecolor'] = list(map(
lambda simp: self._simp_color(
simp, d['z'], ind,
d['marker']['opacity'],
d['marker']['color']),
simps
))
if 'opacity' not in d:
d['opacity'] = d['marker']['opacity']
d.pop('marker','')
d.pop('mode','')
d.pop('text','')
except QhullError:
d['type'] = 'scatter3d'
if 'mode' not in d:
d['mode'] = 'lines'
# Pop out the unnecessary attributes for 3D plots
d.pop('fill','')
d.pop('fillcolor','')
if 'line' not in d.get('mode',''):
d.pop('line','')
# Manage plotly reverse order bug (only happens with "tonext[xy]")
def _reorder_data(self, data):
start = end = None
# Cycle through the elements of data
for i,tr in enumerate(self.to_reverse):
if (tr and start==None):
start = i
if (not tr and start!=None):
end = i+1
# Reverse that group of plot series
data[start:end] = data[start:end][::-1]
start = end = None
# Reverse the final group when self.to_reverse[-1] == True
if (start!=None):
end = len(data)
data[start:end] = data[start:end][::-1]
# self.to_reverse = [False] * len(data)
# Fix the fills that should be left alone
for d in data:
if ("toprev" in str(d.get("fill",""))):
d["fill"] = d["fill"].replace("toprev","tonext")
# ===================================
# User accessible functions
# ===================================
# Interface to the automatic palette-based color scheme for this
# plot. This method produces an rgb string compatible with
# standard plotly "rgba(%i,%i,%i,%f)"%(<red>,<green>,<blue>,<alpha>).
#
# This method takes the following arguments:
# Arg Name (Default) -- Description
#
# number (None) -- Index in the palette of the desired color.
# brightness (1.0) -- Value ranging from 0.0 to 1.0 that can
# be used to produces shades of the same color.
# alpha (1.0) -- Opacity of the color produced. Note, 0.0
# for this argument will cause the color
# to be invisible.
# color (None) -- String ".*([0-9]+,[0-9]+,[0-9][,0-9\.]*).*",
# list, tuple, or numpy array meant to be
# converted into the standard rgba string.
def color(self, number=None, brightness=1.0, alpha=None, color=None):
# If the user only passed a color, swap for convenience.
if (type(number) == tuple):
number, color = None, number
# Otherwise assume a number was sent.
if type(color) == type(None):
if (number == None): number = self.color_num
if (number < len(self.palette)):
# If we have fewer entries than the palette size
c = self.palette[number]
else:
# Otherwise we have to create a new palette entry
c = np.array([random.choice(self.palette[:self.palette_size,0]),
random.choice(self.palette[:self.palette_size,1]),
random.choice(self.palette[:self.palette_size,2])])
# Add this new random color to the palette
self.palette = np.concatenate( (self.palette, [c]), axis=0 )
elif type(color) == str:
# Get the color as a list of numbers
c = color[color.index('(')+1:color.index(')')].split(',')
# Make sure the color only has [red, green, blue, alpha]
c = np.array(list(map(float,c)))
if (len(c) > 3) and (type(alpha) == type(None)):
alpha = c[-1]
c = c[:3]
elif (type(color) == tuple or
type(color) == list or
type(color) == np.ndarray):
c = np.array(color[:3])
if (len(color) > 3) and (type(alpha) == type(None)):
alpha=color[-1]
else:
raise(Exception("ERROR: Color must either be a string, tuple, list, or numpy array."))
# Define a default alpha if necessary
if type(alpha) == type(None):
alpha = 1.0
# Apply the brightness to the color
c = c*brightness
c = np.where(c > 255, 255, c)
c = np.where(c < 0, 0, c)
# Return the color as a plotly color string
return 'rgba(%i,%i,%i,%f)'%(tuple(c)+(alpha,))
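    # Hedged usage sketch (illustration only):
    #   p = Plot()
    #   series_color = p.color(0)                        # first palette color
    #   shaded = p.color(0, brightness=0.5, alpha=0.8)   # darker, translucent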
# Decorated "add" function that automatically attempts to
# find the edge of a convex 2D region given a function that is
# True inside and False outside. Uses a meshgrid of "plot_points"
# points in order to approximate the boundary of the region.
#
# name -- The string name of the series being added
# func -- A function that, given a single (x,y) point
# returns True or False.
# min_max_x -- A length-2 iterable for the x-range over which
# to apply the meshgrid.
# min_max_y -- A length-2 iterable for the y-range over which
# to apply the meshgrid.
# plot_points -- The number of plot points in the
# meshgrid. Higher numbers will yield more precise
# boundaries for the region.
# ... <standard "add" arguments with adjusted defaults> ...
def add_region(self, name, func, min_max_x=None, min_max_y=None,
plot_points=PLOT_POINTS, mode="lines", opacity=0.1,
fill="toself", line_width=0, nonconvex=True, **kwargs):
from scipy.spatial import ConvexHull
if self.is_3d: raise(Exception("ERROR: Regions only work for 2D plots."))
if type(min_max_x) == type(None):
min_max_x = self.x_min_max.copy()
if type(min_max_y) == type(None):
min_max_y = self.y_min_max.copy()
if max(map(abs,min_max_x+min_max_y)) == float('inf'):
raise(Exception("ERROR: Invalid x or y range."))
# Round up the number of plot points per axis
plot_points = int(plot_points**(0.5) + 0.5)
# Calculate the mesh grid of x and y values
x_vals = (np.linspace(*min_max_x, num=plot_points),)
y_vals = (np.linspace(*min_max_y, num=plot_points),)
x,y = np.meshgrid(x_vals, y_vals)
test_pts = np.vstack((x.flatten(), y.flatten())).T
in_region = np.array([func(pt) for pt in test_pts])
region_pts = test_pts[in_region]
if nonconvex:
opacity *= 3
self.add(name, region_pts[:,0], region_pts[:,1], mode='markers', symbol='square',
opacity=opacity, marker_line_width=0, marker_size=10,**kwargs)
else:
# Try reducing to the set of convex hull points for the region
# and plotting that, if it fails simply print an error message.
hull_pts = region_pts[ConvexHull(region_pts).vertices]
self.add(name, hull_pts[:,0], hull_pts[:,1], mode=mode,
opacity=opacity, fill=fill,
line_width=line_width, **kwargs)
# Decorated "add" function that automatically generates the
# response values for a given "func" over a meshgrid using
# "plot_points" points (works for 2D or 3D plotting depending on
# how many "min_max..." ranges are provided).
#
# name -- The string name of the series being added
# func -- A function that, given a single (x[,y]) point
# returns a numeric type object.
# min_max_x -- A length-2 iterable for the x-range over which
# to apply the meshgrid.
# min_max_y -- A length-2 iterable for the y-range over which
# to apply the meshgrid. (only provided for 3D)
# grid_lines -- Whether or not to add lines whose intersections
# show where plot points were placed (only works
# for 3D plotting).
# plot_points -- The number of plot points in the meshgrid.
    #  vectorized  -- True if the provided function accepts a matrix of
    #                 points as row-vectors for faster execution.
#
# ... <standard "add" arguments with adjusted defaults> ...
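    #
    # A minimal usage sketch (names and ranges are hypothetical; shown
    # as comments, not executed here):
    #
    #   p = Plot("Function demo")
    #   p.add_function("x squared", lambda x: x**2, [-2, 2])
    #   p.plot(show=False)
    #
    # Passing a second range (min_max_y) switches to a 3D surface, in
    # which case the function receives a length-2 point, e.g.
    # lambda x: x[0]**2 + x[1]**2.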
def add_function(self, name, func=None, min_max_x=None, min_max_y=[],
x_vals=None, y_vals=None,
grid_lines=True, plot_points=PLOT_POINTS,
vectorized=False, mode=None, plot_type=None,
use_gradient=None, **kwargs):
if (len(min_max_y) > 0):
self.is_3d = True
        elif ((x_vals is not None) and
              (np.asarray(x_vals).ndim == 2) and
              (np.asarray(x_vals).shape[1] == 2)):
            self.is_3d = True
# If we have two control axes, square root the plot points
if self.is_3d:
plot_points = int(plot_points**(0.5) + 0.5)
# If no y was provided, set it to default value
if len(min_max_y) == 0: min_max_y = [0.0,0.0]
            if (mode == None) and (plot_type == None): plot_type = 'surface'
# Set the gradient for 3d plots.
if (use_gradient is None) and ("color" not in kwargs):
use_gradient = True
else:
if mode == None: mode = 'lines'
# If x_vals are not provided, then generate some.
if (x_vals is None):
assert (min_max_x is not None), "Expected either 'x_vals' or 'min_max_x' to be provided."
# Convert the minimum and maximum values into floats.
min_max_x = (float(min_max_x[0]), float(min_max_x[1]))
# Generate the input points
x_vals = (np.linspace(*min_max_x, num=plot_points),)
if self.is_3d:
x_vals += (np.linspace(*min_max_y, num=plot_points),)
x_vals = tuple(x.flatten() for x in np.meshgrid(*x_vals))
x_on_grid = True
else:
# If x_vals are provided, use those instead.
x_vals = np.asarray(x_vals)
if (len(x_vals.shape) == 1):
x_vals = x_vals.reshape((-1,1))
assert (x_vals.shape[1] in {1,2}), f"Expected 1 or 2 dimensions in 'x_vals', received {x_vals.shape[1]}."
x_vals = x_vals.T
x_on_grid = False
        if (y_vals is None):
assert (func is not None), "Expected either 'func' or 'y_vals' to be provided."
# Get the response values
if vectorized:
# Try vectorizing the function evaluation
response = list(func(np.vstack(x_vals).T))
else:
# Otherwise evaluate the function one point at a time
response = [func(x[0] if len(x) == 1 else x) for x in np.vstack(x_vals).T]
try:
# Make sure all "None" values are in brackets
while None in response: response[response.index(None)] = [None]
except ValueError:
raise(Exception("The provided function returned a non-numeric value."))
response = np.array(response, dtype=float).flatten()
else:
y_vals = np.asarray(y_vals)
            assert ((len(y_vals.shape) == 1) or (y_vals.shape[1] == 1)), f"Expected 1 dimension in 'y_vals', received {y_vals.shape[1]}."
y_vals = y_vals.flatten()
response = y_vals
if "hoverinfo" not in kwargs: kwargs["hoverinfo"] = "name+x+y"+("+z" if self.is_3d else "")
# Call the standard plot function
self.add(name, *x_vals, response, mode=mode, plot_type=plot_type,
use_gradient=use_gradient, **kwargs)
# If this is a 3D surface plot and grid_lines=True, add grid lines
if (self.is_3d and plot_type == 'surface') and grid_lines:
opacity = kwargs.get("opacity",1.0)
line_width = kwargs.get("line_width",1.0)
line_color = kwargs.get("line_color",'rgb(0,0,0)')
if (x_on_grid):
for row in range(plot_points):
x = x_vals[0][row*plot_points:(row+1)*plot_points]
y = x_vals[1][row*plot_points:(row+1)*plot_points]
z = response[row*plot_points:(row+1)*plot_points]
self.add("", x,y,z, show_in_legend=False,
group=name+" (lines)", mode="lines",
line_width=line_width, opacity=opacity,
color=line_color, hoverinfo="none")
indices = np.arange(plot_points)*plot_points + row
x = x_vals[0][indices]
y = x_vals[1][indices]
z = response[indices]
self.add("", x,y,z, show_in_legend=False,
group=name+" (lines)", mode="lines",
line_width=line_width, opacity=opacity,
color=line_color, hoverinfo="none")
else:
# Create a triangulation of the points and add lines
# around the simplices? That should probably not happen.
pass
@same_as(add_function, mention_usage=True)
def add_func(self, *args, **kwargs): return self.add_function(*args, **kwargs)
# Decorated "add" function that automatically sets the options
# necessary for plotting an N-bin PDF histogram of a given set of
# values. By default the bars are separated along "bar_spacing"
    # axis; with histnorm="probability" the bar heights together add to 1.
#
# name -- The string name of the series being added
# values -- A list of ints or floats.
# bar_spacing -- "x" if the x-axis should be bins and y-axis
# probabilities, "y" for transposing the setup.
# num_bins -- The number of evenly spaced bins to use when
# generating the histogram.
    #  start_end   -- A (start, end) pair giving the (inclusive) lower
    #                 bound and the (exclusive) upper bound for the bins;
    #                 either entry may be None to use the data extrema.
# padding -- The amount of spacing on the min and max sides
# of the histogram that is produced.
# histnorm -- Standard plotly "histnorm" argument, can be
# "probability" or "count" most commonly.
# barmode -- Standard plotly "barmode" argument. When set to
# "", plotly default will be used where
# multi-series histograms will be non-overlapping.
# When set "overlay", histogram series can overlap.
# opacity -- See "add" function.
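    #
    # A minimal usage sketch (name and data are hypothetical; shown as
    # comments, not executed here):
    #
    #   p = Plot("Histogram demo", "value", "count")
    #   p.add_histogram("samples", np.random.normal(size=1000),
    #                   num_bins=50)
    #   p.plot(show=False)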
def add_histogram(self, name, values, start_end=(None,None),
bar_spacing="x", num_bins=100, padding=0.03,
opacity=0.7, histnorm='count', marker_line_width=1,
barmode='overlay', **kwargs):
# Check for errors in usage.
if bar_spacing not in ("x", "y"):
raise(Exception("ERROR: Invalid 'bar_spacing', only 'x' or 'y' are acceptable."))
if num_bins <= 0:
raise(Exception("ERROR: Invalid 'num_bins', must be a positive integer."))
if len(values) == 0:
raise(Exception("ERROR: Empty list passed in for 'values'."))
start, end = start_end
values_name = bar_spacing + "_values"
autobin = "autobin" + bar_spacing
bins = bar_spacing + "bins"
self.histogram_barmode = barmode
# Calculate the range of the histogram
hist_start_val = min(values)
hist_end_val = max(values)
if type(start) != type(None): hist_start_val = start
if type(end) != type(None): hist_end_val = end
# Update the range, start, and end values (to have padding)
hist_value_range = hist_end_val - hist_start_val
hist_start_val -= hist_value_range * padding
hist_end_val += hist_value_range * padding
# Provide necessary keyword arguments (that the user has not already)
if (values_name not in kwargs):
kwargs[values_name] = values
kwargs['histnorm'] = histnorm
if (autobin not in kwargs):
kwargs[autobin] = False
if (bins not in kwargs):
bin_settings = dict( start=hist_start_val,
end=hist_end_val,
size=(hist_value_range - hist_value_range*padding)/num_bins )
kwargs[bins] = bin_settings
# Store the correct extrema to be used for plotting
min_max = getattr(self, bar_spacing+"_min_max").copy()
min_max[0] = min(hist_start_val, min_max[0])
min_max[1] = max(hist_end_val, min_max[1])
# Call the 'add' function with updated arguments
self.add(name, plot_type='histogram', opacity=opacity, **kwargs)
# Make sure min_max were not wrongly changed, use the extrema
# of the desired bins as the range, not the extrema of values
getattr(self, bar_spacing+"_min_max")[0] = min_max[0]
getattr(self, bar_spacing+"_min_max")[1] = min_max[1]
# Decorated "add" function that automatically sets the options
# necessary for plotting a series of box plots of a given set of
# values.
#
# name -- The string name of the series being added
# box_values -- The list of lists of values for each box.
# box_locations -- The x (or y) location of each box.
# orientation -- 'v' -> vertical boxes
# 'h' -> horizontal boxes
# box_mean -- 'sd' -> overlays a standard deviation diamond
# -- True -> adds a dashed line for the mean to the box
# -- False -> only shows the standard quartiles and median
# show_labels -- True -> Show the labels for the box locations
# -- False -> Hide the labels for the box locations
#
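    #
    # A minimal usage sketch (names and data are hypothetical; shown as
    # comments, not executed here):
    #
    #   p = Plot("Box demo")
    #   p.add_box("groups",
    #             box_values=[[1, 2, 3, 4], [2, 4, 6, 8]],
    #             box_locations=["A", "B"])
    #   p.plot(show=False)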
def add_box(self, name, box_values, box_locations=None, orientation="v",
box_mean=True, show_labels=True, **kwargs):
# By default, the x values are just the name of the box
        if type(box_locations) == type(None): box_locations = [name] * len(box_values)
# Check for type errors (because this function requires lists)
if (type(box_locations) != list): box_locations = list(box_locations)
if (type(box_values) != list): box_values = list(box_values)
# Convert x and y to double array format if not provided that way
if type(box_values[0]) != list:
box_values = [[v] for v in box_values]
if type(box_locations[0]) != list:
box_locations = [[v] for v in box_locations]
# Handle the creation of appropriate x and y arrays for box
# plots depending on the orientation that the user wants.
box_locations = [l*len(v) for (l,v) in zip(box_locations,box_values)]
if (orientation == "v"):
# Flatten the lists
x_values = sum(box_locations, [])
y_values = sum(box_values, [])
elif (orientation == "h"):
# Flatten the lists
x_values = sum(box_values, [])
y_values = sum(box_locations, [])
else:
            raise(Exception("ERROR: Only 'v' and 'h' are permissible box orientations."))
self.add(name, x_values, y_values, plot_type="box",
mode="lines", orientation=orientation, **kwargs)
# Primary function for simplifying the interface to plotly
# plotting. This single generic function can be used as a
# full-fledged interface for generating standard plotly "data"
# dictionary object. It can be used for both 2D and 3D plotting,
# and allows for control of all aspects of plot styling.
#
# STANDARD ARGUMENTS: The combination of these that is provided
# determines whether a 2D or 3D plot is produced. "x_values" are
# optional because histograms may only have y-values given. For
# most standard usage, (x,y) will be given for 2D, (x,y,z) for 3D.
#
# name -- Name of the series to be plotted
# x_values -- The x-values associated with the series
# y_values -- The y-values associated with the series
# z_values -- The z-values associated with the series
#
# HIGH-LEVEL STYLING:
# mode -- The plotly series mode, "lines", "markers",
# "text", or combinations with a "+" between.
# plot_type -- The plotly plot_type, "scatter[3d]" for plots
# of lines and dots, "surface" for 3D surfaces,
# "histogram" for producing histograms.
# group -- The legend-series group name. This is used
# for the simultaneous hide/show of multiple
# series. This will cause increased legend spacing.
# show_in_legend -- True or False for if this series should show
# in the legend. Currently plotly legends do
# *not* support 3D surfaces in legends.
# shade -- True or False if the given data series should
# be shaded with different brightnesses based
# on magnitude.
# use_gradient -- True or False if a gradient coloring should
# be applied to the given data series.
# palette -- The palette to use when creating a gradient
# of colors for the "use_gradient" option.
# text -- A list of the text strings that should be
# shown for each data point when a user hovers
# with their mouse over that data point.
#
# LOW-LEVEL STYLING:
# color -- The series color as a tuple/list/array of
# (<red>,<green>,<blue>[,<alpha>])
# rgb in [0,255], alpha in [0,1]
# opacity -- Transparency constant for series color, 0 is
# completely transparent, 1 is completely opaque.
# this value is overwritten if "color" has 4 numbers.
# line_color -- The color of the line for this series
# line_width -- The width of the line for this series
# fill -- Almost exactly the plotly "fill" argument,
    #                 options include "tonexty" "tozeroy" "toself"
# and the same for x. If "tonext[xy]" is used,
    #                 the legend will be reversed. (plotly bug)
# fill_color -- The color to use for the fill if active.
# fill_opacity -- The opacity of the fill color.
# symbol -- The marker symbol, standard plotly. "circle",
# "square", and a lot more on their website.
# dash -- Standard plotly "dash" option. "solid", "dot",
# "dash", or "1px,2px,5px[,[0-9]*px]*" list of lengths
# marker_size -- The size (in pixels) of markers
# marker_colors -- The color of markers
# marker_line_width -- The width of the bounding line of markers
# marker_line_color -- The color of the bounding line of markers
# hoverinfo -- The information displayed when the user's
# mouse hovers over the plot. Options include
# "x" "y" "z" "text" "name", combined with "+"
#
# ... <any additional plotly data-dictionary args> ...
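    #
    # A minimal usage sketch (names and data are hypothetical; shown as
    # comments, not executed here):
    #
    #   p = Plot("Series demo", "x", "y")
    #   p.add("dots", [0, 1, 2], [0, 1, 4], mode="markers")
    #   p.add("line", [0, 1, 2], [0, 1, 2], mode="lines", dash="dot")
    #   p.add("both", [0, 1, 2], [1, 2, 3], mode="markers+lines",
    #         color=(200, 50, 50), marker_size=8)
    #   p.plot(show=False)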
def add(self, name, x_values=None, y_values=None, z_values=None,
mode=None, plot_type=None, group=None,
show_in_legend=True, shade=False, use_gradient=None,
palette=DEFAULT_GRADIENT, text=None, color=None,
opacity=1.0, line_color=None, line_width=None, fill=None,
fill_color=None, fill_opacity=0.6, symbol='circle',
dash=None, marker_size=None, marker_colors=None,
marker_line_width=0, marker_line_color='rgba(50,50,50,0.8)',
hoverinfo='name+x+y+z', frame=None, **kwargs):
# Convert the x, y (and z) values into numpy arrays and
# store 'values' for creating marker colors based on magnitude
if type(x_values) != type(None):
# WARNING: Plotly allows for string "x" values for some plots.
try: x_values = np.asarray(x_values, dtype=float)
except ValueError: pass
# Get the "values" as the 'x'.
values = x_values
no_none = [v for v in x_values if isinstance(v,numbers.Number)]
if len(no_none) != 0:
self.x_min_max = [min(min(no_none), self.x_min_max[0]),
max(max(no_none), self.x_min_max[1])]
if type(y_values) != type(None):
y_values = np.asarray(y_values, dtype=float)
values = y_values
no_none = [v for v in y_values if isinstance(v,numbers.Number)]
if len(no_none) != 0:
self.y_min_max = [min(min(no_none), self.y_min_max[0]),
max(max(no_none), self.y_min_max[1])]
if type(z_values) != type(None):
self.is_3d = True
z_values = np.asarray(z_values, dtype=float)
values = z_values
no_none = [v for v in z_values if isinstance(v,numbers.Number)]
if len(no_none) != 0:
self.z_min_max = [min(min(no_none), self.z_min_max[0]),
max(max(no_none), self.z_min_max[1])]
# Make a nice pretty gradient of color
if use_gradient and (len(values) > 1):
marker_colors = color_data(values, palette)
# Define z-values if none were given and we need them, and plot type
if self.is_3d:
if plot_type == None:
plot_type = 'scatter3d'
if type(z_values) == type(None):
z_values = np.zeros(len(x_values))
# Define text for all the data points
if (hoverinfo != None) and ("text" in hoverinfo) and (text == None):
# hoverinfo = None
# text = None
# WARNING: Sometimes this is causing problems where
# the hoverinfo labels do not update on scroll, it
# looks like another bug in the python plotly.
text = ["%s: %s<br>%s: %s<br>%s: %s"%(
self.x_title,x, self.y_title,y, self.z_title,z)
for (x,y,z) in zip(x_values,y_values,z_values)]
else:
if plot_type == None:
plot_type = 'scatter'
# Process mode
if type(mode) == type(None):
mode = self.mode
# Set the color if none was provided
if type(color) == type(None):
            if (frame != None) and any((name == d["name"]) for d in self.data):
                # Reuse the color of the existing series with this name.
                for d in self.data[::-1]:
                    if d["name"] == name:
                        color = d["color"]
                        break
else:
self.color_num += 1
color = self.color(self.color_num, alpha=opacity)
else:
# WARNING (removed): Cancel shading if a color was provided.
# shade = False
# Automatically convert integer color numbers into colors.
if (type(color) == int): color = self.color(color, alpha=opacity)
# Automatically convert tuple colors to color strings.
if (type(color) == tuple) and (len(color) in {3,4}):
color = ("rgba" if len(color) == 4 else "rgb") + str(color)
if type(line_color) == type(None):
line_color = color
if type(fill_color) == type(None):
fill_color = self.color(color=color, alpha=fill_opacity)
else:
fill_color = self.color(color=fill_color)
if not marker_colors:
if shade:
marker_colors = []
no_none = [v for v in values if v != None]
if len(no_none) > 1:
shift = min(no_none)
scale = max(no_none) - shift
if scale == 0: scale = 1.0
for v in values:
if not isinstance(v,numbers.Number):
raise(Exception((
"ERROR: '%s' not permitted. Only "+
"numbers are allowed as values.")%(v)))
brightness = ((1.0-BRIGHTNESS_RANGE/2) +
((v - shift) / scale) *
BRIGHTNESS_RANGE)
marker_colors.append( self.color(color=color,
brightness=brightness,
alpha=opacity) )
else:
marker_colors = color
else:
marker_colors = color
# Special plotly failure mode, need to reverse data for
# 'tonext' to actually mean 'next' instead of 'previous'. This
# bug has been reported, but no one in the plotly community is
# addressing it (or even noticing it) as a problem.
self.to_reverse.append((type(fill) == str) and ("tonext" in fill))
# print("Using color:", color)
# Now add the standard plotly "data" object to local storage
self.data.append(dict(
type = plot_type,
name = name,
x = x_values,
y = y_values,
z = z_values,
hoverinfo = hoverinfo,
text = text,
color = color,
# Set up the marker style
marker = dict(
# Generate colors based on point magnitude
# color = color if ("lines" in mode) else marker_colors,
color = marker_colors,
size = marker_size,
opacity = opacity,
symbol = symbol,
line = dict(
width = marker_line_width,
color = marker_line_color
)),
line = dict(
width = line_width,
color = line_color,
dash = dash
),
mode = mode,
fill = fill,
fillcolor = fill_color,
legendgroup = group,
showlegend = show_in_legend
))
# Update the newly created dictionary with any custom user settings
self.data[-1].update(kwargs)
# If the user is preparing for an animation, the store the
# frame number associated with this data dictionary.
if type(frame) != type(None):
self.data[-1]["frame"] = str(frame)
# Add an annotation to the plot. These will be text boxes
# stationed in the absolute foreground of the plot, disregarding
# occlusion in 3D plots.
#
# STANDARD ARGUMENTS
# text -- The text to display in the annotation.
# x -- The x coordinate of the arrow for the annotation
# y -- The y coordinate of the arrow for the annotation
# z -- The z coordinate (if applicable) of the arrow for the annotation
#
# ANNOTATION CONTROL
    #  ax          -- The x screen pixels offset for the annotation box (+ is right)
    #  ay          -- The y screen pixels offset for the annotation box (+ is down)
# opacity -- The transparency of the entire annotation
# textangle -- The angle of the annotation (and bounding box)
# align -- The alignment of text within the annotation box
# xanchor -- The box-x anchor point for the extending arrow
# yanchor -- The box-y anchor point for the extending arrow
#
# FONT CONTROL
# font_family -- The family of font used in the annotation
# font_color -- The color of the font used in the annotation
# font_size -- The size of the font used in the annotation
#
# BORDER CONTROL
# border_color -- The color of the border of the annotation box
# border_width -- The thickness of the border of the annotation box
# border_pad -- The padding between the annotation text and box
# bg_color -- The background color of the annotation box
#
# ARROW CONTROL
# show_arrow -- Whether or not to show an arrow at all
# arrow_color -- The color of the arrow
# arrow_size -- The size of the arrow head
# arrow_width -- The width of the arrow line
# arrow_head -- The type of arrow head. 0 -> None, 1-5 -> Arrows,
# 6 -> Dot, 7 -> Box, >7 -> None
#
# ... <any additional plotly annotation-dictionary args> ...
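    #
    # A minimal usage sketch (text and coordinates are hypothetical;
    # shown as comments, not executed here):
    #
    #   p = Plot("Annotation demo")
    #   p.add("curve", [0, 1, 2], [0, 1, 4])
    #   p.add_annotation("peak", 2, 4, ax=-40, ay=-30, arrow_head=2)
    #   p.plot(show=False)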
def add_annotation(self, text, x, y, z=None, ax=None, ay=None,
axref=None, ayref=None, opacity=0.8,
text_angle=0, align="left", x_anchor="center",
y_anchor="bottom", font_family="Arial",
font_color="#0a0a0a", font_size=12,
border_color="#1a1a1a", border_width=0,
border_pad=4, bg_color="#f0f0f0",
show_arrow=True, arrow_color="#666",
arrow_size=1, arrow_width=1, arrow_head=7,
**kwargs):
# # Assign default ax and ay references based on provided info
# if (ax != None) and (axref == None) and (z == None): axref = "x"
# if (ay != None) and (ayref == None) and (z == None): ayref = "y"
# Add computed values for the annotation x and y
if show_arrow:
if ax == None: ax = 10
if ay == None: ay = -20
else:
if ax == None: ax = 0
if ay == None: ay = 0
# Add the annotation
self.annotations.append(dict(
text=text,
# Target location
x = x,
y = y,
z = z,
ax = ax,
ay = ay,
axref = axref,
ayref = ayref,
# Annotation text control
opacity = opacity,
textangle = text_angle,
align = align,
# Anchor and shift
xanchor = x_anchor,
yanchor = y_anchor,
xshift = 0,
yshift = 0,
# Font
font = dict(
family = font_family,
color = font_color,
size = font_size
),
# Border control
bordercolor = border_color,
borderwidth = border_width,
borderpad = border_pad,
bgcolor = bg_color,
# Arrow control
showarrow = show_arrow,
arrowcolor = arrow_color,
arrowsize = arrow_size,
arrowwidth = arrow_width,
arrowhead = arrow_head,
))
self.annotations[-1].update(kwargs)
# Second part to the simplified plotly interface. This creates the
# layout-dictionary object and (optionally) produces the HTML and
# opens a browser to view the plot.
#
# COMMON ARGUMENTS:
# title -- Title to display for this plot. (can include
# HTML line break <br> and bold <b>text</b>)
# x_range -- The range of x-values to default to displaying,
# automatically determined by data if possible
# y_range -- The range of y-values to default to displaying,
# automatically determined by data if possible
# z_range -- The range of z-values to default to displaying,
# automatically determined by data if possible
# fixed -- False if plotly should automatically rescale the
# plot when series are hidden/shown, True if
# plotly should not rescale on hide/show.
# show_legend -- True if the legend should be included.
#
# LAYOUT CONTROL:
# layout -- Update to be performed to the plotly
# layout-dictionary that is generated.
# aspect_mode -- For 3D plotting, standard plotly.
# legend -- Legend settings, like the font and location.
# scene_settings -- Standard plotly, for updating the "scene"
# dictionary for 3D plotting.
# axis_settings -- Controls for each of the axes. Include
# things like showgrid, zeroline, showline,
# showticklabels (all boolean) or ticks="<str>",
# type = "log", "date", "category".
# For customizing just one, use
# "x_axis_settings", "y_axis_settings", etc.
# hovermode -- Setting for how to display hover tips, default
# for 2D data is closest x. Use "closest" otherwise.
# camera_position -- A dictionary of dictionaries of x,y,z
# values, "up" is relative up vector, "center"
# is the point about which a 3D plot rotates,
# and "eye" is the camera coordinate.
#
# OUTPUT CONTROL:
# html -- True if "create_html" should be called.
# file_name -- See "create_html".
# show -- See "create_html".
# append -- See "create_html".
# height -- The height of the plot in pixels
# width -- The width of the plot in pixels
#
# ANIMATION CONTROLS:
# loop_duration -- Length in seconds of full play cycle.
# bounce -- True if "play" should go start -> end -> start
# transition -- Type of transition for data options include:
# "linear", "cubic", "quad", "exp", "bounce"
# "elastic", "sin", (all have "-in-out" too)
# data_easing -- True if data should ease, False if not.
# redraw -- True if the plot and legend should be
# redrawn every time the frame changes.
# This will cause the slider to lock (plotly bug).
# slider_transition -- Type of transition for slider, same
# options as "transition".
# initial_frame -- The initial frame label to display.
# frame_label -- The prefix before the frame label.
# show_frame_label -- Whether or not to show a frame label.
# show_slider_labels -- Whether or not to show labels under
# slider positions (disable for long labels)
# show_play_pause -- Whether or not to show the play and pause buttons.
# autoplay -- Whether or not to autoplay on-load in browser.
    #  loop            -- Whether or not the animation should
# loop when playing, otherwise 1 play -> 1 loop.
# loop_pause -- The pause in seconds between animation loops.
#
# See more details at: https://github.com/plotly/plotly.js/blob/master/src/plots/animation_attributes.js
#
# ... <any additional plotly.offline.plot keyword arguments> ...
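    #
    # A minimal usage sketch (names and the file name are hypothetical;
    # shown as comments, not executed here):
    #
    #   p = Plot("Plot demo", "x", "y")
    #   p.add("series", [0, 1, 2], [0, 1, 4])
    #   fig = p.plot(title="Demo", file_name="demo.html",
    #                show=False, show_legend=False)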
def plot(self, title=None, x_range=None, y_range=None,
z_range=None, fixed=True, show_legend=True, layout={},
aspect_mode='cube', legend={}, scene_settings={},
axis_settings={}, x_axis_settings={}, y_axis_settings={},
z_axis_settings={}, hovermode="closest",
camera_position=DEFAULT_CAMERA_POSITION, html=True,
file_name=None, show=True, append=False, height=None,
width=None, loop_duration=5, bounce=False,
transition="linear", data_easing=False, redraw=False,
slider_transition="linear", initial_frame=None,
frame_label="Frame: ", show_frame_label=True,
show_slider_labels=True, show_play_pause=True,
autoplay=False, loop=False, loop_pause=0,
**kwargs):
# Update title, and all plot axis ranges
if title == None:
title = self.title
if (fixed and x_range == None and
max(map(abs,self.x_min_max)) != float('inf')):
x_width = self.x_min_max[1] - self.x_min_max[0]
x_range = [self.x_min_max[0] - 0.05*x_width,
self.x_min_max[1] + 0.05*x_width]
            if (((x_axis_settings.get("type","") == "log") or
                 (axis_settings.get("type","") == "log")) and
                (x_range[0] > 0)):
x_range = [np.log10(x_range[0]), np.log10(x_range[1])]
if (fixed and y_range == None and
max(map(abs,self.y_min_max)) != float('inf')):
y_width = self.y_min_max[1] - self.y_min_max[0]
y_range = [self.y_min_max[0] - 0.05*y_width,
self.y_min_max[1] + 0.05*y_width]
            if (((y_axis_settings.get("type","") == "log") or
                 (axis_settings.get("type","") == "log")) and
                (y_range[0] > 0)):
y_range = [np.log10(y_range[0]), np.log10(y_range[1])]
if (fixed and z_range == None and
max(map(abs,self.z_min_max)) != float('inf')):
z_width = self.z_min_max[1] - self.z_min_max[0]
z_range = [self.z_min_max[0] - 0.05*z_width,
self.z_min_max[1] + 0.05*z_width]
            if (((z_axis_settings.get("type","") == "log") or
                 (axis_settings.get("type","") == "log")) and
                (z_range[0] > 0)):
z_range = [np.log10(z_range[0]), np.log10(z_range[1])]
# Set up a legend font
legend_font = dict(
family = self.font_family,
color = self.font_color,
size = (max(self.font_size - 4,2) if (
type(self.font_size) != type(None)) else None),
)
if ("font" in legend): legend_font.update(legend["font"])
legend["font"] = legend_font
# Set up a title font
title_font = dict(
family = self.font_family,
color = self.font_color,
size = (self.font_size + 2) if (type(self.font_size) == int) else self.font_size,
)
# Generate the layout (titles and legend)
plot_layout = dict(
title = title,
titlefont = title_font,
showlegend = show_legend,
legend = legend,
margin = dict(t=PLOT_MARGIN,b=PLOT_MARGIN,l=10+PLOT_MARGIN,r=PLOT_MARGIN),
)
        # Set width, height, and compensate for plotly spacing around SVG
if type(width) != type(None):
# width += 139
plot_layout.update(dict(width=width))
if type(height) != type(None):
# height += 159
plot_layout.update(dict(height=height))
# Transfer the "hovermode" property.
if type(hovermode) != type(None):
plot_layout.update(dict(hovermode=hovermode))
# Set the barmode for histograms if necessary
if (hasattr(self, 'histogram_barmode') and
len(self.histogram_barmode) > 0):
plot_layout['barmode'] = self.histogram_barmode
# Clean all annotations so they are ready for plotting
annotations = [a.copy() for a in self.annotations]
self._clean_annotations(annotations)
# Setup the title and tick fonts dictionary
fonts_dict = dict(
titlefont = dict(
family = self.font_family,
color = self.font_color,
size = self.font_size,
),
tickfont = dict(
family = self.font_family,
color = self.font_color,
size = (max(self.font_size - 4,2) if (
type(self.font_size) != type(None)) else None),
)
)
# Update axis_settings with things from fonts that it doesn't have.
fonts_dict.update(axis_settings)
axis_settings = fonts_dict
# Update all axes with the global axis settings
x_axis_settings.update(axis_settings)
y_axis_settings.update(axis_settings)
z_axis_settings.update(axis_settings)
# Setup for the axes of the plot
scene = dict(
xaxis = dict(title = self.x_title, range=x_range, **x_axis_settings),
yaxis = dict(title = self.y_title, range=y_range, **y_axis_settings),
zaxis = dict(title = self.z_title, range=z_range, **z_axis_settings),
)
# Setup the plot layout (different for 2D and 3D plots)
if not self.is_3d:
plot_layout.update(scene)
plot_layout.pop('zaxis')
plot_layout.update(dict(annotations=annotations))
else:
scene['aspectmode'] = aspect_mode
scene['camera'] = camera_position
scene.update(scene_settings)
scene.update(dict(annotations=annotations))
plot_layout['scene'] = scene
# Update the plot layout with any specific user settings given
plot_layout.update(layout)
# Make sure all the data entries are prepared to be plotted
# Make a deep copy of the locally stored data that can be
# cleaned and prepared for plotting (without risk of deleting
# information that may be necessary for re-plotting)
data = [d.copy() for d in self.data]
self._clean_data(data)
# Manage plotly reverse order bug (only happens with "tonext_")
self._reorder_data(data)
# Check for animation (if the user wanted it)
if any("frame" in d for d in data):
if any("frame" not in d for d in data):
raise(Exception("\n Partial animations are not allowed.\n Either all series must have 'frame' or none of them."))
# Make a call to handle generating the aniation figure
fig = _animate(data, plot_layout, loop_duration, bounce,
transition, data_easing, redraw,
slider_transition, initial_frame,
frame_label, show_play_pause,
show_frame_label)
else:
# Generate the figure with a standard mechanism
fig = dict(data=data, layout=plot_layout)
# Create the html file and show in browser if appropriate
if html: create_html(fig, file_name, show, append,
show_slider_labels, autoplay,
loop, loop_pause, **kwargs)
# Return the figure
return fig
@same_as(plot, mention_usage=True)
def show(self, *args, **kwargs): return self.plot(*args, **kwargs)
    # This function is a light wrapper for "plot" that automatically
    # sets axis settings for a clean graph-style display (grid shown;
    # tick labels, axis lines, zero lines, titles, and the legend
    # hidden unless explicitly requested).
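    #
    # A minimal usage sketch (node names and coordinates are
    # hypothetical; shown as comments, not executed here):
    #
    #   p = Plot("Graph demo")
    #   p.add_node("a", 0, 0)
    #   p.add_node("b", 1, 1)
    #   p.add_edge(["a", "b"])
    #   p.graph(show=False)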
def graph(self, *args, show_grid=True, show_ticks=False,
show_line=False, show_zero_line=False,
show_legend=False, show_titles=False, **kwargs):
# Set the axis labels
if (not show_titles) and ("x_title" not in kwargs) and ("y_title" not in kwargs):
self.x_title = ""
self.y_title = ""
# Set the default axis settings
axis_settings = dict(showgrid=show_grid, showticklabels=show_ticks,
showline=show_line, zeroline=show_zero_line)
if "axis_settings" in kwargs:
kwargs["axis_settings"].update(axis_settings)
else:
kwargs["axis_settings"] = axis_settings
# Update "show_legend"
kwargs["show_legend"] = show_legend
return self.plot(*args, **kwargs)
# Light wrapper for "add" which is designed to place graphical nodes.
def add_node(self, name, x, y, *args, symbol="circle",
display=True, white=True, size=30, hoverinfo="name",
                  marker_line_color="rgba(0,0,0,1)",
marker_line_width=2, label=False, label_y_offset=1,
label_x_offset=0, **kwargs):
# Disable "white" mode if color was provided
if ("color" in kwargs) and (type(kwargs["color"]) != type(None)):
white = False
# Set to a default color if desired
if white: kwargs["color"] = "rgba(255,255,255,1)"
# Set the defaults for some other plotting arguments
if ("text" not in kwargs):
kwargs["text"] = name
if ("marker_size" not in kwargs):
kwargs["marker_size"] = size
# Remove the marker line and color if not displayed
if not display:
marker_line_width = 0
kwargs["color"] = self.color(
color=kwargs.get("color","(0,0,0)"), alpha=0)
# Store the output of the addition.
output = self.add(name, [x], [y], *args, symbol=symbol,
marker_line_width=marker_line_width,
marker_line_color=marker_line_color,
hoverinfo=hoverinfo, **kwargs)
# Add a label if that is desired (after so it's on top).
if label: self.add_node(name+"_label", x+label_x_offset,
y+label_y_offset, mode="text", text=name,
hoverinfo="skip")
# Return the output.
return output
    # Light wrapper for "add" that draws lines between named nodes in a sequence.
def add_edge(self, nodes, color="rgba(0,0,0,1)", mode="lines",
*args, **kwargs):
x = []
y = []
# Default to adding a fill (if color is specified)
if ("fill_color" in kwargs):
if ("fill" not in kwargs):
kwargs["fill"] = "toself"
# Create a local function that says when a frame matches
matches_frame = lambda d: ("frame" not in kwargs) or (
("frame" in d) and (d["frame"] == str(kwargs["frame"])))
# Now find the nodes to draw between
for _ in range(len(nodes)):
for d in self.data:
if (d["name"] == nodes[0]):
# Skip data that does not match this frame
if not matches_frame(d): continue
# Track the coordinates
x += list(d["x"])
y += list(d["y"])
# Cycle the list
nodes = nodes[1:] + [nodes[0]]
break
# If we don't find a matching node, break
else: break
kwargs["hoverinfo"] = "skip"
output = self.add("", x, y, mode=mode, color=color, *args, **kwargs)
# Cycle that new element to the front of data so that it is
# rendered underneath all nodes.
for i in range(len(self.data)):
if (self.data[i]["name"] in nodes) and matches_frame(self.data[i]):
self.data.insert(i, self.data.pop(-1))
break
return output
# Functions for producing and manipulating plots
# ===================================================
# Convenience function for generating interactive plots in a Jupyter
# notebook. Provide either a single plot or a list of plots as would
# be given to "plot.multiplot" to create interactive visuals.
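#
# A minimal usage sketch (intended for a Jupyter notebook cell; names
# are hypothetical and these lines are comments, not executed here):
#
#   p = Plot("Notebook demo")
#   p.add("series", [0, 1, 2], [0, 1, 4])
#   fig = iplot(p)             # single plot
#   fig = iplot([[p, p]])      # grid of plots, routed through multiplot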
def iplot(plot, *args, html=False, show=True, **kwargs):
# Set notebook mode for this session if it has not been set.
global NOTEBOOK_MODE
import plotly
if not NOTEBOOK_MODE:
plotly.offline.init_notebook_mode()
NOTEBOOK_MODE = True
# Disable the generation of HTML strings.
kwargs['html'] = html
kwargs['show'] = show
# Get the figure for plotting.
if (type(plot) == Plot): fig = plot.plot(*args, **kwargs)
else: fig = multiplot(plot, *args, **kwargs)
# Create an interactive plot in Jupyter.
if show: plotly.offline.iplot(fig, show_link=False)
# Return the figure.
return fig
# Generates the HTML file and fixes some javascript so that the
# plot does not have unwanted buttons and links.
#
# fig -- A plotly figure-dictionary, with all necessary keys.
# file_name -- The name of the output file to generate.
# show -- True if the output file should be opened in a
# webbrowser after writing is complete.
# append -- True if this HTML code should be appended to
# "file_name" if it already exists. This creates a
# scrollable page, where each plot takes a full screen.
# show_slider_labels -- Hack for removing the labels from the slider
# bar that must be done on the HTML.
# autoplay -- Hack for preventing plot animation from
# automatically playing once it is loaded.
# loop -- Hack for making animations automatically
# repeat by modifying raw javascript "animate".
# loop_pause -- Amount of time waited before looping an
# animation in seconds.
#
# ... <any additional plotly.offline.plot keyword arguments> ...
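#
# A minimal usage sketch (figure and file name are hypothetical; shown
# as comments, not executed here):
#
#   p = Plot("HTML demo")
#   p.add("series", [0, 1], [0, 1])
#   fig = p.plot(html=False, show=False)
#   path = create_html(fig, file_name="demo.html", show=False)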
def create_html(fig, file_name=None, show=True, append=False,
show_slider_labels=True, autoplay=False,
loop=True, loop_pause=0, **kwargs):
# Handle the creation of a file
if (type(file_name) == type(None)):
if append and (len(PREVIOUS_FILE_NAMES) > 0):
file_name = PREVIOUS_FILE_NAMES[-1]
else:
with tempfile.NamedTemporaryFile(
mode="w", suffix=".html", delete=False) as f:
file_name = f.name
# Add 'html' extension if necessary.
if (file_name[-len('.html'):] != ".html"): file_name += ".html"
# Load the pypi package "plotly" that interfaces with plotly.js
# only once this is called, otherwise it slows down the import
import plotly
# Store the old file contents if we are appending
if (append and os.path.exists(file_name)):
with open(file_name) as f:
old_contents = f.read()
else: old_contents = ""
# Check for appending to file
if (not append):
print("Creating plot at", end=" ")
else:
print("Appending plot at", end=" ")
# Generate the plot offline
plotly.offline.plot(fig, filename=file_name, auto_open=False,
show_link=False, **kwargs)
# Remove unnecessary modebar buttons and the plotly logo link
with open(file_name) as f:
file_string = f.read()
file_string = file_string.replace(
'displaylogo:!0', 'displaylogo:!1')
file_string = file_string.replace(
'modeBarButtonsToRemove:[]',
'modeBarButtonsToRemove:["sendDataToCloud", "select2d", "lasso2d"]')
file_string += "\n\n"
# Prevent animated plots from auto-playing if the user wants
if (not autoplay):
file_string = re.sub("\\.then\\(function\\(\\)\\{Plotly\\.animate\\(\\'[0-9a-zA-Z-]*\\'\\)\\;\\}\\)", "", file_string)
# autoplay_substitution = ""
else:
print("WARNING: Cannot control transitions using autoplay.")
# autoplay_substitution = '.then(function(){Plotly.animate([null], {"frame": {"duration": 0, "redraw": false}, "mode": "immediate", "transition": {"duration": 0}})})'
# Cause animation to loop if the user wants
if loop:
# Add a global parameter storage at the top of the file
file_string = file_string.replace("*/\n!","*/\nvar ap=[];\n!")
# Name the x.animate function for internal reference and store
# the function parameters passed into the global variable
file_string = file_string.replace("x.animate=function(t,e,r){","x.animate=function af(t,e,r){ap=[t,e,r];")
# Add a recursive call at the end of the conclusion of the animate function
file_string = file_string.replace("}else c()","}else {c();setTimeout(function(){af(ap[0],ap[1],ap[2]);},"+str(1000*loop_pause)+");}")
# Remove the slider label group if necessary by adding CSS that hides it
if not show_slider_labels:
extra_css = '<style type="text/css"> g.slider-labels { display: none; } </style>'
file_string += extra_css
# If appending, put the old contents back in front of the new
if append: file_string = old_contents + file_string
# Write the contents to the file
with open(file_name, "w") as f:
f.write(file_string)
# Update the global list of previously used file names
PREVIOUS_FILE_NAMES.append(file_name)
if len(PREVIOUS_FILE_NAMES) > 1: PREVIOUS_FILE_NAMES.pop(0)
print("file '%s'"%file_name)
# Open the plot in a webbrowser if the user wants that
if show: webbrowser.open("file://"+os.path.abspath(file_name))
return file_name
# Make multiple plots fit onto one browser window, options for sharing
# axes as well for naming purposes. Mixed plot types allowed too!
# Supports a different number of columns per row, but no other mismatch.
#
# plots -- A 2D list of plots in the desired grid layout. Rows
# can have varying numbers of plots, columns cannot.
# x_domains -- A 2D list of pairs (3D list) each pair is [start,end]
# where 0 <= start < end <= 1. This controls the width
# of each column of plots. Same 2D shape as "plots".
# y_domains -- A 2D list of pairs (3D list) each pair is [start,end]
#                where 0 <= start < end <= 1. This controls the height
#                of each row of plots. Same 2D shape as "plots".
# shared_y -- True if the y-axis is shared for plots in same row.
# shared_x -- True if the x-axis is shared for plots in same column.
# gap -- The amount of space between the plots.
# specs -- A 2D list (same shape as "plots") of dictionaries
# representing plotly subplots "specs". Mostly for
# telling plotly which plots are 3D and which are 2D.
# html -- True if "create_html" should be called.
# show -- See "create_html".
# append -- See "create_html".
#
# ... <any additional plotly.offline.plot keyword arguments> ...
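#
# A minimal usage sketch (plots and data are hypothetical; shown as
# comments, not executed here):
#
#   p1 = Plot("Left");  p1.add("a", [0, 1], [0, 1])
#   p2 = Plot("Right"); p2.add("b", [0, 1], [1, 0])
#   fig = multiplot([[p1, p2]], show=False)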
def multiplot(plots, x_domains=None, y_domains=None, html=True,
show=True, append=False, specs=None, shared_y=False,
shared_x=False, legend=None, show_legend=True, gap=0.12,
height=None, width=None, layout=None, **kwargs):
# Load the pypi package "plotly" that interfaces with plotly.js
# only once this is called, otherwise it slows down the import
import plotly
# Make sure the plots array is 2D
try: plots[0][0]
except: plots = [plots]
    # Convert given plots into figures (if figures were not given)
for r in plots:
for c in range(len(r)):
if type(r[c]) == Plot:
r[c] = r[c].plot(html=False, show=False,
show_legend=show_legend)
# Count the number of rows and columns
rows = len(plots)
cols = [len(r) for r in plots]
max_cols = max(c for c in cols)
# Generate/Process the specs
if type(specs) != type(None):
try: specs[0][0]
except: specs = [specs]
else:
specs = [[None]*max_cols for r in range(rows)]
for r,row in enumerate(plots):
for c,plot in enumerate(row):
if type(plot) == type(None): continue
sample_data = plots[r][c]['data'][0]
specs[r][c] = {"is_3d": ('z' in sample_data)}
# Generate the x and y domains if they are not provided by the user
if x_domains == None:
x_domains = []
for r in range(rows):
plot_width = (1 - (cols[r]-1)*gap) / cols[r]
x_domains.append(
[[c*(plot_width+gap), c*(plot_width+gap) + plot_width]
for c in range(cols[r])])
if y_domains == None:
plot_height = (1 - (rows-1)*gap) / rows
y_domains = [[r*(plot_height+gap), r*(plot_height+gap) + plot_height]
for r in range(rows)]
    # Identify the number of dimensions provided in x and y domains; if
    # too few, make sure they have the same shape as the plots.
try: x_domains[0][0][0]
except TypeError:
x_domains = [x_domains for r in range(rows)]
try: y_domains[0][0][0]
except TypeError:
y_domains = [[y_domains[r]]*cols[r] for r in range(rows)]
# Fix y-domains so that they are specified from bottom to top
flipped_y = []
gap = y_domains[1][0][0] - y_domains[0][0][1] if len(y_domains) > 1 else 0
for r in range(rows):
start = 0.0 if r == 0 else flipped_y[-1][1] + gap
plot_width = y_domains[rows-r-1][0][1] - y_domains[rows-r-1][0][0]
flipped_y.append([start, start+plot_width])
y_domains = [[flipped_y[r]]*cols[len(cols)-1-r] for r in range(rows)][::-1]
# Generate the holder for the multiplot
fig = plotly.tools.make_subplots(rows=rows, cols=max_cols,
specs=specs,
shared_yaxes=shared_y,
shared_xaxes=shared_x)
# Generate the multi plot!
counter_2d = 0
counter_3d = 0
for r,row in enumerate(plots):
for c,plot in enumerate(row):
# Allows for empty spaces
if type(plot) == type(None): continue
count = 0
# Otherwise, continue assuming we have a figure!
for d in plot['data']:
count += 1
# # Only add traces that are not redundant (same trace for different frames)
# if not any((d['name'] == f['name']) for f in fig['data']):
fig.append_trace(d, r+1, c+1)
# Add frames to the record for this figure
if ('frames' in plot):
if ('frames' in fig):
if (len(plot['frames']) != len(fig['frames'])):
raise(Exception("Each subplot must have same number of frames for multiplot animation."))
for i,f_src in enumerate(plot['frames']):
for d in f_src['data']:
# Update the x-axis and y-axis of the frame
d['xaxis'] = fig['data'][-1]['xaxis']
d['yaxis'] = fig['data'][-1]['yaxis']
fig['frames'][i]['data'] += [d]
else:
if (r != 0) or (c != 0):
raise(Exception("Each subplot must have same number of frames for multiplot animation."))
fig['frames'] = plot['frames']
for f_src in fig['frames']:
for i,d in enumerate(f_src['data']):
d['xaxis'] = fig['data'][-1]['xaxis']
d['yaxis'] = fig['data'][-1]['yaxis']
# Extract the annotations for this plot
plot_annotations = plot['layout'].pop('annotations',[])
# Handle 3D and 2D differently
if specs[r][c]['is_3d']:
counter_3d += 1
scene_name = 'scene' + str(counter_3d)
fig['layout'][scene_name].update(plot['layout']['scene'])
fig['layout'][scene_name]['domain']['x'] = x_domains[r][c]
fig['layout'][scene_name]['domain']['y'] = y_domains[r][c]
else:
counter_2d += 1
x_name = 'xaxis'+str(counter_2d)
y_name = 'yaxis'+str(counter_2d)
# For shared axes, only add the first entry of column or row
# Update the domains as specified by the user
if (not shared_x) or (r == 0):
fig['layout'][x_name].update(plot['layout'].pop('xaxis'))
fig['layout'][x_name]['domain'] = x_domains[r][c]
if (not shared_y) or (c == 0):
fig['layout'][y_name].update(plot['layout'].pop('yaxis'))
fig['layout'][y_name]['domain'] = y_domains[r][c]
for a in plot_annotations:
a['xref'] = "x" + str(counter_2d)
a['yref'] = "y" + str(counter_2d)
fig['layout']['annotations'] = fig['layout'].get(
'annotations',[]) + [a]
# Ensure that no axis layouts make it into the plot that shouldn't
plot['layout'].pop('xaxis','')
plot['layout'].pop('yaxis','')
fig['layout'].update(plot['layout'])
# Return the annotations to the plot now that the figure
# has been updated (and is not at risk of overwriting annotations)
if len(plot_annotations) > 0:
plot['layout']['annotations'] = plot_annotations
# Remove the 'scene' if there is one left over
if specs[r][c]['is_3d']: fig['layout'].pop('scene','')
    # Set the height and width properties, compensate for plotly spacing around SVG
if type(width) != type(None):
width += 139
fig["layout"].update(dict(width=width))
if type(height) != type(None):
height += 159
fig["layout"].update(dict(height=height))
# Set up the legend if that was provided.
if (legend is not None):
fig["layout"].update(dict(legend=legend))
# Transfer any layout settings.
if (layout is not None):
fig['layout'].update(layout)
# Create the html plot if the user wants that (pass extra arguments)
if html: create_html(fig, show=show, append=append, **kwargs)
# Return the figure to be plotted
return fig
# =================================================
# Helper functions needed for this module
# =================================================
# Given some data, color the data according to a palette with uniform
# interpolation between the colors in the palette from the minimum
# value provided to the maximum value provided.
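#
# A minimal usage sketch (values are hypothetical; shown as comments,
# not executed here):
#
#   colors = color_data([0.0, 0.5, 1.0])
#   # -> one interpolated 'rgba(r,g,b,a)' string per input value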
def color_data(values, palette=DEFAULT_GRADIENT, opacity=1.0):
no_none = [v for v in values if type(v) != type(None)]
shift = min(no_none)
scale = (max(no_none) - shift) * 1.11
if (scale == 0): scale = 1.0
def color(value):
if value == None: return None
# Generate the index as a float (for interpolating)
index = len(palette) * (value-shift) / scale
# Get the exact colors on either side of this index
lower = int(index)
upper = lower + 1
if (lower > len(palette)-1): lower = len(palette)-1
if (upper > len(palette)-1): upper = len(palette)-1
index -= lower
# Interpolate between the lower and upper colors
c = tuple(palette[lower]*(1-index) + palette[upper]*(index))
# Return the interpolated color.
return 'rgba(%i,%i,%i,%f)'%(c+(opacity,))
return list(map(color, values))
# Given a color string, convert it into an array of numbers
def color_string_to_array(color_string):
colors = color_string[color_string.index('(')+1:
color_string.index(')')].split(',')
color = list(map(float,colors))
if len(color) == 3: color += [1.0]
if len(color) != 4: raise(Exception("Bad number of elements in color string."))
return np.array(color)
# Private function for use only by the "plot" function. See the
# descriptions of input arguments at "def plot".
def _animate(data, plot_layout, loop_duration, bounce, transition,
data_easing, redraw, slider_transition, initial_frame,
frame_label, show_play_pause, show_frame_label):
# Get a list of all frame names
frame_names = []
for d in data:
if d["frame"] not in frame_names:
frame_names.append(d["frame"])
transition_duration = (loop_duration / len(frame_names)) * 1000
# Get a list of names and their legend groups (make sure that all
# data series have a legend group and avoid conflicts)
names_and_groups = {}
all_groups = []
for d in data:
if d["legendgroup"] not in all_groups:
all_groups.append(d["legendgroup"])
for i,d in enumerate(data):
if (d["legendgroup"] == None):
if d["name"] not in names_and_groups:
group = d["name"]
number = 1
                new_group = lambda: "%s %s"%(d["name"], number)
                while group in all_groups:
                    group = new_group()
                    number += 1
all_groups.append(group)
names_and_groups[d["name"]] = group
d["legendgroup"] = names_and_groups[d["name"]]
# Remove "None" from the list of groups
if None in all_groups: all_groups.remove(None)
# Construct a universal legend group for all time steps
details = []
for group in all_groups:
names = []
for d in data:
if (d["legendgroup"] == group) and (d["name"] not in names):
names.append(d["name"])
for d in data:
if(d["legendgroup"] == group) and (d["name"] in names):
det = d.copy()
# Remove all displayable data from the details
for val in ["x", "y", "z"]:
if val in det: det[val] = [None]
if "text" in det: det["text"] = None
det.pop("frame")
details.append(det)
names.remove(d["name"])
if (len(names) == 0): break
# Organize all of the data by frame
list_data_dicts = [[d for d in data if (d["frame"] == fn)]
for fn in frame_names]
annotations = plot_layout.pop("annotations",[])
non_framed = [a for a in annotations if "frame" not in a]
annotations = [a for a in annotations if "frame" in a]
plot_layout["annotations"] = non_framed
# Initialize a figure
figure = {"data":[], "layout":plot_layout, "frames":[]}
# Pick the initial value for the animation if necessary
if type(initial_frame) == type(None):
initial_frame = frame_names[0]
if show_play_pause:
# Controls the list of elements transitioned through when "Play"
# is pressed. {"redraw": True} causes the slider to stop working.
# "transition" controls the movement of data points, NOT the slider.
# "[None]" forces a pause, which requires 'immediate" and 0 duration.
slider_menu = [{
'buttons': [
{'args': [frame_names + (frame_names[::-1] if bounce else []),
{'frame': {'duration': transition_duration, 'redraw': redraw},
'fromcurrent': True,
'transition': {'duration': transition_duration if data_easing else 0,
'easing': transition}}],
'label': 'Play', 'method': 'animate'},
{'args': [[None], {'frame': {'duration': 0, 'redraw': redraw},
'mode': 'immediate',
'transition': {'duration': 0}}],
'label':'Pause', 'method':'animate'}],
'direction': 'left',
'pad': {'r': 10, 't': 85},
'showactive': True,
'type': 'buttons',
'x': 0.1, 'y': 0,
'xanchor': 'right',
'yanchor': 'top'
}]
# Initialize a holder for 'updatemenus' if it doesn't exist
if "updatemenus" not in figure["layout"]:
figure['layout']['updatemenus'] = []
# Add the menu to the figure layout
figure['layout']['updatemenus'] += slider_menu
# "transition" controls the animation of the slider.
sliders_dict = {
# 'active': 0,
'yanchor': 'top',
'xanchor': 'left',
'currentvalue': {
'font': {'size': 16},
'prefix': frame_label,
'visible': show_frame_label,
'xanchor': 'right'
},
'transition': {'duration': transition_duration, 'easing': slider_transition},
'pad': {'b': 10, 't': 50 if max(map(len,frame_names)) < 20 else 65},
'len': 0.9 if show_play_pause else 1,
'x': 0.1 if show_play_pause else 0,
'y': 0,
'steps': []
}
# make frames
for el,data_dicts in zip(frame_names, list_data_dicts):
frame = {'data': [], 'name': el}
# Animate a plot
if el == frame_names[0]:
for d in data_dicts:
f_data = d.copy()
f_data.pop("frame","")
f_data["showlegend"] = False
# Generate data dicts in the usual way.
figure['data'].append(f_data)
for d in details:
figure['data'].append(d.copy())
# Add all data dicts for this step to the frame data
for d in data_dicts:
f_data = d.copy()
f_data.pop("frame","")
f_data["showlegend"] = False
frame['data'].append(f_data)
for d in details:
frame['data'].append(d.copy())
layout = {"annotations":[]}
for a in annotations:
if (a["frame"] == el):
layout["annotations"].append( a )
frame["layout"] = layout
figure['frames'].append(frame)
# Controls what happens when this element of the slider is
# clicked. The first duration is for the data, the second is
# for the slider.
slider_step = {'args': [[el],
{'frame': {'duration': transition_duration, 'easing':transition, 'redraw': redraw},
'transition': {'duration': transition_duration if data_easing else 0,
'easing': slider_transition}}
], 'label': el, 'method': 'animate'}
sliders_dict['steps'].append(slider_step)
figure['layout']['sliders'] = [sliders_dict]
return figure
# ================================================
# Example Usage of This Plotly Interface
# ================================================
if __name__ == "__main__":
print()
print("Creating a demonstration of most of the available (and useful) features!")
print()
# Testing code for the plotting interface
fun = lambda x: np.sum(x**2) / 10
# fun = lambda x: x[-1]*x[-2]
x = np.linspace(-10,10,100)
y = x**2 / 10
# Simple straight forward 2D plotting.
plot = Plot("2D Plotting Different Types")
# Adding a 2D function
plot.add_func("Test Func 2D", fun,[-10,10], opacity=0.5, dash="dot")
# Adding lines with dots
plot.add("V Line", [0,0], [min(y), max(y)], mode="lines+markers")
# Adding a filled region
plot.add("Square", [-2,-2,2,2], [5,10,10,5], opacity=0.8,
mode="none", fill="toself")
# Adding lines in arbitrary directions
plot.add("H Line", [-5,5], [1,1], mode="lines+markers",
symbol='square', dash="1px,3px,1px")
plot.add("H Line 2", [-5,5], [2,2], mode="lines")
plot.add_annotation("2D Annotation", 10+.1, 10-.1, ax=9, ay=2,
arrow_head=2, y_anchor="top")
plot1 = plot
# 3D plotting
plot = Plot("3D Title","X Axis", "Y Axis", "Z Axis")
rand_x = list(range(-5,6,2))
rand_y = np.random.randint(-3,4,size=6)
rand_z = np.random.randint(3,8,size=6)
# Adding a 3D line
plot.add("3D Line", rand_x, rand_y, rand_z, mode='lines')
dens = 5
x, y = np.meshgrid(np.linspace(-5,5,dens), np.linspace(-5,5,dens))
x = x.flatten()
y = y.flatten()
fun = lambda x: -.3*x[1] + 1/2*x[0] + 1
z = np.array(list(map(fun, zip(x,y))))
# Adding a 3D function, and demonstrating different marker styles
plot.add("3D Above", x, y, z+1.5, marker_size=3,
marker_line_width=1, group="Small")
plot.add("3D Below", x, y, z-1.5, marker_size=2,
marker_line_width=1, group="Small")
plot.add("3D B Next", x, y, z-1, marker_size=5, opacity=0.7,
marker_line_width=1, group="Big" )
plot.add("3D A Next", x, y, z+1, marker_size=7, opacity=0.4,
marker_line_width=1, group="Big")
plot.add_func("3D Surface", fun, [min(x),max(x)],
[min(y),max(y)], opacity=0.7, use_gradient=True)
x_val, y_val = x[-5], y[-5]
plot.add_annotation("3D Annotation", x_val, y_val,
fun([x_val,y_val])+1.5, ax=-15)
plot2 = plot
# Adding a histogram, notice they don't have the same ranges and
# that will reflect in their respective bin sizes.
plot3 = Plot("Using 'multiplot'", "x stuff", "y stuff")
plot3.add_histogram("Histogram Series 1", np.random.normal(0,3,size=(400,)))
plot3.add_histogram("Histogram Series 2", np.random.normal(15,1, size=(200,)))
plot3.add_annotation("Histogram annotation", 0, 0.005)
# Render the plots in the browser.
plot1.plot(show=False)
# Demonstrate how to put a full-screen plot beneath the first.
plot2.plot(title="'append=True' Plotting", append=True, show=False)
# Demonstrate allowing plotly to auto-scale when series are
# activated and deactivated (try turning off Histogram Series 1)
plot3.plot(title="'fixed=False' Plotting", fixed=False,
append=True, show=False)
# Showing multiple plots on one screen, a grid layout with the
# option for varying numbers of elements on each row.
multiplot([[plot1, plot2],[plot3]], gap=0.1, append=True, show=False)
# Add an example of two plots being animated side-by-side
p1 = Plot("","Plot 1")
p2 = Plot("Animation Plotting","Plot 2")
# x values for each plot
x = [-2,-1,0.01,1,2,3]
for f in range(10):
# Add the first plot series
y = list(map(lambda v: v**2 - f*v, x))
p1.add("f1", x, y, color=p1.color(0), mode='markers+lines',
shade=False, frame=f)
# Add the second plot series
y = np.array(list(map(lambda v: v**(3) + f*v, x)))
p2.add("f2", x, y, color=p2.color(1), mode='markers+lines',
shade=False, frame=f)
p1 = p1.plot(data_easing=True, bounce=True, html=False, loop_duration=2.5)
p2 = p2.plot(data_easing=True, bounce=True, html=False, loop_duration=2.5)
multiplot([[p1, p2]], append=True)
# This is an example of how to control the legend (flat, bottom).
# legend = dict(
# xanchor = "center",
# yanchor = "top",
# x = .5,
# y = -.15,
# orientation = "h",
# )
# layout_settings = dict(
# margin = dict(l=60, t=30, b=30),
# )
| 47.407901
| 174
| 0.552307
|
62ef09918a71ef82c9aa1c534062a67a20b1108d
| 2,397
|
py
|
Python
|
test/pcdcp_test/PCDCPParser_test.py
|
usgs/geomag-algorithms
|
a83a0e36bed9307828e37b9130c25dbc26dd1bc9
|
[
"CC0-1.0"
] | 49
|
2015-10-06T17:57:20.000Z
|
2022-01-12T18:40:17.000Z
|
test/pcdcp_test/PCDCPParser_test.py
|
usgs/geomag-algorithms
|
a83a0e36bed9307828e37b9130c25dbc26dd1bc9
|
[
"CC0-1.0"
] | 229
|
2015-01-26T20:10:36.000Z
|
2022-03-12T00:46:33.000Z
|
test/pcdcp_test/PCDCPParser_test.py
|
alejandrodelcampillo/geomag-algorithms
|
43a734d63a8eb2a696f14237e0054e21d36de7c3
|
[
"CC0-1.0"
] | 44
|
2015-03-03T16:18:18.000Z
|
2021-11-06T17:07:38.000Z
|
"""Tests for the PCDCP Parser class."""
from numpy.testing import assert_equal
from geomagio.pcdcp import PCDCPParser
PCDCP_EXAMPLE = """
BOU 2015 001 01-Jan-15 HEZF 0.01nT File Version 2.00
0000 2086167 -5707 4745737 5237768
0001 2086190 -5664 4745737 5237777
0002 2086213 -5638 4745741 5237787
0003 2086239 -5632 4745739 5237796
0004 2086198 -5626 4745743 5237786
0005 2086228 -5600 4745728 5237784
0006 2086242 -5578 4745725 5237787
0007 2086258 -5552 4745726 5237792
0008 2086278 -5571 4745734 5237808
"""
PCDCP_EXAMPLE_SECOND = """
BOU 2015 001 01-Jan-15 HEZF 0.001nT File Version 2.00
00000 20861520 -57095 47457409 52377630
00001 20861533 -57096 47457397 52377650
00002 20861554 -57077 47457391 52377650
00003 20861578 -57068 47457389 52377680
00004 20861600 -57068 47457384 52377660
00005 20861640 -57047 47457388 52377690
00006 20861654 -57039 47457378 52377650
00007 20861699 -57026 47457377 52377690
00008 20861721 -56995 47457365 52377680
00009 20861743 -56977 47457350 52377680
00010 20861750 -56968 47457349 52377690
"""
def test_parse_header():
"""pcdcp_test.PCDCPParser_test.test_parse_header()
Call the _parse_header method with a header.
Verify the header name and value are split at the correct column.
"""
parser = PCDCPParser()
parser._parse_header(
"BOU 2015 001 01-Jan-15 HEZF 0.01nT" + " File Version 2.00"
)
assert_equal(parser.header["date"], "01-Jan-15")
assert_equal(parser.header["station"], "BOU")
assert_equal(parser.header["year"], "2015")
assert_equal(parser.header["yearday"], "001")
assert_equal(parser.header["resolution"], "0.01nT")
def test_parse_header_sec():
"""pcdcp_test.PCDCPParser_test.test_parse_header_sec()
Call the _parse_header method with a pcdcp seconds file '.raw'
header. Verify the header name and value are split correctly.
"""
parser = PCDCPParser()
parser._parse_header(
"BOU 2015 001 01-Jan-15 HEZF 0.001nT" + " File Version 2.00"
)
assert_equal(parser.header["date"], "01-Jan-15")
assert_equal(parser.header["station"], "BOU")
assert_equal(parser.header["year"], "2015")
assert_equal(parser.header["yearday"], "001")
assert_equal(parser.header["resolution"], "0.001nT")
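# A minimal usage sketch, separate from the unit tests above: it reuses only
# PCDCPParser._parse_header (the call the tests exercise) on the first line of the
# PCDCP_EXAMPLE fixture. Parsing of the data rows themselves is not assumed here.
if __name__ == "__main__":
    demo_parser = PCDCPParser()
    demo_parser._parse_header(PCDCP_EXAMPLE.strip().splitlines()[0])
    # Expected keys, per the assertions above: station, year, yearday, date, resolution
    print(demo_parser.header)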
| 34.242857
| 73
| 0.696704
|
14880d2bd31abddf7da5875bf48154396645072e
| 10,283
|
py
|
Python
|
rcnn/soft_nms.py
|
Edward-Sun/TSP-Detection
|
da63a9f23053df22629d1ad1e2c93e548689ba84
|
[
"Apache-2.0"
] | 37
|
2021-10-12T13:05:00.000Z
|
2022-03-22T02:13:02.000Z
|
rcnn/soft_nms.py
|
Edward-Sun/TSP-Detection
|
da63a9f23053df22629d1ad1e2c93e548689ba84
|
[
"Apache-2.0"
] | 2
|
2021-11-01T09:19:55.000Z
|
2021-12-16T07:31:11.000Z
|
rcnn/soft_nms.py
|
Edward-Sun/TSP-Detection
|
da63a9f23053df22629d1ad1e2c93e548689ba84
|
[
"Apache-2.0"
] | 1
|
2021-10-15T00:40:17.000Z
|
2021-10-15T00:40:17.000Z
|
# This implementation is from
# https://github.com/facebookresearch/detectron2/pull/1183
import torch
import numpy as np
from detectron2.structures import Boxes, RotatedBoxes, pairwise_iou, pairwise_iou_rotated
def soft_nms(boxes, scores, method, gaussian_sigma, linear_threshold, prune_threshold, topk_per_image):
"""
Performs soft non-maximum suppression algorithm on axis aligned boxes
Args:
        boxes (Tensor[N, 4]):
            boxes where NMS will be performed. They
            are expected to be in (x1, y1, x2, y2) format
scores (Tensor[N]):
scores for each one of the boxes
method (str):
one of ['gaussian', 'linear', 'hard']
see paper for details. users encouraged not to use "hard", as this is the
same nms available elsewhere in detectron2
gaussian_sigma (float):
parameter for Gaussian penalty function
linear_threshold (float):
iou threshold for applying linear decay. Nt from the paper
re-used as threshold for standard "hard" nms
prune_threshold (float):
boxes with scores below this threshold are pruned at each iteration.
Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
Returns:
tuple(Tensor, Tensor):
[0]: int64 tensor with the indices of the elements that have been kept
by Soft NMS, sorted in decreasing order of scores
[1]: float tensor with the re-scored scores of the elements that were kept
"""
return _soft_nms_np(
boxes,
scores,
method,
gaussian_sigma,
linear_threshold,
prune_threshold,
topk_per_image,
)
def batched_soft_nms(
boxes, scores, idxs, method, gaussian_sigma, linear_threshold, prune_threshold, topk_per_image
):
"""
Performs soft non-maximum suppression in a batched fashion.
Each index value correspond to a category, and NMS
will not be applied between elements of different categories.
Args:
boxes (Tensor[N, 4]):
boxes where NMS will be performed. They
are expected to be in (x1, y1, x2, y2) format
scores (Tensor[N]):
scores for each one of the boxes
idxs (Tensor[N]):
indices of the categories for each one of the boxes.
method (str):
one of ['gaussian', 'linear', 'hard']
see paper for details. users encouraged not to use "hard", as this is the
same nms available elsewhere in detectron2
gaussian_sigma (float):
parameter for Gaussian penalty function
linear_threshold (float):
iou threshold for applying linear decay. Nt from the paper
re-used as threshold for standard "hard" nms
prune_threshold (float):
boxes with scores below this threshold are pruned at each iteration.
Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
Returns:
tuple(Tensor, Tensor):
[0]: int64 tensor with the indices of the elements that have been kept
by Soft NMS, sorted in decreasing order of scores
[1]: float tensor with the re-scored scores of the elements that were kept
"""
if boxes.numel() == 0:
return (
torch.empty((0,), dtype=torch.int64, device=boxes.device),
torch.empty((0,), dtype=torch.float32, device=scores.device),
)
# strategy: in order to perform NMS independently per class.
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]
return soft_nms(
boxes_for_nms, scores, method, gaussian_sigma, linear_threshold, prune_threshold, topk_per_image
)
def _soft_nms(
box_class,
pairwise_iou_func,
boxes,
scores,
method,
gaussian_sigma,
linear_threshold,
prune_threshold,
topk_per_image,
):
"""
Soft non-max suppression algorithm.
    Implementation of [Soft-NMS -- Improving Object Detection With One Line of Code]
(https://arxiv.org/abs/1704.04503)
Args:
box_class (cls): one of Box, RotatedBoxes
pairwise_iou_func (func): one of pairwise_iou, pairwise_iou_rotated
boxes (Tensor[N, ?]):
boxes where NMS will be performed
if Boxes, in (x1, y1, x2, y2) format
if RotatedBoxes, in (x_ctr, y_ctr, width, height, angle_degrees) format
scores (Tensor[N]):
scores for each one of the boxes
method (str):
one of ['gaussian', 'linear', 'hard']
see paper for details. users encouraged not to use "hard", as this is the
same nms available elsewhere in detectron2
gaussian_sigma (float):
parameter for Gaussian penalty function
linear_threshold (float):
iou threshold for applying linear decay. Nt from the paper
re-used as threshold for standard "hard" nms
prune_threshold (float):
boxes with scores below this threshold are pruned at each iteration.
Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
Returns:
tuple(Tensor, Tensor):
[0]: int64 tensor with the indices of the elements that have been kept
by Soft NMS, sorted in decreasing order of scores
[1]: float tensor with the re-scored scores of the elements that were kept
"""
boxes = boxes.clone()
scores = scores.clone()
idxs = torch.arange(scores.size()[0])
idxs_out = []
scores_out = []
while scores.numel() > 0:
top_idx = torch.argmax(scores)
idxs_out.append(idxs[top_idx].item())
scores_out.append(scores[top_idx].item())
top_box = boxes[top_idx]
ious = pairwise_iou_func(box_class(top_box.unsqueeze(0)), box_class(boxes))[0]
if method == "linear":
decay = torch.ones_like(ious)
decay_mask = ious > linear_threshold
decay[decay_mask] = 1 - ious[decay_mask]
elif method == "gaussian":
decay = torch.exp(-torch.pow(ious, 2) / gaussian_sigma)
elif method == "hard": # standard NMS
decay = (ious < linear_threshold).float()
else:
raise NotImplementedError("{} soft nms method not implemented.".format(method))
scores *= decay
keep = scores > prune_threshold
keep[top_idx] = False
boxes = boxes[keep]
scores = scores[keep]
idxs = idxs[keep]
return torch.tensor(idxs_out).to(boxes.device), torch.tensor(scores_out).to(scores.device)
def pairwise_iou_np(boxes1, boxes2):
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
x_inter_2 = np.minimum(boxes1[:, 2], boxes2[:, 2])
x_inter_1 = np.maximum(boxes1[:, 0], boxes2[:, 0])
y_inter_2 = np.minimum(boxes1[:, 3], boxes2[:, 3])
y_inter_1 = np.maximum(boxes1[:, 1], boxes2[:, 1])
inter = np.maximum(y_inter_2 - y_inter_1, 0) * np.maximum(x_inter_2 - x_inter_1, 0)
# handle empty boxes
iou = inter / (area1 + area2 - inter + 1e-9)
return iou.reshape(1, -1)
def _soft_nms_np(
boxes,
scores,
method,
gaussian_sigma,
linear_threshold,
prune_threshold,
topk_per_image,
):
"""
Soft non-max suppression algorithm.
    Implementation of [Soft-NMS -- Improving Object Detection With One Line of Code]
(https://arxiv.org/abs/1704.04503)
Args:
boxes (Tensor[N, ?]):
boxes where NMS will be performed
if Boxes, in (x1, y1, x2, y2) format
if RotatedBoxes, in (x_ctr, y_ctr, width, height, angle_degrees) format
scores (Tensor[N]):
scores for each one of the boxes
method (str):
one of ['gaussian', 'linear', 'hard']
see paper for details. users encouraged not to use "hard", as this is the
same nms available elsewhere in detectron2
gaussian_sigma (float):
parameter for Gaussian penalty function
linear_threshold (float):
iou threshold for applying linear decay. Nt from the paper
re-used as threshold for standard "hard" nms
prune_threshold (float):
boxes with scores below this threshold are pruned at each iteration.
Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
Returns:
tuple(Tensor, Tensor):
[0]: int64 tensor with the indices of the elements that have been kept
by Soft NMS, sorted in decreasing order of scores
[1]: float tensor with the re-scored scores of the elements that were kept
"""
device = boxes.device
boxes = boxes.clone().cpu().data.numpy()
scores = scores.clone().cpu().data.numpy()
idxs = np.arange(scores.shape[0])
idxs_out = []
scores_out = []
while scores.size > 0 and len(idxs_out) < topk_per_image:
top_idx = np.argmax(scores)
idxs_out.append(idxs[top_idx].item())
scores_out.append(scores[top_idx].item())
top_box = boxes[top_idx]
ious = pairwise_iou_np(np.expand_dims(top_box, 0), boxes)[0]
if method == "linear":
decay = np.ones_like(ious)
decay_mask = ious > linear_threshold
decay[decay_mask] = 1 - ious[decay_mask]
elif method == "gaussian":
decay = np.exp(-np.power(ious, 2) / gaussian_sigma)
elif method == "hard": # standard NMS
            decay = (ious < linear_threshold).astype(np.float32)  # numpy bool array; .float() is a torch method
else:
raise NotImplementedError("{} soft nms method not implemented.".format(method))
scores *= decay
keep = scores > prune_threshold
keep[top_idx] = False
boxes = boxes[keep]
scores = scores[keep]
idxs = idxs[keep]
return torch.tensor(idxs_out).to(device), torch.tensor(scores_out).to(device)
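# A minimal usage sketch of the public entry point defined above, using three small
# hand-made axis-aligned boxes. The exact kept indices and re-scored values depend on
# the decay method and thresholds chosen; the numbers below are illustrative only.
if __name__ == "__main__":
    demo_boxes = torch.tensor(
        [[0.0, 0.0, 10.0, 10.0],
         [1.0, 1.0, 11.0, 11.0],     # heavy overlap with the first box
         [50.0, 50.0, 60.0, 60.0]]   # disjoint from the others
    )
    demo_scores = torch.tensor([0.9, 0.8, 0.7])
    keep, rescored = soft_nms(
        demo_boxes,
        demo_scores,
        method="gaussian",
        gaussian_sigma=0.5,
        linear_threshold=0.3,
        prune_threshold=1e-3,
        topk_per_image=100,
    )
    # All three boxes survive here, but the overlapping one keeps a decayed score
    print(keep, rescored)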
| 38.513109
| 104
| 0.628805
|
aae32588d2b21a44e3915afa734fae6da4153001
| 2,020
|
py
|
Python
|
color_detect.py
|
prityushchandra/image-processing
|
73d975a355b7d382c67f1b39e09e4c5b952155fb
|
[
"MIT"
] | null | null | null |
color_detect.py
|
prityushchandra/image-processing
|
73d975a355b7d382c67f1b39e09e4c5b952155fb
|
[
"MIT"
] | null | null | null |
color_detect.py
|
prityushchandra/image-processing
|
73d975a355b7d382c67f1b39e09e4c5b952155fb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
import cv2
def nothing(x):
pass
def get_frame(cap, scaling_factor):
# Capture the frame from video capture object
ret, frame = cap.read()
# Resize the input frame
frame = cv2.resize(frame, None, fx=scaling_factor,
fy=scaling_factor, interpolation=cv2.INTER_AREA)
return frame
if __name__=='__main__':
cap = cv2.VideoCapture(0)
scaling_factor = 0.5
def greenCircleDetect():
#ret, frame = cap.read()
#hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        lower_green = np.array([36, 0, 0])
        upper_green = np.array([86, 255, 255])
        mask = cv2.inRange(hsv, lower_green, upper_green)
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(frame, contours, -1, (255, 255, 0))
#cv2.imshow('frame', frame)
#cv2.imshow('green_output', mask)
def blueCircleDetect():
#ret, frame = cap.read()
#hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower_blue = np.array([62, 146, 51])
upper_blue = np.array([179, 255, 100])
mask = cv2.inRange(hsv, lower_blue, upper_blue)
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(frame, contours, -1, (0, 255, 0))
#cv2.imshow('frame', frame)
#cv2.imshow('blue_output', mask)
def redCircleDetect():
#ret, frame = cap.read()
#hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        lower_red = np.array([0, 166, 52])
        upper_red = np.array([179, 255, 255])
        mask = cv2.inRange(hsv, lower_red, upper_red)
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(frame, contours, -1, (0, 255, 0))
#cv2.imshow('frame', frame)
#cv2.imshow('red_output', mask)
while True:
frame = get_frame(cap, scaling_factor)##scaling factor is defined as size of window you want
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
greenCircleDetect()
redCircleDetect()
blueCircleDetect()
cv2.imshow('frame', frame)
key=cv2.waitKey(1)
if key==27:
break
cap.release()
cv2.destroyAllWindows()
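# A standalone sketch of the same pattern used above (HSV thresholding followed by
# findContours), written as a plain function so it can run on a single BGR image
# without a webcam. The green bounds mirror the ones in greenCircleDetect.
def detect_green_regions(bgr_image):
    hsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_image, np.array([36, 0, 0]), np.array([86, 255, 255]))
    # Two-value return, matching the findContours usage in the detectors above
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    return contours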
| 23.218391
| 93
| 0.70099
|
66e8c5cf923b39e66acd9eff65148a7742949b13
| 4,502
|
py
|
Python
|
sympy/geometry/tests/test_parabola.py
|
STALKER2010/sympy-bleeding-edge
|
81233029a9a30866747f6da2c0e9604d1681d474
|
[
"BSD-3-Clause"
] | 8
|
2019-05-29T09:38:30.000Z
|
2021-01-20T03:36:59.000Z
|
sympy/geometry/tests/test_parabola.py
|
STALKER2010/sympy-bleeding-edge
|
81233029a9a30866747f6da2c0e9604d1681d474
|
[
"BSD-3-Clause"
] | 12
|
2021-03-09T03:01:16.000Z
|
2022-03-11T23:59:36.000Z
|
sympy/geometry/tests/test_parabola.py
|
STALKER2010/sympy-bleeding-edge
|
81233029a9a30866747f6da2c0e9604d1681d474
|
[
"BSD-3-Clause"
] | 1
|
2018-10-22T09:17:11.000Z
|
2018-10-22T09:17:11.000Z
|
from __future__ import division
from sympy import Rational, oo, sqrt
from sympy import Line, Point, Point2D, Parabola, Segment2D, Ray2D
from sympy import Circle, Ellipse
from sympy.utilities.pytest import raises
def test_parabola_geom():
p1 = Point(0, 0)
p2 = Point(3, 7)
p3 = Point(0, 4)
p4 = Point(6, 0)
d1 = Line(Point(4, 0), Point(4, 9))
d2 = Line(Point(7, 6), Point(3, 6))
d3 = Line(Point(4, 0), slope=oo)
d4 = Line(Point(7, 6), slope=0)
half = Rational(1, 2)
pa1 = Parabola(None, d2)
pa2 = Parabola(directrix=d1)
pa3 = Parabola(p1, d1)
pa4 = Parabola(p2, d2)
pa5 = Parabola(p2, d4)
pa6 = Parabola(p3, d2)
pa7 = Parabola(p2, d1)
pa8 = Parabola(p4, d1)
pa9 = Parabola(p4, d3)
raises(ValueError, lambda:
Parabola(Point(7, 8, 9), Line(Point(6, 7), Point(7, 7))))
raises(NotImplementedError, lambda:
Parabola(Point(7, 8), Line(Point(3, 7), Point(2, 9))))
raises(ValueError, lambda:
Parabola(Point(0, 2), Line(Point(7, 2), Point(6, 2))))
raises(ValueError, lambda: Parabola(Point(7, 8), Point(3, 8)))
# Basic Stuff
assert pa1.focus == Point(0, 0)
assert pa2 == pa3
assert pa4 != pa7
assert pa6 != pa7
assert pa6.focus == Point2D(0, 4)
assert pa6.focal_length == 1
assert pa6.p_parameter == -1
assert pa6.vertex == Point2D(0, 5)
assert pa6.eccentricity == 1
assert pa7.focus == Point2D(3, 7)
assert pa7.focal_length == half
assert pa7.p_parameter == -half
assert pa7.vertex == Point2D(7*half, 7)
assert pa4.focal_length == half
assert pa4.p_parameter == half
assert pa4.vertex == Point2D(3, 13*half)
assert pa8.focal_length == 1
assert pa8.p_parameter == 1
assert pa8.vertex == Point2D(5, 0)
assert pa4.focal_length == pa5.focal_length
assert pa4.p_parameter == pa5.p_parameter
assert pa4.vertex == pa5.vertex
assert pa4.equation() == pa5.equation()
assert pa8.focal_length == pa9.focal_length
assert pa8.p_parameter == pa9.p_parameter
assert pa8.vertex == pa9.vertex
assert pa8.equation() == pa9.equation()
def test_parabola_intersection():
l1 = Line(Point(1, -2), Point(-1,-2))
l2 = Line(Point(1, 2), Point(-1,2))
l3 = Line(Point(1, 0), Point(-1,0))
p1 = Point(0,0)
p2 = Point(0, -2)
p3 = Point(120, -12)
parabola1 = Parabola(p1, l1)
# parabola with parabola
assert parabola1.intersection(parabola1) == [parabola1]
assert parabola1.intersection(Parabola(p1, l2)) == [Point2D(-2, 0), Point2D(2, 0)]
assert parabola1.intersection(Parabola(p2, l3)) == [Point2D(0, -1)]
assert parabola1.intersection(Parabola(Point(16, 0), l1)) == [Point2D(8, 15)]
assert parabola1.intersection(Parabola(Point(0, 16), l1)) == [Point2D(-6, 8), Point2D(6, 8)]
assert parabola1.intersection(Parabola(p3, l3)) == []
# parabola with point
assert parabola1.intersection(p1) == []
assert parabola1.intersection(Point2D(0, -1)) == [Point2D(0, -1)]
assert parabola1.intersection(Point2D(4, 3)) == [Point2D(4, 3)]
# parabola with line
assert parabola1.intersection(Line(Point2D(-7, 3), Point(12, 3))) == [Point2D(-4, 3), Point2D(4, 3)]
assert parabola1.intersection(Line(Point(-4, -1), Point(4, -1))) == [Point(0, -1)]
assert parabola1.intersection(Line(Point(2, 0), Point(0, -2))) == [Point2D(2, 0)]
# parabola with segment
assert parabola1.intersection(Segment2D((-4, -5), (4, 3))) == [Point2D(0, -1), Point2D(4, 3)]
assert parabola1.intersection(Segment2D((0, -5), (0, 6))) == [Point2D(0, -1)]
assert parabola1.intersection(Segment2D((-12, -65), (14, -68))) == []
# parabola with ray
assert parabola1.intersection(Ray2D((-4, -5), (4, 3))) == [Point2D(0, -1), Point2D(4, 3)]
assert parabola1.intersection(Ray2D((0, 7), (1, 14))) == [Point2D(14 + 2*sqrt(57), 105 + 14*sqrt(57))]
assert parabola1.intersection(Ray2D((0, 7), (0, 14))) == []
# parabola with ellipse/circle
assert parabola1.intersection(Circle(p1, 2)) == [Point2D(-2, 0), Point2D(2, 0)]
assert parabola1.intersection(Circle(p2, 1)) == [Point2D(0, -1), Point2D(0, -1)]
assert parabola1.intersection(Ellipse(p2, 2, 1)) == [Point2D(0, -1), Point2D(0, -1)]
assert parabola1.intersection(Ellipse(Point(0, 19), 5, 7)) == []
assert parabola1.intersection(Ellipse((0, 3), 12, 4)) == \
[Point2D(0, -1), Point2D(0, -1), Point2D(-4*sqrt(17)/3, 59/9), Point2D(4*sqrt(17)/3, 59/9)]
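# A small worked example, outside the test functions above, restating the geometry the
# assertions rely on: for a parabola built from a focus and a directrix, the vertex is
# the midpoint between them and focal_length is half the focus-directrix distance.
def _demo_parabola_properties():
    focus = Point(0, 0)
    directrix = Line(Point(4, 0), Point(4, 9))   # the vertical line x = 4
    par = Parabola(focus, directrix)
    # distance(focus, directrix) = 4, so focal_length = 2 and the vertex sits at (2, 0)
    assert par.focal_length == 2
    assert par.vertex == Point2D(2, 0)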
| 41.302752
| 106
| 0.624611
|
7ac0f449eda3f698bfee0ac3d4d7f93b8ccf5cf1
| 1,693
|
py
|
Python
|
boa3/model/builtin/interop/storage/findoptionstype.py
|
OnBlockIO/neo3-boa
|
cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f
|
[
"Apache-2.0"
] | null | null | null |
boa3/model/builtin/interop/storage/findoptionstype.py
|
OnBlockIO/neo3-boa
|
cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f
|
[
"Apache-2.0"
] | null | null | null |
boa3/model/builtin/interop/storage/findoptionstype.py
|
OnBlockIO/neo3-boa
|
cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, Dict
from boa3.model.symbol import ISymbol
from boa3.model.type.itype import IType
from boa3.model.type.primitive.inttype import IntType
class FindOptionsType(IntType):
"""
A class used to represent Neo interop FindOptions type
"""
def __init__(self):
super().__init__()
self._identifier = 'FindOptions'
@property
def default_value(self) -> Any:
from boa3.builtin.interop.storage import FindOptions
return FindOptions.NONE
@classmethod
def build(cls, value: Any = None) -> IType:
if cls._is_type_of(value) or value is None:
from boa3.model.builtin.interop.interop import Interop
return Interop.FindOptionsType
@classmethod
def _is_type_of(cls, value: Any):
from boa3.builtin.interop.storage import FindOptions
return isinstance(value, (FindOptions, FindOptionsType, type(int)))
@property
def symbols(self) -> Dict[str, ISymbol]:
"""
Gets the class symbols of this type
:return: a dictionary that maps each symbol in the module with its name
"""
from boa3.builtin.interop.storage import FindOptions
from boa3.model.variable import Variable
return {name: Variable(self) for name in FindOptions.__members__.keys()}
def get_value(self, symbol_id) -> Any:
"""
Gets the literal value of a symbol
:return: the value if this type has this symbol. None otherwise.
"""
if symbol_id in self.symbols:
from boa3.builtin.interop.storage import FindOptions
return FindOptions.__members__[symbol_id]
return None
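# A minimal usage sketch of the class above; it assumes the rest of the boa3 package is
# importable, since symbols/get_value lazily import boa3.builtin.interop.storage.
def _demo_find_options_type():
    find_options_type = FindOptionsType()
    print(sorted(find_options_type.symbols.keys()))   # the FindOptions member names
    print(find_options_type.get_value('NONE'))        # FindOptions.NONE
    print(find_options_type.default_value)            # also FindOptions.NONE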
| 30.232143
| 80
| 0.665092
|
8905441b8c521a2ebe6e6b46473d058d86c843f9
| 13,326
|
py
|
Python
|
app/functions/kf_evaluator.py
|
klehman-rally/kingfisher
|
e5f0eff0fcc596b15d799d67f20e7e6e54ea7d2e
|
[
"BSD-3-Clause"
] | null | null | null |
app/functions/kf_evaluator.py
|
klehman-rally/kingfisher
|
e5f0eff0fcc596b15d799d67f20e7e6e54ea7d2e
|
[
"BSD-3-Clause"
] | null | null | null |
app/functions/kf_evaluator.py
|
klehman-rally/kingfisher
|
e5f0eff0fcc596b15d799d67f20e7e6e54ea7d2e
|
[
"BSD-3-Clause"
] | null | null | null |
import sys, os
import base64
import json
from datetime import datetime
from itertools import chain
from app.helpers.pubsub import publish
#############################################################################################################################
WEBHOOK_NOGO_TOPIC = os.getenv('KF_WEBHOOK_NOGO')
WEBHOOK_READY_TOPIC = os.getenv('KF_WEBHOOK_READY')
#############################################################################################################################
def kf_evaluateOCM(data, context):
"""
Background Cloud Function to be triggered by Pub/Sub.
Args:
data (dict): The dictionary with data specific to this type of event.
context (google.cloud.functions.Context): The Cloud Functions event
metadata.
The package pulled out of data has these keys:
message_id
action
payload
conditions
webhooks
processed_timestamp
From the kingfisher DB
items queried from the webhook table have:
0 1 2 3 4 5
id sub_id name target_url object_types(list) conditions(list of ids)
items queried from the condition table have:
0 1 2 3 4 5
id sub_id attribute_uuid attribute_name operator value'
"""
if not 'data' in data:
print("Missing top level 'data' element in data parameter, no data published to output topic.")
return
package = json.loads(base64.b64decode(data['data']).decode('utf-8'))
#print(f'keys for the provided message {repr(list(package.keys()))}')
message_id = package.get('message_id')
action = package.get('action')
payload = json.loads(package.get('payload'))
conditions = json.loads(package.get('conditions'))
webhooks = json.loads(package.get('webhooks'))
object_type = payload['object_type']
print(f'message_id: {message_id} action: {action} object_type: {object_type}')
#print(f'payload is a {type(payload)}')
#print(f'payload has these keys {list(payload.keys())}')
#payload keys: ['action', 'subscription_id', 'ref', 'detail_link', 'object_type', 'changes', 'state', 'project']
if action.lower() not in ['created', 'updated']:
print(f'Ignoring OCM action: {action} for message_id: {message_id}')
return
print(f'webhooks -> {webhooks}')
print(f'conditions -> {conditions}')
relevant_webhooks = getRelevantWebhooks(webhooks, object_type)
if not relevant_webhooks:
print(f'message_id: {message_id} no relevant webhooks for object_type: {object_type}')
return None
print(f'message_id: {message_id} relevant_webhooks: {repr(relevant_webhooks)}')
relevant_conditions = getRelevantConditions(relevant_webhooks, conditions)
print(f'message_id: {message_id} relevant_conditions: {repr(relevant_conditions)}')
condition = evaluateItemAgainstConditions(payload, relevant_conditions)
endpoint = {}
for webhook in relevant_webhooks:
disqualified = False
for cond_id in webhook[-1]: # all conditions specified by webhook must be true or the webhook is disqualified
if condition[cond_id]['status'] != True:
disqualified = True
message_dict = { "message_id" : message_id,
"action" : action,
"webhook" : webhook,
"payload" : json.dumps(payload),
"processed_timestamp" : datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
}
if disqualified:
message_dict['conditions'] = condition
topic_name = WEBHOOK_NOGO_TOPIC
try:
future = publish(topic_name, message_dict)
result = future.result(timeout=10)
print(f'Published message -- {message_id} topic: WEBHOOK_NOGO result: {result}')
except Exception as exception:
                print(f'Encountered error while publishing -- message_id: {message_id} topic: WEBHOOK_NOGO exception: {exception}')
continue
if webhook[3] not in endpoint: # A payload only gets fired on its target once per OCM
endpoint[webhook[3]] = 1 # to ensure the above statement, cache the target endpoint
topic_name = WEBHOOK_READY_TOPIC
try:
message_dict['attempts'] = 0
message_dict['eligible'] = 1000 # artificially low timestamp value
future = publish(topic_name, message_dict)
result = future.result(timeout=10)
                print(f'Published message -- {message_id} topic: WEBHOOK_READY result: {result}')
except Exception as exception:
                print(f'Encountered error while publishing -- {message_id} topic: WEBHOOK_READY exception: {exception}')
#############################################################################################################################
def getRelevantWebhooks(webhooks, object_type):
"""
    Given a list of webhook items (structure described in the kf_evaluateOCM docstring) and an object_type value,
    return the webhooks whose object_types list (item index 4) contains object_type, or whose object_types list is empty
"""
relevant_webhooks = [wh for wh in webhooks if object_type in wh[4] or len(wh[4]) == 0 ] # wh[4] is the list of object_types
return relevant_webhooks
#############################################################################################################################
def getRelevantConditions(webhooks, conditions):
"""
Given a sequence of webhook items and sequence of conditions
identify and return the conditions that match the condition ids in each webhook
"""
    conds = [wh[-1] for wh in webhooks]  # wh[-1] is a list of integers where each integer is the id value of a condition
cond_ids = list(set(chain(*conds)))
relevant_conds = [cond for cond in conditions if cond[0] in cond_ids]
return relevant_conds
#############################################################################################################################
def evaluateItemAgainstConditions(payload, relevant_conditions):
"""
    Given a payload (dict) whose 'state' key holds a sub-dict of attribute_uuid : attribute_info pairs,
    and a relevant_conditions sequence, return a dict keyed by condition id whose values carry the
    rendered condition expression and its boolean evaluation status
"""
condition = {}
for cond in relevant_conditions:
cond_id, sub_id, condition_attr_uuid, condition_attr_name, condition_relation, condition_value = cond
attribute = payload['state'][condition_attr_uuid]
#attr_value = payload['state'][condition_attr_uuid]['value']['value']
attr_value = 'no such value key in attribute'
if 'value' in attribute:
attr_value = attribute['value']
if attr_value and isinstance(attr_value, dict):
if 'value' in attr_value:
attr_value = attr_value['value']
elif 'name' in attr_value:
attr_value = attr_value['name']
expression = f'{condition_attr_name}({attr_value}) {condition_relation} {condition_value}'
status = isQualified(payload, cond)
print(f'{expression} ? {status}')
condition[cond_id] = {'condition' : expression, 'status' : status}
return condition
#############################################################################################################################
""" from the Pigeon Webhooks API documentation
Operators
The required fields in an Expression depend on the Operator.
------------
The following operators require both a Value and exactly one of AttributeID or AttributeName.
Operator Description
= Equal
!= Not equal
< Less than
<= Less than or equal
> Greater than
>= Greater than or equal
changed-to Value changed to
changed-from Value changed from
------------
The following operators require an AttributeID or AttributeName, and a Value that is an Array of individual values.
Operator Description
~ "Equals one of". Matches when the object's value for the attribute
is equal to one of the values given in the Expression
!~ "Equals none of". Matches when the object's value for the attribute
is not equal to any of the values given in the Expression
-----------
The following operators require only an AttributeID or AttributeName (no Value)
Operator Description
has The object has some (non-null) value for the attribute
!has The object does not have the attribute, or its value is null
changed The value of the attribute was changed on the object
"""
def isEqual(ocm_attr_value, expression_value):
return ocm_attr_value == expression_value
def isNotEqual(ocm_attr_value, expression_value):
return ocm_attr_value != expression_value
def isLessThan(ocm_attr_value, expression_value):
return ocm_attr_value < expression_value
def isLessThanOrEqual(ocm_attr_value, expression_value):
return ocm_attr_value <= expression_value
def isGreaterThan(ocm_attr_value, expression_value):
return ocm_attr_value > expression_value
def isGreaterThanOrEqual(ocm_attr_value, expression_value):
return ocm_attr_value >= expression_value
# def isChangedTo(ocm, ocm_attr_id, expression_value):
# return False
# def isChangedFrom(ocm, ocm_attr_id, expression_value):
# return False
def isOneOf(ocm_attr_value, expression_value):
return ocm_attr_value in expression_value # "cast" expression_value to a list
def isNotOneOf(ocm_attr_value, expression_value):
return ocm_attr_value not in expression_value # "cast" expression_value to a list
# def hasSomeValue(ocm, ocm_attr_id, expression_value):
# return False
# def hasNoValue(ocm, ocm_attr_id, expression_value):
# return False
expression_eval = {'=' : isEqual,
'!=' : isNotEqual,
'<' : isLessThan,
'<=' : isLessThanOrEqual,
'>' : isGreaterThan,
'>=' : isGreaterThanOrEqual,
# 'changed-to' : isChangedTo,
# 'changed-from' : isChangedFrom,
# expressions that take an attribute_id|name and a list of possible values
'~' : isOneOf,
'!~' : isNotOneOf,
# expressions that take just an attribute_id|name
# 'has' : hasSomeValue,
# '!has' : hasNoValue,
# 'changed' : valueWasChanged
}
def isQualified(ocm, condition):
#ocm keys: ['action', 'subscription_id', 'ref', 'detail_link', 'object_type', 'changes', 'state', 'project']
cond_id, sub_id, condition_attr_uuid, condition_attr_name, condition_relation, condition_value = condition
state = ocm['state']
changes = ocm['changes']
var_info = state[condition_attr_uuid]
attr_value = var_info['value']
if attr_value and isinstance(attr_value, dict):
if 'value' in attr_value:
attr_value = attr_value['value']
elif 'name' in attr_value:
attr_value = attr_value['name']
try:
        condition_value = int(condition_value)  # in case condition_value is a numeric string
except:
pass
# alternative to above is to attempt a rough determination of the type of the attr_value and coerce condition_value to same
#if attr_value and isinstance(attr_value, int)
# if condition_value:
# try:
# condition_value = int(condition_value)
# except:
# pass
#elif attr_value and isinstance(attr_value, float)
# if condition_value:
# try:
# condition_value = float(condition_value)
# except:
# pass
print(f'{condition_attr_name}({attr_value}) {condition_relation} {condition_value} ?')
if condition_relation not in ['changed-to', 'changed-from', 'has', '!has', 'changed']:
if not attr_value:
return False
take = expression_eval[condition_relation](attr_value, condition_value)
return take
if condition_relation == 'changed-to':
ac = [changes[attr_uuid]['value'] for attr_uuid in changes.keys() if attr_uuid == condition_attr_uuid]
if not ac:
return False
else:
return ac[0] == condition_value
elif condition_relation == 'changed-from':
ac = [changes[attr_uuid]['old_value'] for attr_uuid in changes.keys() if attr_uuid == condition_attr_uuid]
if not ac:
return False
else:
return ac[0] == condition_value
elif condition_relation == 'has':
return attr_value != None
elif condition_relation == '!has':
return attr_value == None
else: # must be 'changed'
ac = [changes[attr_uuid]['value'] for attr_uuid in changes.keys() if attr_uuid == condition_attr_uuid]
return len(ac) > 0
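# A hand-built worked example of the condition tuple and OCM payload shape that
# isQualified() reads above: a 'state'/'changes' dict keyed by attribute uuid, each
# entry holding a 'value'. The uuid, attribute name and values here are made up
# purely for illustration.
def _demo_is_qualified():
    demo_condition = (1, 'sub-1', 'attr-uuid-1', 'ScheduleState', '=', 'Accepted')
    demo_ocm = {
        'state': {'attr-uuid-1': {'value': 'Accepted'}},
        'changes': {},
    }
    # '=' requires an attribute and a value; with matching values this returns True
    print(isQualified(demo_ocm, demo_condition))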
| 41.385093
| 130
| 0.597629
|
61cef6b38bd76a0c904df8b01ba97efc25c2eed7
| 4,550
|
py
|
Python
|
open_spiel/python/algorithms/external_sampling_mccfr_test.py
|
Limmen/open_spiel
|
2d4d7b783a9161e2c4c90f70dec29d6982fac6c1
|
[
"Apache-2.0"
] | 1
|
2021-12-31T01:45:58.000Z
|
2021-12-31T01:45:58.000Z
|
open_spiel/python/algorithms/external_sampling_mccfr_test.py
|
Limmen/open_spiel
|
2d4d7b783a9161e2c4c90f70dec29d6982fac6c1
|
[
"Apache-2.0"
] | null | null | null |
open_spiel/python/algorithms/external_sampling_mccfr_test.py
|
Limmen/open_spiel
|
2d4d7b783a9161e2c4c90f70dec29d6982fac6c1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.cfr."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import external_sampling_mccfr
import pyspiel
SEED = 39823987
class ExternalSamplingMCCFRTest(absltest.TestCase):
def test_external_sampling_leduc_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("leduc_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Leduc2P, conv = {}".format(conv))
self.assertLess(conv, 5)
# ensure that to_tabular() works on the returned policy and
# the tabular policy is equivalent
tabular_policy = es_solver.average_policy().to_tabular()
conv2 = exploitability.nash_conv(game, tabular_policy)
self.assertEqual(conv, conv2)
def test_external_sampling_leduc_2p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("leduc_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Leduc2P, conv = {}".format(conv))
self.assertLess(conv, 5)
def test_external_sampling_kuhn_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn2P, conv = {}".format(conv))
self.assertLess(conv, 1)
def test_external_sampling_kuhn_2p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn2P, conv = {}".format(conv))
self.assertLess(conv, 1)
# Liar's dice takes too long, so disable this test. Leave code for reference.
# pylint: disable=g-unreachable-test-method
def disabled_test_external_sampling_liars_dice_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("liars_dice")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(1):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Liar's dice, conv = {}".format(conv))
self.assertLess(conv, 2)
def test_external_sampling_kuhn_3p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker", {"players": 3})
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn3P, conv = {}".format(conv))
self.assertLess(conv, 2)
def test_external_sampling_kuhn_3p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker", {"players": 3})
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn3P, conv = {}".format(conv))
self.assertLess(conv, 2)
if __name__ == "__main__":
absltest.main()
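# A standalone sketch of the same solver loop the tests above use, run outside
# unittest and reporting NashConv periodically. Convergence numbers vary with the
# seed and iteration count.
def _demo_kuhn_external_sampling(iterations=50, report_every=10):
    np.random.seed(SEED)
    game = pyspiel.load_game("kuhn_poker")
    solver = external_sampling_mccfr.ExternalSamplingSolver(
        game, external_sampling_mccfr.AverageType.SIMPLE)
    for i in range(1, iterations + 1):
        solver.iteration()
        if i % report_every == 0:
            conv = exploitability.nash_conv(game, solver.average_policy())
            print("iteration {}: NashConv = {}".format(i, conv))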
| 38.235294
| 79
| 0.741978
|
4f0530765db952e51b16d15b3d9e6d875317f870
| 3,871
|
py
|
Python
|
nova/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py
|
russellb/nova
|
99c2e02b44a1012c8e26fc7658dc40ec4620a1ee
|
[
"Apache-2.0"
] | null | null | null |
nova/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py
|
russellb/nova
|
99c2e02b44a1012c8e26fc7658dc40ec4620a1ee
|
[
"Apache-2.0"
] | null | null | null |
nova/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py
|
russellb/nova
|
99c2e02b44a1012c8e26fc7658dc40ec4620a1ee
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table
from migrate import ForeignKeyConstraint
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
return
instances = Table('instances', meta, autoload=True)
networks = Table('networks', meta, autoload=True)
vifs = Table('virtual_interfaces', meta, autoload=True)
fixed_ips = Table('fixed_ips', meta, autoload=True)
floating_ips = Table('floating_ips', meta, autoload=True)
try:
fkeys = list(fixed_ips.c.network_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
ForeignKeyConstraint(columns=[fixed_ips.c.network_id],
refcolumns=[networks.c.id],
name=fkey_name).drop()
fkeys = list(fixed_ips.c.virtual_interface_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
refcolumns=[vifs.c.id],
name=fkey_name).drop()
fkeys = list(fixed_ips.c.instance_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
ForeignKeyConstraint(columns=[fixed_ips.c.instance_id],
refcolumns=[instances.c.id],
name=fkey_name).drop()
fkeys = list(floating_ips.c.fixed_ip_id.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
ForeignKeyConstraint(columns=[floating_ips.c.fixed_ip_id],
refcolumns=[fixed_ips.c.id],
name=fkey_name).drop()
except Exception:
LOG.error(_("foreign key constraint couldn't be removed"))
raise
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
return
instances = Table('instances', meta, autoload=True)
networks = Table('networks', meta, autoload=True)
vifs = Table('virtual_interfaces', meta, autoload=True)
fixed_ips = Table('fixed_ips', meta, autoload=True)
floating_ips = Table('floating_ips', meta, autoload=True)
try:
ForeignKeyConstraint(columns=[fixed_ips.c.network_id],
refcolumns=[networks.c.id]).create()
ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
refcolumns=[vifs.c.id]).create()
ForeignKeyConstraint(columns=[fixed_ips.c.instance_id],
refcolumns=[instances.c.id]).create()
ForeignKeyConstraint(columns=[floating_ips.c.fixed_ip_id],
refcolumns=[fixed_ips.c.id]).create()
except Exception:
LOG.error(_("foreign key constraint couldn't be added"))
raise
| 38.71
| 78
| 0.633686
|
9b1f80e3a404c4cdcde8be748c8de57a9799d0c6
| 123
|
py
|
Python
|
AI/data/constants.py
|
yast-ia/YastAI
|
f5a05841126da4acd9b7250c5bf6f627ac1703d5
|
[
"MIT"
] | 1
|
2020-08-23T22:00:17.000Z
|
2020-08-23T22:00:17.000Z
|
AI/data/constants.py
|
sborquez/her2bdl
|
f9ac9ef19bf5023f3f9d15bef663d3b1a0c92c81
|
[
"MIT"
] | null | null | null |
AI/data/constants.py
|
sborquez/her2bdl
|
f9ac9ef19bf5023f3f9d15bef663d3b1a0c92c81
|
[
"MIT"
] | 1
|
2020-08-23T18:34:12.000Z
|
2020-08-23T18:34:12.000Z
|
"""
Data and dataset constants
==========================
Collections of variables for datasets and data processing.
"""
| 15.375
| 58
| 0.593496
|
8338a6379b1b37687901146fdc48795c6c581e40
| 1,922
|
py
|
Python
|
filelists/fileCheck.py
|
cbaeck1/T2KWP
|
e3682bee5b96d049d66c586910d34d802ab47637
|
[
"BSD-3-Clause"
] | null | null | null |
filelists/fileCheck.py
|
cbaeck1/T2KWP
|
e3682bee5b96d049d66c586910d34d802ab47637
|
[
"BSD-3-Clause"
] | null | null | null |
filelists/fileCheck.py
|
cbaeck1/T2KWP
|
e3682bee5b96d049d66c586910d34d802ab47637
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import glob
def load_filepaths_and_text(filename, split="|"):
with open(filename, encoding='utf-8-sig') as f:
filepaths_and_text = [line.strip().split(split) for line in f]
return filepaths_and_text
# full dataset
datapath = '/mnt/d/data'
# sample
#datapath = ''
filepaths = [
'korean_public_wav/korean_public_wav_orgin.txt'
]
wavpaths = [
'selvas_wav'
]
filepathsCnt = [0,0,0]
wavpathsCnt = [0,0,0]
'''
filepaths = [
'korean_public_wav/korean_public_wav.txt', 'korean_public_wav/korean_public_wav_orgin.txt',
'kss_wav/kss_wav.txt', 'kss/kss_wav_origin.txt',
'selvas_wav/selvas_wav.txt', 'selvas_wav/selvas_wav_origin.txt'
]
# total, exists, does not exist
filepathsCnt = [0,0,0, 0,0,0,
0,0,0, 0,0,0,
0,0,0, 0,0,0]
wavpathsCnt = [0,0,0, 0,0,0,
0,0,0, 0,0,0,
0,0,0, 0,0,0]
'''
# Check whether each file exists, based on the entries in the list file
iPosition = 0
for filepath in filepaths:
filepaths_and_texts = load_filepaths_and_text(os.path.join(datapath, filepath))
with open(os.path.join(datapath, filepath + '.new'), 'w', encoding='utf-8') as f:
for file_text in filepaths_and_texts:
filepathsCnt[iPosition] += 1
if os.path.isfile(file_text[0]):
filepathsCnt[iPosition+1] += 1
#print(file_text[0])
listStr = '|'.join(file_text)
#f.write('{}|{}|{}|{}\n'.format(file_text[0], file_text[1], file_text[2], file_text[3]))
f.write('{}\n'.format(listStr))
else:
filepathsCnt[iPosition+2] += 1
print(file_text[0] + ' not exists!')
iPosition += 3
print(filepathsCnt)
# Check whether each file on disk appears in the list file
# iPosition = 0
# for wavpath in wavpaths:
# wav_paths = glob.glob(os.path.join(wavpath, 'wav_16000', '*', '*.wav'))
# with open(os.path.join(datapath, filepath + '.wavnew'), 'w', encoding='utf-8') as f:
# for wav_path in wav_paths:
# wav_filename = os.path.basename(wav_path)
| 28.264706
| 104
| 0.623309
|
d5bda19c7a2b5a9682bc2d0353117bf95c47fe6b
| 7,661
|
py
|
Python
|
test/tools_Fitting_FitData.py
|
jjacob/DailyPythonScripts
|
cd6c515c6242d1f3b44e97c8ad05946721b6a36a
|
[
"Apache-2.0"
] | null | null | null |
test/tools_Fitting_FitData.py
|
jjacob/DailyPythonScripts
|
cd6c515c6242d1f3b44e97c8ad05946721b6a36a
|
[
"Apache-2.0"
] | null | null | null |
test/tools_Fitting_FitData.py
|
jjacob/DailyPythonScripts
|
cd6c515c6242d1f3b44e97c8ad05946721b6a36a
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on 31 Oct 2012
@author: kreczko
'''
import unittest
from tools.Fitting import FitData, FitDataCollection
from rootpy.plotting import Hist
import numpy as np
from tools.hist_utilities import adjust_overflow_to_limit
N_bkg1 = 9000
N_signal = 1000
N_bkg1_obs = 10000
N_signal_obs = 2000
N_data = N_bkg1_obs + N_signal_obs
mu1, mu2, sigma1, sigma2 = 100, 140, 15, 5
x1 = mu1 + sigma1 * np.random.randn( N_bkg1 )
x2 = mu2 + sigma2 * np.random.randn( N_signal )
x1_obs = mu1 + sigma1 * np.random.randn( N_bkg1_obs )
x2_obs = mu2 + sigma2 * np.random.randn( N_signal_obs )
x3 = mu2 + sigma1 * np.random.randn( N_bkg1 )
x4 = mu1 + sigma2 * np.random.randn( N_signal )
x3_obs = mu2 + sigma1 * np.random.randn( N_bkg1_obs )
x4_obs = mu1 + sigma2 * np.random.randn( N_signal_obs )
x_min = 40
x_max = 200
data_scale = 1.2
N_data = N_data * data_scale
class Test( unittest.TestCase ):
def setUp( self ):
# create histograms
h_bkg1_1 = Hist( 100, 40, 200, title = 'Background' )
h_signal_1 = h_bkg1_1.Clone( title = 'Signal' )
h_data_1 = h_bkg1_1.Clone( title = 'Data' )
h_bkg1_2 = h_bkg1_1.Clone( title = 'Background' )
h_signal_2 = h_bkg1_1.Clone( title = 'Signal' )
h_data_2 = h_bkg1_1.Clone( title = 'Data' )
# fill the histograms with our distributions
map( h_bkg1_1.Fill, x1 )
map( h_signal_1.Fill, x2 )
map( h_data_1.Fill, x1_obs )
map( h_data_1.Fill, x2_obs )
map( h_bkg1_2.Fill, x3 )
map( h_signal_2.Fill, x4 )
map( h_data_2.Fill, x3_obs )
map( h_data_2.Fill, x4_obs )
h_data_1.Scale(data_scale)
h_data_2.Scale(data_scale)
self.histograms_1 = {'signal': h_signal_1,
'bkg1': h_bkg1_1}
self.histograms_2 = {'signal': h_signal_2,
'bkg1': h_bkg1_2}
self.histograms_3 = {'var1': h_signal_1,
'bkg1': h_bkg1_1}
self.fit_data_1 = FitData( h_data_1, self.histograms_1, fit_boundaries = ( x_min, x_max ))
self.fit_data_2 = FitData( h_data_2, self.histograms_2, fit_boundaries = ( x_min, x_max ))
self.fit_data_3 = FitData( h_data_1, self.histograms_3, fit_boundaries = ( x_min, x_max ))
self.collection_1 = FitDataCollection()
self.collection_1.add( self.fit_data_1, 'signal region' )
self.collection_1.add( self.fit_data_2, 'control region' )
self.collection_1.set_normalisation_constraints({'bkg1': 0.5})
self.collection_2 = FitDataCollection()
self.collection_2.add( self.fit_data_1 )
self.collection_2.add( self.fit_data_2 )
self.collection_2.set_normalisation_constraints({'bkg1': 0.5})
self.single_collection = FitDataCollection()
self.single_collection.add( self.fit_data_1 )
self.single_collection.set_normalisation_constraints({'bkg1': 0.5})
self.non_simultaneous_fit_collection = FitDataCollection()
self.non_simultaneous_fit_collection.add( self.fit_data_1 )
self.non_simultaneous_fit_collection.add( self.fit_data_3 )
self.h_data = h_data_1
self.h_bkg1 = h_bkg1_1
self.h_signal = h_signal_1
def tearDown( self ):
pass
def test_is_valid_for_simultaneous_fit( self ):
self.assertTrue( self.collection_1.is_valid_for_simultaneous_fit(), msg = 'has_same_n_samples: ' + str(self.collection_1.has_same_n_samples) + ', has_same_n_data: ' + str(self.collection_1.has_same_n_data) )
self.assertTrue( self.collection_2.is_valid_for_simultaneous_fit(), msg = 'has_same_n_samples: ' + str(self.collection_1.has_same_n_samples) + ', has_same_n_data: ' + str(self.collection_1.has_same_n_data) )
self.assertFalse( self.non_simultaneous_fit_collection.is_valid_for_simultaneous_fit() )
def test_samples( self ):
samples = sorted( self.histograms_1.keys() )
samples_from_fit_data = sorted( self.fit_data_1.samples )
samples_from_fit_data_collection = self.collection_1.mc_samples()
self.assertEqual( samples, samples_from_fit_data )
self.assertEqual( samples, samples_from_fit_data_collection )
def test_normalisation( self ):
normalisation = {name:adjust_overflow_to_limit(histogram, x_min, x_max).Integral() for name, histogram in self.histograms_1.iteritems()}
normalisation_from_fit_data = self.fit_data_1.normalisation
normalisation_from_single_collection = self.single_collection.mc_normalisation()
normalisation_from_collection = self.collection_1.mc_normalisation( 'signal region' )
normalisation_from_collection_1 = self.collection_1.mc_normalisation()['signal region']
for sample in normalisation.keys():
self.assertEqual( normalisation[sample], normalisation_from_fit_data[sample] )
self.assertEqual( normalisation[sample], normalisation_from_single_collection[sample] )
self.assertEqual( normalisation[sample], normalisation_from_collection[sample] )
self.assertEqual( normalisation[sample], normalisation_from_collection_1[sample] )
# data normalisation
normalisation = self.h_data.integral( overflow = True )
normalisation_from_fit_data = self.fit_data_1.n_data()
normalisation_from_single_collection = self.single_collection.n_data()
normalisation_from_collection = self.collection_1.n_data( 'signal region' )
normalisation_from_collection_1 = self.collection_1.n_data()['signal region']
self.assertEqual( normalisation, normalisation_from_fit_data )
self.assertEqual( normalisation, normalisation_from_single_collection )
self.assertEqual( normalisation, normalisation_from_collection )
self.assertEqual( normalisation, normalisation_from_collection_1 )
self.assertAlmostEqual(normalisation, self.collection_1.max_n_data(), delta = 1 )
def test_real_data( self ):
real_data = self.fit_data_1.real_data_histogram()
self.assertEqual( self.h_data.integral( overflow = True ), real_data.Integral() )
def test_overwrite_warning( self ):
c = FitDataCollection()
c.add( self.fit_data_1, 'var1' )
self.assertRaises( UserWarning, c.add, ( self.fit_data_1, 'var1' ) )
def test_vectors( self ):
h_signal = adjust_overflow_to_limit( self.h_signal, x_min, x_max )
h_signal.Scale(1/h_signal.Integral())
h_bkg1 = adjust_overflow_to_limit( self.h_bkg1, x_min, x_max )
h_bkg1.Scale(1/h_bkg1.Integral())
signal = list( h_signal.y() )
bkg1 = list( h_bkg1.y() )
v_from_fit_data = self.fit_data_1.vectors
v_from_single_collection = self.single_collection.vectors()
# v_from_collection = self.collection_1.vectors( 'signal region' )
# v_from_collection_1 = self.collection_1.vectors()['signal region']
self.assertEqual(signal, v_from_fit_data['signal'])
self.assertEqual(bkg1, v_from_fit_data['bkg1'])
self.assertEqual(signal, v_from_single_collection['signal'])
self.assertEqual(bkg1, v_from_single_collection['bkg1'])
def test_constraints(self):
constraint_from_single_collection = self.single_collection.constraints()['bkg1']
self.assertEqual(0.5, constraint_from_single_collection)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testTemplates']
unittest.main()
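# A minimal construction sketch mirroring setUp() above, outside the unittest
# machinery; it only uses the constructors and methods the tests already exercise.
def _demo_fit_data_collection():
    h_bkg = Hist(100, 40, 200, title='Background')
    h_sig = Hist(100, 40, 200, title='Signal')
    h_obs = Hist(100, 40, 200, title='Data')
    map(h_bkg.Fill, x1)     # Python 2 map: fills eagerly, as in setUp()
    map(h_sig.Fill, x2)
    map(h_obs.Fill, x1_obs)
    map(h_obs.Fill, x2_obs)
    fit_data = FitData(h_obs, {'signal': h_sig, 'bkg1': h_bkg},
                       fit_boundaries=(x_min, x_max))
    collection = FitDataCollection()
    collection.add(fit_data)
    collection.set_normalisation_constraints({'bkg1': 0.5})
    print(collection.mc_normalisation())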
| 45.064706
| 216
| 0.679154
|
0ef56fbbc445a2a295097b6922f77a19ccba8e0e
| 26,635
|
py
|
Python
|
src/folio_migration_tools/migration_tasks/loans_migrator.py
|
chadmcinnis/folio_migration_tools
|
39ee044a713a34c323324a956e3e8b54ee05c194
|
[
"MIT"
] | null | null | null |
src/folio_migration_tools/migration_tasks/loans_migrator.py
|
chadmcinnis/folio_migration_tools
|
39ee044a713a34c323324a956e3e8b54ee05c194
|
[
"MIT"
] | null | null | null |
src/folio_migration_tools/migration_tasks/loans_migrator.py
|
chadmcinnis/folio_migration_tools
|
39ee044a713a34c323324a956e3e8b54ee05c194
|
[
"MIT"
] | null | null | null |
import copy
import csv
import json
import logging
import sys
import time
import traceback
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from typing import Optional
from urllib.error import HTTPError
import requests
from dateutil import parser as du_parser
from folio_uuid.folio_namespaces import FOLIONamespaces
from pydantic import BaseModel
from folio_migration_tools.circulation_helper import CirculationHelper
from folio_migration_tools.custom_dict import InsensitiveDictReader
from folio_migration_tools.helper import Helper
from folio_migration_tools.library_configuration import FileDefinition
from folio_migration_tools.library_configuration import LibraryConfiguration
from folio_migration_tools.migration_report import MigrationReport
from folio_migration_tools.migration_tasks.migration_task_base import MigrationTaskBase
from folio_migration_tools.report_blurbs import Blurbs
from folio_migration_tools.transaction_migration.legacy_loan import LegacyLoan
from folio_migration_tools.transaction_migration.transaction_result import (
TransactionResult,
)
class LoansMigrator(MigrationTaskBase):
class TaskConfiguration(BaseModel):
name: str
utc_difference: int
migration_task_type: str
open_loans_file: FileDefinition
fallback_service_point_id: str
starting_row: Optional[int] = 1
item_files: Optional[list[FileDefinition]] = []
patron_files: Optional[list[FileDefinition]] = []
@staticmethod
def get_object_type() -> FOLIONamespaces:
return FOLIONamespaces.loans
def __init__(
self,
task_configuration: TaskConfiguration,
library_config: LibraryConfiguration,
):
csv.register_dialect("tsv", delimiter="\t")
self.migration_report = MigrationReport()
self.valid_legacy_loans = []
super().__init__(library_config, task_configuration)
self.circulation_helper = CirculationHelper(
self.folio_client,
task_configuration.fallback_service_point_id,
self.migration_report,
)
with open(
self.folder_structure.legacy_records_folder
/ task_configuration.open_loans_file.file_name,
"r",
encoding="utf-8",
) as loans_file:
self.semi_valid_legacy_loans = list(
self.load_and_validate_legacy_loans(
InsensitiveDictReader(loans_file, dialect="tsv")
)
)
logging.info(
"Loaded and validated %s loans in file",
len(self.semi_valid_legacy_loans),
)
if any(self.task_configuration.item_files) or any(self.task_configuration.patron_files):
self.valid_legacy_loans = list(self.check_barcodes())
logging.info(
"Loaded and validated %s loans against barcodes",
len(self.valid_legacy_loans),
)
else:
            logging.info(
                "No item or user files supplied. Not validating against "
                "previously migrated objects"
            )
self.valid_legacy_loans = self.semi_valid_legacy_loans
self.patron_item_combos = set()
self.t0 = time.time()
self.num_duplicate_loans = 0
self.skipped_since_already_added = 0
self.processed_items = set()
self.failed = {}
self.num_legacy_loans_processed = 0
self.failed_and_not_dupe = {}
logging.info("Starting row is %s", task_configuration.starting_row)
logging.info("Init completed")
def do_work(self):
logging.info("Starting")
if self.task_configuration.starting_row > 1:
logging.info(f"Skipping {(self.task_configuration.starting_row-1)} records")
for num_loans, legacy_loan in enumerate(
self.valid_legacy_loans[self.task_configuration.starting_row :], start=1
):
t0_migration = time.time()
self.migration_report.add_general_statistics("Processed loans")
try:
self.checkout_single_loan(legacy_loan)
except Exception as ee:
logging.exception(
f"Error in row {num_loans} Item barcode: {legacy_loan.item_barcode} "
f"Patron barcode: {legacy_loan.patron_barcode} {ee}"
)
if num_loans % 25 == 0:
logging.info(f"{timings(self.t0, t0_migration, num_loans)} {num_loans}")
def checkout_single_loan(self, legacy_loan: LegacyLoan):
"""Checks a legacy loan out. Retries once if it fails.
Args:
legacy_loan (LegacyLoan): The Legacy loan
"""
res_checkout = self.circulation_helper.check_out_by_barcode(legacy_loan)
if res_checkout.was_successful:
self.migration_report.add(Blurbs.Details, "Checked out on first try")
self.set_renewal_count(legacy_loan, res_checkout)
self.set_new_status(legacy_loan, res_checkout)
elif res_checkout.should_be_retried:
res_checkout2 = self.handle_checkout_failure(legacy_loan, res_checkout)
if res_checkout2.was_successful and res_checkout2.folio_loan:
self.migration_report.add(Blurbs.Details, "Checked out on second try")
logging.info("Checked out on second try")
self.set_renewal_count(legacy_loan, res_checkout2)
self.set_new_status(legacy_loan, res_checkout2)
elif legacy_loan.item_barcode not in self.failed:
self.failed[legacy_loan.item_barcode] = legacy_loan
logging.error("Failed on second try: %s", res_checkout2.error_message)
self.migration_report.add(
Blurbs.Details,
f"Second failure: {res_checkout2.migration_report_message}",
)
elif not res_checkout.should_be_retried:
logging.error("Failed first time. No retries: %s", res_checkout.error_message)
self.migration_report.add(
Blurbs.Details,
f"Failed 1st time. No retries: {res_checkout.migration_report_message}",
)
def set_new_status(self, legacy_loan: LegacyLoan, res_checkout: TransactionResult):
"""Updates checkout loans with their destination statuses
Args:
            legacy_loan (LegacyLoan): the legacy loan carrying the next_item_status to apply
            res_checkout (TransactionResult): the checkout result holding the FOLIO loan to update
"""
# set new statuses
if legacy_loan.next_item_status == "Declared lost":
self.declare_lost(res_checkout.folio_loan)
elif legacy_loan.next_item_status == "Claimed returned":
self.claim_returned(res_checkout.folio_loan)
elif legacy_loan.next_item_status not in ["Available", "", "Checked out"]:
self.set_item_status(legacy_loan)
def set_renewal_count(self, legacy_loan: LegacyLoan, res_checkout: TransactionResult):
if legacy_loan.renewal_count > 0:
self.update_open_loan(res_checkout.folio_loan, legacy_loan)
self.migration_report.add_general_statistics("Updated renewal count for loan")
def wrap_up(self):
for k, v in self.failed.items():
self.failed_and_not_dupe[k] = [v.to_dict()]
self.migration_report.set(
Blurbs.GeneralStatistics, "Failed loans", len(self.failed_and_not_dupe)
)
self.migration_report.set(
Blurbs.GeneralStatistics,
"Total Rows in file",
self.num_legacy_loans_processed,
)
self.write_failed_loans_to_file()
with open(self.folder_structure.migration_reports_file, "w+") as report_file:
report_file.write("# Loans migration results \n")
report_file.write(f"Time Finished: {datetime.isoformat(datetime.now(timezone.utc))}\n")
self.migration_report.write_migration_report(report_file)
def write_failed_loans_to_file(self):
csv_columns = [
"due_date",
"item_barcode",
"next_item_status",
"out_date",
"patron_barcode",
"renewal_count",
]
with open(self.folder_structure.failed_recs_path, "w+") as failed_loans_file:
writer = csv.DictWriter(failed_loans_file, fieldnames=csv_columns, dialect="tsv")
writer.writeheader()
for _k, failed_loan in self.failed_and_not_dupe.items():
writer.writerow(failed_loan[0])
def check_barcodes(self):
user_barcodes = set()
item_barcodes = set()
self.circulation_helper.load_migrated_item_barcodes(
item_barcodes, self.task_configuration.item_files, self.folder_structure
)
self.circulation_helper.load_migrated_user_barcodes(
user_barcodes, self.task_configuration.patron_files, self.folder_structure
)
for loan in self.semi_valid_legacy_loans:
has_item_barcode = loan.item_barcode in item_barcodes
has_patron_barcode = loan.patron_barcode in user_barcodes
if has_item_barcode and has_patron_barcode:
self.migration_report.add_general_statistics(
"Loans verified against migrated user and item"
)
yield loan
else:
self.migration_report.add(
Blurbs.DiscardedLoans,
f"Loans discarded. Had migrated item barcode: {has_item_barcode}. "
f"Had migrated user barcode: {has_patron_barcode}",
)
if not has_item_barcode:
Helper.log_data_issue(
"", "Loan without matched item barcode", json.dumps(loan.to_dict())
)
if not has_patron_barcode:
Helper.log_data_issue(
"",
"Loan without matched patron barcode",
json.dumps(loan.to_dict()),
)
def load_and_validate_legacy_loans(self, loans_reader):
num_bad = 0
legacy_loan_count = 0
logging.info("Validating legacy loans in file...")
for legacy_loan_count, legacy_loan_dict in enumerate(loans_reader, start=1):
try:
legacy_loan = LegacyLoan(
legacy_loan_dict,
self.task_configuration.utc_difference,
legacy_loan_count,
)
if any(legacy_loan.errors):
num_bad += 1
self.migration_report.add_general_statistics("Discarded Loans")
for error in legacy_loan.errors:
self.migration_report.add(
Blurbs.DiscardedLoans, f"{error[0]} - {error[1]}"
)
else:
yield legacy_loan
except ValueError as ve:
logging.exception(ve)
logging.info(
f"Done validating {legacy_loan_count} legacy loans with {num_bad} rotten apples"
)
if legacy_loan_count and num_bad / legacy_loan_count > 0.5:
q = num_bad / legacy_loan_count
logging.error("%s percent of loans failed to validate.", (q * 100))
self.migration_report.log_me()
logging.critical("Halting...")
sys.exit(1)
def handle_checkout_failure(
self, legacy_loan, folio_checkout: TransactionResult
) -> TransactionResult:
"""Determines what can be done about a previously failed transaction
Args:
legacy_loan (_type_): The legacy loan
folio_checkout (TransactionResult): The results from the previous transaction
Returns:
TransactionResult: A modified TransactionResult based on the result from the
handling
"""
folio_checkout.should_be_retried = False
if folio_checkout.error_message == "5XX":
return folio_checkout
if folio_checkout.error_message.startswith(
"No patron with barcode"
) or folio_checkout.error_message.startswith("Patron barcode already detected"):
return folio_checkout
elif folio_checkout.error_message.startswith("No item with barcode"):
return folio_checkout
elif folio_checkout.error_message.startswith(
"Cannot check out item that already has an open loan"
):
return folio_checkout
elif folio_checkout.error_message.startswith("Aged to lost for item"):
return self.handle_aged_to_lost_item(legacy_loan)
elif folio_checkout.error_message == "Declared lost":
return folio_checkout
elif folio_checkout.error_message.startswith("Cannot check out to inactive user"):
return self.checkout_to_inactive_user(legacy_loan)
else:
self.migration_report.add(
Blurbs.Details,
f"Other checkout failure: {folio_checkout.error_message}",
)
# First failure. Add to list of failed loans
if legacy_loan.item_barcode not in self.failed:
self.failed[legacy_loan.item_barcode] = legacy_loan
else:
logging.debug(
f"Loan already in failed. item barcode {legacy_loan.item_barcode} "
f"Patron barcode: {legacy_loan.patron_barcode}"
)
self.failed_and_not_dupe[legacy_loan.item_barcode] = [
legacy_loan,
self.failed[legacy_loan.item_barcode],
]
logging.info(
f"Duplicate loans (or failed twice) Item barcode: "
f"{legacy_loan.item_barcode} Patron barcode: {legacy_loan.patron_barcode}"
)
self.migration_report.add(Blurbs.Details, "Duplicate loans (or failed twice)")
del self.failed[legacy_loan.item_barcode]
return TransactionResult(False, False, "", "", "")
def checkout_to_inactive_user(self, legacy_loan) -> TransactionResult:
logging.info("Cannot check out to inactive user. Activating and trying again")
user = self.get_user_by_barcode(legacy_loan.patron_barcode)
expiration_date = user.get("expirationDate", datetime.isoformat(datetime.now()))
user["expirationDate"] = datetime.isoformat(datetime.now() + timedelta(days=1))
self.activate_user(user)
logging.debug("Successfully Activated user")
res = self.circulation_helper.check_out_by_barcode(legacy_loan) # checkout_and_update
self.migration_report.add(Blurbs.Details, res.migration_report_message)
self.deactivate_user(user, expiration_date)
logging.debug("Successfully Deactivated user again")
self.migration_report.add(Blurbs.Details, "Handled inactive users")
return res
def handle_aged_to_lost_item(self, legacy_loan) -> TransactionResult:
logging.debug("Setting Available")
legacy_loan.next_item_status = "Available"
self.set_item_status(legacy_loan)
res_checkout = self.circulation_helper.check_out_by_barcode(legacy_loan)
legacy_loan.next_item_status = "Aged to lost"
self.set_item_status(legacy_loan)
s = "Successfully Checked out Aged to lost item and put the status back"
logging.info(s)
self.migration_report.add(Blurbs.Details, s)
return res_checkout
def update_open_loan(self, folio_loan: dict, legacy_loan: LegacyLoan):
due_date = du_parser.isoparse(str(legacy_loan.due_date))
out_date = du_parser.isoparse(str(legacy_loan.out_date))
renewal_count = legacy_loan.renewal_count
# TODO: add logging instead of print out
try:
loan_to_put = copy.deepcopy(folio_loan)
del loan_to_put["metadata"]
loan_to_put["dueDate"] = due_date.isoformat()
loan_to_put["loanDate"] = out_date.isoformat()
loan_to_put["renewalCount"] = renewal_count
url = f"{self.folio_client.okapi_url}/circulation/loans/{loan_to_put['id']}"
req = requests.put(
url,
headers=self.folio_client.okapi_headers,
data=json.dumps(loan_to_put),
)
if req.status_code == 422:
error_message = json.loads(req.text)["errors"][0]["message"]
s = f"Update open loan error: {error_message} {req.status_code}"
self.migration_report.add(Blurbs.Details, s)
logging.error(s)
return False
elif req.status_code in [201, 204]:
self.migration_report.add(
Blurbs.Details,
f"Successfully updated open loan ({req.status_code})",
)
return True
else:
self.migration_report.add(
Blurbs.Details,
f"Update open loan error http status: {req.status_code}",
)
req.raise_for_status()
logging.debug("Updating open loan was successful")
return True
except HTTPError as exception:
logging.error(
f"{req.status_code} PUT FAILED Extend loan to {loan_to_put['dueDate']}"
f"\t {url}\t{json.dumps(loan_to_put)}"
)
traceback.print_exc()
logging.error(exception)
return False
def handle_previously_failed_loans(self, loan):
if loan["item_id"] in self.failed:
s = "Loan succeeded but failed previously. Removing from failed "
logging.info(s)
del self.failed[loan["item_id"]]
def declare_lost(self, folio_loan):
declare_lost_url = f"/circulation/loans/{folio_loan['id']}/declare-item-lost"
logging.debug(f"Declare lost url:{declare_lost_url}")
due_date = du_parser.isoparse(folio_loan["dueDate"])
data = {
"declaredLostDateTime": datetime.isoformat(due_date + timedelta(days=1)),
"comment": "Created at migration. Date is due date + 1 day",
"servicePointId": str(self.task_configuration.fallback_service_point_id),
}
logging.debug(f"Declare lost data: {json.dumps(data, indent=4)}")
if self.folio_put_post(declare_lost_url, data, "POST", "Declare item as lost"):
self.migration_report.add(Blurbs.Details, "Successfully declared loan as lost")
else:
logging.error(f"Unsuccessfully declared loan {folio_loan} as lost")
self.migration_report.add(Blurbs.Details, "Unsuccessfully declared loan as lost")
# TODO: Exception handling
def claim_returned(self, folio_loan):
claim_returned_url = f"/circulation/loans/{folio_loan['id']}/claim-item-returned"
logging.debug(f"Claim returned url:{claim_returned_url}")
due_date = du_parser.isoparse(folio_loan["dueDate"])
data = {
"itemClaimedReturnedDateTime": datetime.isoformat(due_date + timedelta(days=1)),
"comment": "Created at migration. Date is due date + 1 day",
}
logging.debug(f"Claim returned data:\t{json.dumps(data)}")
if self.folio_put_post(claim_returned_url, data, "POST", "Claim item returned"):
self.migration_report.add(
Blurbs.Details, "Successfully declared loan as Claimed returned"
)
else:
logging.error(f"Failed to declare loan {folio_loan} as Claimed returned")
self.migration_report.add(
Blurbs.Details,
"Failed to declare loan as Claimed returned",
)
# TODO: Exception handling
def set_item_status(self, legacy_loan: LegacyLoan):
try:
# Get Item by barcode, update status.
item_path = f'item-storage/items?query=(barcode=="{legacy_loan.item_barcode}")'
item_url = f"{self.folio_client.okapi_url}/{item_path}"
resp = requests.get(item_url, headers=self.folio_client.okapi_headers)
resp.raise_for_status()
data = resp.json()
folio_item = data["items"][0]
folio_item["status"]["name"] = legacy_loan.next_item_status
if self.update_item(folio_item):
self.migration_report.add(
Blurbs.Details,
f"Successfully set item status to {legacy_loan.next_item_status}",
)
logging.debug(
f"Successfully set item with barcode "
f"{legacy_loan.item_barcode} to {legacy_loan.next_item_status}"
)
else:
if legacy_loan.item_barcode not in self.failed:
self.failed[legacy_loan.item_barcode] = legacy_loan
logging.error(
f"Error when setting item with barcode "
f"{legacy_loan.item_barcode} to {legacy_loan.next_item_status}"
)
self.migration_report.add(
Blurbs.Details,
f"Error setting item status to {legacy_loan.next_item_status}",
)
except Exception as ee:
logging.error(
f"{resp.status_code} when trying to set item with barcode "
f"{legacy_loan.item_barcode} to {legacy_loan.next_item_status} {ee}"
)
raise ee
def activate_user(self, user):
user["active"] = True
self.update_user(user)
self.migration_report.add(Blurbs.Details, "Successfully activated user")
def deactivate_user(self, user, expiration_date):
user["expirationDate"] = expiration_date
user["active"] = False
self.update_user(user)
self.migration_report.add(Blurbs.Details, "Successfully deactivated user")
def update_item(self, item):
url = f'/item-storage/items/{item["id"]}'
return self.folio_put_post(url, item, "PUT", "Update item")
def update_user(self, user):
url = f'/users/{user["id"]}'
self.folio_put_post(url, user, "PUT", "Update user")
def get_user_by_barcode(self, barcode):
url = f'{self.folio_client.okapi_url}/users?query=(barcode=="{barcode}")'
resp = requests.get(url, headers=self.folio_client.okapi_headers)
resp.raise_for_status()
data = resp.json()
return data["users"][0]
def folio_put_post(self, url, data_dict, verb, action_description=""):
full_url = f"{self.folio_client.okapi_url}{url}"
try:
if verb == "PUT":
resp = requests.put(
full_url,
headers=self.folio_client.okapi_headers,
data=json.dumps(data_dict),
)
elif verb == "POST":
resp = requests.post(
full_url,
headers=self.folio_client.okapi_headers,
data=json.dumps(data_dict),
)
else:
raise Exception("Bad verb")
if resp.status_code == 422:
error_message = json.loads(resp.text)["errors"][0]["message"]
logging.error(error_message)
self.migration_report.add(
Blurbs.Details, f"{action_description} error: {error_message}"
)
resp.raise_for_status()
elif resp.status_code in [201, 204]:
self.migration_report.add(
Blurbs.Details,
f"Successfully {action_description} ({resp.status_code})",
)
else:
self.migration_report.add(
Blurbs.Details,
f"{action_description} error. http status: {resp.status_code}",
)
resp.raise_for_status()
return True
except HTTPError as exception:
logging.error(f"{resp.status_code}. {verb} FAILED for {url}")
traceback.print_exc()
logging.info(exception)
return False
def change_due_date(self, folio_loan, legacy_loan):
try:
api_path = f"{folio_loan['id']}/change-due-date"
api_url = f"{self.folio_client.okapi_url}/circulation/loans/{api_path}"
body = {"dueDate": du_parser.isoparse(str(legacy_loan.due_date)).isoformat()}
req = requests.post(
api_url, headers=self.folio_client.okapi_headers, data=json.dumps(body)
)
if req.status_code == 422:
error_message = json.loads(req.text)["errors"][0]["message"]
self.migration_report.add(
Blurbs.Details, f"Change due date error: {error_message}"
)
logging.info(
f"{error_message}\t",
)
self.migration_report.add(Blurbs.Details, error_message)
return False, None, None
elif req.status_code == 201:
self.migration_report.add(
Blurbs.Details, f"Successfully changed due date ({req.status_code})"
)
return True, json.loads(req.text), None
elif req.status_code == 204:
self.migration_report.add(
Blurbs.Details, f"Successfully changed due date ({req.status_code})"
)
return True, None, None
else:
self.migration_report.add(
Blurbs.Details,
f"Update open loan error http status: {req.status_code}",
)
req.raise_for_status()
except HTTPError as exception:
logging.info(
f"{req.status_code} POST FAILED Change Due Date to {api_url}\t{json.dumps(body)})"
)
traceback.print_exc()
logging.info(exception)
return False, None, None
def timings(t0, t0func, num_objects):
avg = num_objects / (time.time() - t0)
elapsed = time.time() - t0
elapsed_func = time.time() - t0func
return (
f"Total objects: {num_objects}\tTotal elapsed: {elapsed:.2f}\t"
f"Average per object: {avg:.2f}\tElapsed this time: {elapsed_func:.2f}"
)
| 44.317804
| 99
| 0.610325
|
77278587fed394619dab0cb9d6f09fc8409af44f
| 516
|
py
|
Python
|
constants.py
|
tchayintr/cfparser-service
|
8a28a572c1570efc845cfe5786cb9730f111d777
|
[
"Apache-2.0"
] | null | null | null |
constants.py
|
tchayintr/cfparser-service
|
8a28a572c1570efc845cfe5786cb9730f111d777
|
[
"Apache-2.0"
] | 1
|
2020-05-18T04:43:32.000Z
|
2020-05-18T04:43:32.000Z
|
constants.py
|
tchayintr/cfparser-service
|
8a28a572c1570efc845cfe5786cb9730f111d777
|
[
"Apache-2.0"
] | null | null | null |
# for app
APP_DEFAULT_BRACKETS_FORMAT = '[]'
APP_DEFAULT_DELIMITER_PARSED_TREE = '▏' # U+258F (Left one eighth block)
APP_DEFAULT_DELIMITER_PROB = '▁' # U+2581 (Lower one eighth block)
APP_DEFAULT_JSONIFY_KEY_RESULT = 'result'
APP_DEFAULT_LOG_DIR = 'log'
APP_DEFAULT_MODEL_PATH = 'models/main/cfparser-app.model'
APP_DEFAULT_PCFG_FORMAT = 'pcfg'
APP_DEFAULT_ROOT_NONTERMINAL = 'S'
APP_DEFAULT_SAMPLE_FORMAT = '{}{}{}{}'
APP_DEFAULT_UNUSED_NONTERMINAL = 'UNUSED'
APP_DEFAULT_VITERBI_MODEL = 'viterbi'
| 36.857143
| 77
| 0.763566
|
5faaf916b74178a25c45da68d435aa335f5db106
| 11,956
|
py
|
Python
|
airflow/operators/s3_to_hive_operator.py
|
dmnpignaud/incubator-airflow
|
9b5d2f6f6ca5f81e94169f1fd49e4372d0e88bfb
|
[
"Apache-2.0"
] | 2
|
2018-11-07T10:02:34.000Z
|
2018-11-07T10:03:40.000Z
|
airflow/airflow/operators/s3_to_hive_operator.py
|
kira-lin/ve450-airflow-on-k8s
|
f28e8b468568c8623134db5a1a8757860788799f
|
[
"Apache-2.0"
] | 1
|
2018-11-05T21:12:08.000Z
|
2019-07-26T21:00:05.000Z
|
airflow/airflow/operators/s3_to_hive_operator.py
|
kira-lin/ve450-airflow-on-k8s
|
f28e8b468568c8623134db5a1a8757860788799f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from builtins import next
from builtins import zip
from tempfile import NamedTemporaryFile
from airflow.utils.file import TemporaryDirectory
import gzip
import bz2
import tempfile
import os
from airflow.exceptions import AirflowException
from airflow.hooks.S3_hook import S3Hook
from airflow.hooks.hive_hooks import HiveCliHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.compression import uncompress_file
class S3ToHiveTransfer(BaseOperator):
"""
Moves data from S3 to Hive. The operator downloads a file from S3,
stores the file locally before loading it into a Hive table.
If the ``create`` or ``recreate`` arguments are set to ``True``,
``CREATE TABLE`` and ``DROP TABLE`` statements are generated.
Hive data types are inferred from the cursor's metadata.
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param s3_key: The key to be retrieved from S3. (templated)
:type s3_key: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values
:type field_dict: dict
:param hive_table: target Hive table, use dot notation to target a
specific database. (templated)
:type hive_table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param partition: target partition as a dict of partition columns
and values. (templated)
:type partition: dict
:param headers: whether the file contains column names on the first
line
:type headers: bool
:param check_headers: whether the column names on the first line should be
checked against the keys of field_dict
:type check_headers: bool
:param wildcard_match: whether the s3_key should be interpreted as a Unix
wildcard pattern
:type wildcard_match: bool
:param delimiter: field delimiter in the file
:type delimiter: str
:param aws_conn_id: source s3 connection
:type aws_conn_id: str
:param hive_cli_conn_id: destination hive connection
:type hive_cli_conn_id: str
:param input_compressed: Boolean to determine if file decompression is
required to process headers
:type input_compressed: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
:param select_expression: S3 Select expression
:type select_expression: str
"""
template_fields = ('s3_key', 'partition', 'hive_table')
template_ext = ()
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
s3_key,
field_dict,
hive_table,
delimiter=',',
create=True,
recreate=False,
partition=None,
headers=False,
check_headers=False,
wildcard_match=False,
aws_conn_id='aws_default',
hive_cli_conn_id='hive_cli_default',
input_compressed=False,
tblproperties=None,
select_expression=None,
*args, **kwargs):
super(S3ToHiveTransfer, self).__init__(*args, **kwargs)
self.s3_key = s3_key
self.field_dict = field_dict
self.hive_table = hive_table
self.delimiter = delimiter
self.create = create
self.recreate = recreate
self.partition = partition
self.headers = headers
self.check_headers = check_headers
self.wildcard_match = wildcard_match
self.hive_cli_conn_id = hive_cli_conn_id
self.aws_conn_id = aws_conn_id
self.input_compressed = input_compressed
self.tblproperties = tblproperties
self.select_expression = select_expression
if (self.check_headers and
not (self.field_dict is not None and self.headers)):
raise AirflowException("To check_headers provide " +
"field_dict and headers")
def execute(self, context):
# Downloading file from S3
self.s3 = S3Hook(aws_conn_id=self.aws_conn_id)
self.hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id)
self.log.info("Downloading S3 file")
if self.wildcard_match:
if not self.s3.check_for_wildcard_key(self.s3_key):
raise AirflowException("No key matches {0}"
.format(self.s3_key))
s3_key_object = self.s3.get_wildcard_key(self.s3_key)
else:
if not self.s3.check_for_key(self.s3_key):
raise AirflowException(
"The key {0} does not exists".format(self.s3_key))
s3_key_object = self.s3.get_key(self.s3_key)
root, file_ext = os.path.splitext(s3_key_object.key)
if (self.select_expression and self.input_compressed and
file_ext.lower() != '.gz'):
raise AirflowException("GZIP is the only compression " +
"format Amazon S3 Select supports")
with TemporaryDirectory(prefix='tmps32hive_') as tmp_dir,\
NamedTemporaryFile(mode="wb",
dir=tmp_dir,
suffix=file_ext) as f:
self.log.info("Dumping S3 key {0} contents to local file {1}"
.format(s3_key_object.key, f.name))
if self.select_expression:
option = {}
if self.headers:
option['FileHeaderInfo'] = 'USE'
if self.delimiter:
option['FieldDelimiter'] = self.delimiter
input_serialization = {'CSV': option}
if self.input_compressed:
input_serialization['CompressionType'] = 'GZIP'
content = self.s3.select_key(
bucket_name=s3_key_object.bucket_name,
key=s3_key_object.key,
expression=self.select_expression,
input_serialization=input_serialization
)
f.write(content.encode("utf-8"))
else:
s3_key_object.download_fileobj(f)
f.flush()
if self.select_expression or not self.headers:
self.log.info("Loading file %s into Hive", f.name)
self.hive.load_file(
f.name,
self.hive_table,
field_dict=self.field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate,
tblproperties=self.tblproperties)
else:
# Decompressing file
if self.input_compressed:
self.log.info("Uncompressing file %s", f.name)
fn_uncompressed = uncompress_file(f.name,
file_ext,
tmp_dir)
self.log.info("Uncompressed to %s", fn_uncompressed)
# uncompressed file available now so deleting
# compressed file to save disk space
f.close()
else:
fn_uncompressed = f.name
# Testing if header matches field_dict
if self.check_headers:
self.log.info("Matching file header against field_dict")
header_list = self._get_top_row_as_list(fn_uncompressed)
if not self._match_headers(header_list):
raise AirflowException("Header check failed")
# Deleting top header row
self.log.info("Removing header from file %s", fn_uncompressed)
headless_file = (
self._delete_top_row_and_compress(fn_uncompressed,
file_ext,
tmp_dir))
self.log.info("Headless file %s", headless_file)
self.log.info("Loading file %s into Hive", headless_file)
self.hive.load_file(headless_file,
self.hive_table,
field_dict=self.field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate,
tblproperties=self.tblproperties)
def _get_top_row_as_list(self, file_name):
with open(file_name, 'rt') as f:
header_line = f.readline().strip()
header_list = header_line.split(self.delimiter)
return header_list
def _match_headers(self, header_list):
if not header_list:
raise AirflowException("Unable to retrieve header row from file")
field_names = self.field_dict.keys()
if len(field_names) != len(header_list):
self.log.warning("Headers count mismatch"
"File headers:\n {header_list}\n"
"Field names: \n {field_names}\n"
.format(**locals()))
return False
test_field_match = [h1.lower() == h2.lower()
for h1, h2 in zip(header_list, field_names)]
if not all(test_field_match):
self.log.warning("Headers do not match field names"
"File headers:\n {header_list}\n"
"Field names: \n {field_names}\n"
.format(**locals()))
return False
else:
return True
def _delete_top_row_and_compress(
self,
input_file_name,
output_file_ext,
dest_dir):
# When output_file_ext is not defined, file is not compressed
open_fn = open
if output_file_ext.lower() == '.gz':
open_fn = gzip.GzipFile
elif output_file_ext.lower() == '.bz2':
open_fn = bz2.BZ2File
os_fh_output, fn_output = \
tempfile.mkstemp(suffix=output_file_ext, dir=dest_dir)
with open(input_file_name, 'rb') as f_in,\
open_fn(fn_output, 'wb') as f_out:
f_in.seek(0)
next(f_in)
for line in f_in:
f_out.write(line)
return fn_output
| 41.950877
| 78
| 0.588658
|
a9a7302bf2bdc1e18b089f25ec17e63f11337e81
| 22
|
py
|
Python
|
Python/BackgroundApp/BackgroundApp/PythonHome/WinRTExtension.zip/WinRT/__init__.py
|
Carlosgm02/UWP-Languages
|
b5653c8f452b204645e3b6276caa95de2432f77e
|
[
"MIT"
] | 6
|
2019-10-30T08:41:15.000Z
|
2021-02-24T09:20:46.000Z
|
Python/BackgroundApp/BackgroundApp/PythonHome/WinRTExtension.zip/WinRT/__init__.py
|
carlosgm02/uwp-languages
|
b5653c8f452b204645e3b6276caa95de2432f77e
|
[
"MIT"
] | null | null | null |
Python/BackgroundApp/BackgroundApp/PythonHome/WinRTExtension.zip/WinRT/__init__.py
|
carlosgm02/uwp-languages
|
b5653c8f452b204645e3b6276caa95de2432f77e
|
[
"MIT"
] | null | null | null |
from _winrt import *
| 11
| 21
| 0.727273
|
7cb2c249267e795cb5962e9a7f6364578468c89d
| 4,781
|
py
|
Python
|
subaru_calcs.py
|
mikeireland/opticstools
|
8ce59ee9016e871e92c412a9beb908f2354319b6
|
[
"MIT"
] | null | null | null |
subaru_calcs.py
|
mikeireland/opticstools
|
8ce59ee9016e871e92c412a9beb908f2354319b6
|
[
"MIT"
] | null | null | null |
subaru_calcs.py
|
mikeireland/opticstools
|
8ce59ee9016e871e92c412a9beb908f2354319b6
|
[
"MIT"
] | null | null | null |
"""Some diffraction calculations for the RHEA slit feed. Computes the overlap between
a diffraction limited beam and the RHEA IFU, averaging over a user-defined array of
pointing offsets in lenslet units. Overlaps are computed both at the
microlens plane and the fiber plane, demonstrating self-consistency.
In the case of the laboratory calculations, I assume that the coupling is the average
of the coupling over all angles, i.e. that the output of a multi-mode fiber can be
considered as an incoherent sum over all input angles.
Central lenslet mean coupling = 0.292
Edge/top lenslet mean couplings = [0.012,0.028,0.012,0.028]. Sum=0.080
Corner lenslet mean coupling = [0.002,0.010,0.010,0.010]. Sum=0.032
Total coupling = 0.404.
In the lab, with a smaller "pupil" from the SM28 fiber:
0.064
0.049 * 4
0.0369 * 4
Total Coupling = 0.407
For multi-mode fiber inputs...
50 microns: 29.5%
38 microns: 32.7%
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import opticstools as ot
import pdb
from scipy.ndimage.interpolation import shift
plt.ion()
#Firstly, define a fiber beam
wave = 0.74e-6 #0.65e-6
m_pix = 0.1e-6
core_diam = 3.5e-6
numerical_aperture = 0.13
sz = 1024
llet_f = 4.64 * 1.1 #Lenslet focal length in mm
llet_w = 1.0 #Lenslet width in mm
nf = 20
nf = 1
f_ratios = np.linspace(1150,1150,nf)
obstruct = 0.25
#Fiber offset.
offset = 0.0e-6; label = 'Perfect Alignment'
#offset = 1.0e-6; label = '1 micron offset'
#offset = 2.0e-6; label = '2 microns offset'
#Offset of the lenslet in mm
llet_offsets=np.array( [[0,0]])
nx = 10
x = (np.arange(nx) + 0.5)/20.0 #Single-sided
x = (np.arange(nx) + 0.5 - nx//2)/10.0 #Dual-sided
xy = np.meshgrid(x,x)
llet_offsets = np.rollaxis(np.array([xy[0],xy[1]]),0,3).reshape(nx*nx,2)
plotit = False
#Now a calculation that mimics the laboratory setup.
pup_size_microns_physical_mm = 1.45/300*7.2
pup_size_lab = 50e-3 #or 9e-3
#pup_size_lab = 38e-3 #Trying to maximise flux.
#Set non-None for this "special" calculation.
lab_pup_scale = pup_size_lab/pup_size_microns_physical_mm
#lab_pup_scale = None
#----
rad_pix = wave/(sz*m_pix)
#Metres per pixel in the lenslet plane.
m_pix_llet = rad_pix*llet_f/1e3
V = ot.compute_v_number(wave, core_diam/2, numerical_aperture)
#Compute the fundamental mode of the fiber
fib_mode = ot.mode_2d(V, core_diam/2, j=0, n=0, sampling=m_pix, sz=sz)
#Compute the far field distribution for this fiber
fib_angle = np.real(np.fft.fftshift(np.fft.fft2(np.fft.fftshift(fib_mode))))
#Offset the fiber mode to account for misalignments.
fib_mode = shift(fib_mode.real,(offset/m_pix,0), order=1)
#Create a variable "mode" which is the fiber mode in the lenslet plane.
llet = ot.square(sz, llet_w/rad_pix/llet_f)
mode = llet * fib_angle
fib_llet_loss = np.sum(mode**2)/np.sum(fib_angle**2)
couplings1 = []
couplings2 = []
#Loop through all lenslet offsets (up to +/- half a lenslet) and input
#system focal ratios, computing coupling.
for llet_offset in llet_offsets:
for f_ratio in f_ratios:
l_d_pix = f_ratio*wave/m_pix_llet
pup_diam_pix = sz/l_d_pix
#The input pupil, which changes its size dependent on focal ratio.
pup = ot.circle(sz, pup_diam_pix) - ot.circle(sz, pup_diam_pix*obstruct)
#"Special" calculation of lab pupil...
if lab_pup_scale:
pup = ot.circle(sz, pup_diam_pix*lab_pup_scale)
#Create a psf, shift it by the offset and truncate.
psf = np.real(np.fft.fftshift(np.fft.fft2(np.fft.fftshift(pup))))
psf = shift(psf, llet_offset*llet_w/rad_pix/llet_f, order=1)
psf_trunc = psf * llet
#Compute the loss associated with this truncation.
llet_loss = np.sum(psf_trunc**2)/np.sum(psf**2)
#The PSF at the fiber is complex in general
psf_fiber = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(psf_trunc)))
#Couplings1 is coupling "at the microlens array", not taking into account lenslet loss.
couplings1.append(np.sum(psf*mode)**2/np.sum(psf**2)/np.sum(mode**2)*fib_llet_loss)
#Couplings2 is coupling "at the fiber", taking into account the lenslet loss.
couplings2.append(np.abs(np.sum(psf_fiber*fib_mode))**2/np.sum(np.abs(psf_fiber)**2)/np.sum(fib_mode**2)*llet_loss)
#plt.clf()
couplings1 = np.array(couplings1).reshape(len(llet_offsets), len(f_ratios))
couplings2 = np.array(couplings2).reshape(len(llet_offsets), len(f_ratios))
print(np.mean(couplings1))
#plt.plot(f_ratios,couplings1,label='Total Coupling')
if plotit:
plt.plot(f_ratios,np.mean(couplings2, axis=1),label=label)
plt.xlabel('Input focal ratio')
plt.ylabel('Central Fiber Coupling')
plt.axis([700,1650,0,.7])
| 33.907801
| 123
| 0.707174
|
d353da20297433d5db69b68c540afb54826baa6d
| 2,232
|
py
|
Python
|
tests/parsing/test_summary.py
|
mralext20/avwx-engine
|
4eabc2a4a08cd931d6f0fab7590ea09390af43e2
|
[
"MIT"
] | 30
|
2015-09-08T20:38:41.000Z
|
2019-03-10T07:10:47.000Z
|
tests/parsing/test_summary.py
|
mralext20/avwx-engine
|
4eabc2a4a08cd931d6f0fab7590ea09390af43e2
|
[
"MIT"
] | 13
|
2019-11-18T17:03:54.000Z
|
2021-09-04T03:53:55.000Z
|
tests/parsing/test_summary.py
|
mralext20/avwx-engine
|
4eabc2a4a08cd931d6f0fab7590ea09390af43e2
|
[
"MIT"
] | 16
|
2019-11-18T01:55:49.000Z
|
2021-09-20T03:22:58.000Z
|
"""
Test summary functions
"""
# library
import unittest
# module
from avwx import structs
from avwx.parsing import summary
class TestSummary(unittest.TestCase):
"""Test summary functions"""
def test_metar(self):
"""Tests that METAR translations are summarized in the proper order"""
self.assertEqual(
summary.metar(
structs.MetarTrans(
altimeter="29.92 inHg (1013 hPa)",
clouds="Broken layer at 1500ft (Cumulonimbus) - Reported AGL",
dewpoint="-1°C (30°F)",
remarks={},
temperature="3°C (37°F)",
visibility="3sm (4.8km)",
wind="N-360 (variable 340 to 020) at 12kt gusting to 20kt",
wx_codes="Heavy Rain",
)
),
(
"Winds N-360 (variable 340 to 020) at 12kt gusting to 20kt, "
"Vis 3sm, Temp 3°C, Dew -1°C, Alt 29.92 inHg, "
"Heavy Rain, Broken layer at 1500ft (Cumulonimbus)"
),
)
def test_taf(self):
"""Tests that TAF line translations are summarized in the proper order"""
self.assertEqual(
summary.taf(
structs.TafLineTrans(
altimeter="29.92 inHg (1013 hPa)",
clouds="Broken layer at 1500ft (Cumulonimbus) - Reported AGL",
icing="Light icing from 10000ft to 15000ft",
turbulence="Occasional moderate turbulence in clouds from 5500ft to 8500ft",
visibility="3sm (4.8km)",
wind_shear="Wind shear 2000ft from 070 at 40kt",
wind="N-360 at 12kt gusting to 20kt",
wx_codes="Heavy Rain",
)
),
(
"Winds N-360 at 12kt gusting to 20kt, Vis 3sm, Alt 29.92 inHg, "
"Heavy Rain, Broken layer at 1500ft (Cumulonimbus), "
"Wind shear 2000ft from 070 at 40kt, "
"Occasional moderate turbulence in clouds from 5500ft to 8500ft, "
"Light icing from 10000ft to 15000ft"
),
)
| 36.590164
| 96
| 0.512545
|