Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 .. 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 .. 209 |
| max_stars_repo_name | string | length 5 .. 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 .. 10 |
| max_stars_count | int64 ⌀ | 1 .. 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 .. 209 |
| max_issues_repo_name | string | length 5 .. 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 .. 10 |
| max_issues_count | int64 ⌀ | 1 .. 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 .. 209 |
| max_forks_repo_name | string | length 5 .. 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 .. 10 |
| max_forks_count | int64 ⌀ | 1 .. 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 4 .. 1.02M |
| avg_line_length | float64 | 1.07 .. 66.1k |
| max_line_length | int64 | 4 .. 266k |
| alphanum_fraction | float64 | 0.01 .. 1 |
hexsha: bba92e1e20cfe9edec4e03afe4a6d75583a1ea97 | size: 3,473 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: Axellant/gobgp @ 840755be18f226b202759aea8318de20ccc32057 | path: test/scenario_test/flow_spec_test.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from fabric.api import local
from lib import base
from lib.gobgp import *
from lib.exabgp import *
import sys
import os
import time
import nose
from noseplugin import OptionParser, parser_option
from itertools import combinations
class GoBGPTestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
gobgp_ctn_image_name = parser_option.gobgp_image
base.TEST_PREFIX = parser_option.test_prefix
g1 = GoBGPContainer(name='g1', asn=65000, router_id='192.168.0.1',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level)
e1 = ExaBGPContainer(name='e1', asn=65000, router_id='192.168.0.2')
ctns = [g1, e1]
        # advertise flowspec routes from e1 and g1
matchs = ['destination 10.0.0.0/24', 'source 20.0.0.0/24']
thens = ['discard']
e1.add_route(route='flow1', rf='ipv4-flowspec', matchs=matchs, thens=thens)
matchs2 = ['tcp-flags syn', 'protocol tcp udp', "packet-length '>1000&<2000'"]
thens2 = ['rate-limit 9600', 'redirect 0.10:100', 'mark 20', 'action sample']
g1.add_route(route='flow1', rf='ipv4-flowspec', matchs=matchs2, thens=thens2)
matchs3 = ['destination 2001::/24/10', 'source 2002::/24/15']
thens3 = ['discard']
e1.add_route(route='flow2', rf='ipv6-flowspec', matchs=matchs3, thens=thens3)
matchs4 = ['destination 2001::/24 10', "label '=100'"]
thens4 = ['discard']
g1.add_route(route='flow2', rf='ipv6-flowspec', matchs=matchs4, thens=thens4)
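        # start all containers and wait for the slowest one to come up before bridging and peering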
initial_wait_time = max(ctn.run() for ctn in ctns)
time.sleep(initial_wait_time)
br01 = Bridge(name='br01', subnet='192.168.10.0/24', self_ip=True)
[br01.addif(ctn) for ctn in ctns]
# ibgp peer. loop topology
for a, b in combinations(ctns, 2):
a.add_peer(b, flowspec=True)
b.add_peer(a, flowspec=True)
cls.gobgp = g1
cls.exabgp = e1
cls.bridges = {'br01': br01}
# test each neighbor state is turned establish
def test_01_neighbor_established(self):
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=self.exabgp)
def test_02_check_gobgp_global_rib(self):
self.assertTrue(len(self.gobgp.get_global_rib(rf='ipv4-flowspec')) == 2)
def test_03_check_gobgp_global_rib(self):
self.assertTrue(len(self.gobgp.get_global_rib(rf='ipv6-flowspec')) == 2)
if __name__ == '__main__':
    if os.geteuid() != 0:
        print("you are not root.")
        sys.exit(1)
    output = local("which docker 2>&1 > /dev/null ; echo $?", capture=True)
    if int(output) != 0:
        print("docker not found")
        sys.exit(1)
nose.main(argv=sys.argv, addplugins=[OptionParser()],
defaultTest=sys.argv[0])
avg_line_length: 36.557895 | max_line_length: 86 | alphanum_fraction: 0.660236
hexsha: ef228f92c4055f540c9c94def5f1a8641a0a506b | size: 6,570 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: THU-DA-6D-Pose-Group/self6dpp @ c267cfa55e440e212136a5e9940598720fa21d16 | path: configs/self6dpp/ssHB/ss_dibr_mlBCE_FreezeBN_woCenter_refinePM10_10e_train_diffRatio/ss_dibr_mlBCE_FreezeBN_woCenter_refinePM10_10e_train900/ss_dibr_mlBCE_FreezeBN_woCenter_refinePM10_10e_train900_benchvise.py | licenses: ["Apache-2.0"]
max_stars_count: 33 (2021-12-15T07:11:47.000Z .. 2022-03-29T08:58:32.000Z) | max_issues_count: 3 (2021-12-15T11:39:54.000Z .. 2022-03-29T07:24:23.000Z) | max_forks_count: null
content:
_base_ = ["../../../../_base_/self6dpp_base.py"]
# refiner_cfg_path = "configs/_base_/self6dpp_refiner_base.py"
OUTPUT_DIR = "output/self6dpp/ssHB/ss_dibr_mlBCE_FreezeBN_woCenter_refinePM10_10e_train900/benchvise"
INPUT = dict(
WITH_DEPTH=True,
DZI_PAD_SCALE=1.5,
TRUNCATE_FG=False,
CHANGE_BG_PROB=0.5,
COLOR_AUG_PROB=0.8,
COLOR_AUG_TYPE="code",
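    # COLOR_AUG_CODE spells out an imgaug augmentation pipeline as a string (presumably evaluated by the training code)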
COLOR_AUG_CODE=(
"Sequential(["
# Sometimes(0.5, PerspectiveTransform(0.05)),
# Sometimes(0.5, CropAndPad(percent=(-0.05, 0.1))),
# Sometimes(0.5, Affine(scale=(1.0, 1.2))),
"Sometimes(0.5, CoarseDropout( p=0.2, size_percent=0.05) ),"
"Sometimes(0.4, GaussianBlur((0., 3.))),"
"Sometimes(0.3, pillike.EnhanceSharpness(factor=(0., 50.))),"
"Sometimes(0.3, pillike.EnhanceContrast(factor=(0.2, 50.))),"
"Sometimes(0.5, pillike.EnhanceBrightness(factor=(0.1, 6.))),"
"Sometimes(0.3, pillike.EnhanceColor(factor=(0., 20.))),"
"Sometimes(0.5, Add((-25, 25), per_channel=0.3)),"
"Sometimes(0.3, Invert(0.2, per_channel=True)),"
"Sometimes(0.5, Multiply((0.6, 1.4), per_channel=0.5)),"
"Sometimes(0.5, Multiply((0.6, 1.4))),"
"Sometimes(0.1, AdditiveGaussianNoise(scale=10, per_channel=True)),"
"Sometimes(0.5, iaa.contrast.LinearContrast((0.5, 2.2), per_channel=0.3)),"
"Sometimes(0.5, Grayscale(alpha=(0.0, 1.0)))," # maybe remove for det
"], random_order=True)"
# cosy+aae
),
)
SOLVER = dict(
IMS_PER_BATCH=6, # maybe need to be < 24
TOTAL_EPOCHS=10,
LR_SCHEDULER_NAME="flat_and_anneal",
ANNEAL_METHOD="cosine", # "cosine"
ANNEAL_POINT=0.72,
# REL_STEPS=(0.3125, 0.625, 0.9375),
OPTIMIZER_CFG=dict(_delete_=True, type="Ranger", lr=1e-4, weight_decay=0),
WEIGHT_DECAY=0.0,
WARMUP_FACTOR=0.001,
WARMUP_ITERS=100, # NOTE: only real data, iterations are very small
CLIP_GRADIENTS=dict(ENABLED=True, CLIP_TYPE="full_model", CLIP_VALUE=1.0),
)
DATASETS = dict(
TRAIN=("hb_bdp_benchvise_train900",), # real data
TRAIN2=("lm_pbr_benchvise_train",), # synthetic data
TRAIN2_RATIO=0.0,
TEST=("hb_bdp_benchvise_test100",),
# for self-supervised training
DET_FILES_TRAIN=(
"datasets/hb_bench_driller_phone/init_poses/resnest50d_a6_AugCosyAAEGray_BG05_mlBCE_lm_pbr_100e_so_GdrnPose_wYolov4PbrBbox_wDeepimPose_hbBdpAll.json",
),
DET_THR_TRAIN=0.5,
DET_FILES_TEST=(
"datasets/hb_bench_driller_phone/test_bboxes/yolov4x_640_augCosyAAEGray_ranger_lm_pbr_hb_bdp_all.json",
),
)
RENDERER = dict(DIFF_RENDERER="DIBR") # DIBR | DIBR
MODEL = dict(
# synthetically trained model
WEIGHTS="output/gdrn/lm_pbr/resnest50d_a6_AugCosyAAEGray_BG05_mlBCE_lm_pbr_100e/benchvise/model_final_wo_optim-85b3563e.pth",
REFINER_WEIGHTS="",
FREEZE_BN=True,
SELF_TRAIN=True, # whether to do self-supervised training
WITH_REFINER=False, # whether to use refiner
LOAD_DETS_TRAIN=True, # NOTE: load detections for self-train
LOAD_DETS_TRAIN_WITH_POSE=True, # NOTE: load pose_refine
LOAD_DETS_TEST=True,
EMA=dict(
ENABLED=True,
INIT_CFG=dict(decay=0.999, updates=0), # epoch-based
UPDATE_FREQ=10, # update the mean teacher every n epochs
),
POSE_NET=dict(
NAME="GDRN", # used module file name
# NOTE: for self-supervised training phase, use offline labels should be more accurate
XYZ_ONLINE=False, # rendering xyz online
XYZ_BP=True, # calculate xyz from depth by backprojection
NUM_CLASSES=13,
USE_MTL=False, # uncertainty multi-task weighting
INPUT_RES=256,
OUTPUT_RES=64,
## backbone
BACKBONE=dict(
FREEZE=False,
PRETRAINED="timm",
INIT_CFG=dict(
type="timm/resnest50d",
pretrained=True,
in_chans=3,
features_only=True,
out_indices=(4,),
),
),
## geo head: Mask, XYZ, Region
GEO_HEAD=dict(
FREEZE=False,
INIT_CFG=dict(
type="TopDownMaskXyzRegionHead",
in_dim=2048, # this is num out channels of backbone conv feature
),
NUM_REGIONS=64,
),
PNP_NET=dict(
INIT_CFG=dict(norm="GN", act="gelu"),
REGION_ATTENTION=True,
WITH_2D_COORD=True,
ROT_TYPE="allo_rot6d",
TRANS_TYPE="centroid_z",
),
LOSS_CFG=dict(
# xyz loss ----------------------------
XYZ_LOSS_TYPE="L1", # L1 | CE_coor
XYZ_LOSS_MASK_GT="visib", # trunc | visib | obj
XYZ_LW=1.0,
# mask loss ---------------------------
MASK_LOSS_TYPE="BCE", # L1 | BCE | CE
MASK_LOSS_GT="trunc", # trunc | visib | gt
MASK_LW=1.0,
# region loss -------------------------
REGION_LOSS_TYPE="CE", # CE
REGION_LOSS_MASK_GT="visib", # trunc | visib | obj
REGION_LW=1.0,
# pm loss --------------
PM_LOSS_SYM=True, # NOTE: sym loss
PM_R_ONLY=True, # only do R loss in PM
PM_LW=1.0,
# centroid loss -------
CENTROID_LOSS_TYPE="L1",
CENTROID_LW=1.0,
# z loss -----------
Z_LOSS_TYPE="L1",
Z_LW=1.0,
),
SELF_LOSS_CFG=dict(
# LAB space loss ------------------
LAB_NO_L=True,
LAB_LW=0.2,
# MS-SSIM loss --------------------
MS_SSIM_LW=1.0,
# perceptual loss -----------------
PERCEPT_LW=0.15,
# mask loss (init, ren) -----------------------
MASK_INIT_REN_LOSS_TYPE="RW_BCE",
# MASK_INIT_REN_LOSS_TYPE="dice",
MASK_INIT_REN_LW=1.0,
# depth-based geometric loss ------
GEOM_LOSS_TYPE="chamfer", # L1, chamfer
GEOM_LW=100.0, # 100
CHAMFER_CENTER_LW=0.0,
CHAMFER_DIST_THR=0.5,
# refiner-based loss --------------
REFINE_LW=0.0,
# xyz loss (init, ren)
XYZ_INIT_REN_LOSS_TYPE="L1", # L1 | CE_coor (for cls)
XYZ_INIT_REN_LW=0.0,
SELF_PM_CFG=dict(
loss_weight=10.0, # NOTE: >0 to enable this loss
),
),
),
)
VAL = dict(
USE_BOP=False, # whether to use bop toolkit
)
avg_line_length: 37.758621 | max_line_length: 158 | alphanum_fraction: 0.567732
hexsha: 18207358204d6224d2679830f5a7ca750a63ab48 | size: 1,676 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: codeproject/DeepStack @ d96368a3db1bc0266cb500ba3701d130834da0e6 | path: windows_packages_gpu/torch/multiprocessing/pool.py | licenses: ["Apache-2.0"]
max_stars_count: 353 (2020-12-10T10:47:17.000Z .. 2022-03-31T23:08:29.000Z) | max_issues_count: 80 (2020-12-10T09:54:22.000Z .. 2022-03-30T22:08:45.000Z) | max_forks_count: 63 (2020-12-10T17:10:34.000Z .. 2022-03-28T16:27:07.000Z)
content:
import multiprocessing
import multiprocessing.pool
import multiprocessing.util as util
from .queue import SimpleQueue
def clean_worker(*args, **kwargs):
import gc
multiprocessing.pool.worker(*args, **kwargs)
# Regular multiprocessing workers don't fully clean up after themselves,
# so we have to explicitly trigger garbage collection to make sure that all
# destructors are called...
gc.collect()
class Pool(multiprocessing.pool.Pool):
"""Pool implementation which uses our version of SimpleQueue.
This lets us pass tensors in shared memory across processes instead of
serializing the underlying data."""
def _setup_queues(self):
self._inqueue = SimpleQueue()
self._outqueue = SimpleQueue()
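        # the pool's task/result handler threads use these to write tasks and read results directly on the queue endpoints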
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def _repopulate_pool(self):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
# changed worker -> clean_worker
args = (self._inqueue, self._outqueue,
self._initializer,
self._initargs, self._maxtasksperchild)
if hasattr(self, '_wrap_exception'):
args += (self._wrap_exception,)
w = self.Process(target=clean_worker, args=args)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
util.debug('added worker')
avg_line_length: 37.244444 | max_line_length: 80 | alphanum_fraction: 0.634248
hexsha: a11d0aa0c8f21b8ab4eddae929916fa73c19d28f | size: 3,796 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: sebtelko/pulumi-azure-native @ 711ec021b5c73da05611c56c8a35adb0ce3244e4 | path: sdk/python/pulumi_azure_native/devices/v20200301/get_certificate.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetCertificateResult',
'AwaitableGetCertificateResult',
'get_certificate',
]
@pulumi.output_type
class GetCertificateResult:
"""
The X509 Certificate.
"""
def __init__(__self__, etag=None, id=None, name=None, properties=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> str:
"""
The entity tag.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource identifier.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the certificate.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.CertificatePropertiesResponse':
"""
The description of an X509 CA Certificate.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetCertificateResult(GetCertificateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCertificateResult(
etag=self.etag,
id=self.id,
name=self.name,
properties=self.properties,
type=self.type)
def get_certificate(certificate_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCertificateResult:
"""
The X509 Certificate.
:param str certificate_name: The name of the certificate
:param str resource_group_name: The name of the resource group that contains the IoT hub.
:param str resource_name: The name of the IoT hub.
"""
__args__ = dict()
__args__['certificateName'] = certificate_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:devices/v20200301:getCertificate', __args__, opts=opts, typ=GetCertificateResult).value
return AwaitableGetCertificateResult(
etag=__ret__.etag,
id=__ret__.id,
name=__ret__.name,
properties=__ret__.properties,
type=__ret__.type)
avg_line_length: 31.114754 | max_line_length: 137 | alphanum_fraction: 0.628293
hexsha: 32b4e552b8710c6635a81e843533182577899213 | size: 5,337 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: Nugetzrul3/Magi-Tip @ ac86aaf7bc0708dd1c43071da28b5b4b922bfe37 | path: withdrawall.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from decimal import Decimal
import discord
from discord.ext import commands
import user_db
import config
# connect to coind
rpc_connection = 'http://{0}:{1}@{2}:{3}'.format(config.rpc_user, config.rpc_password, config.ip, config.rpc_port)
class Withdrawall(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def withdrawall(self, ctx, address=None):
client = AuthServiceProxy(rpc_connection)
user_id = str(ctx.author.id)
if not user_db.check_user(user_id):
embed = discord.Embed(
title="**For first-use-user**",
color=0x0043ff)
embed.set_author(
name=ctx.author.display_name,
icon_url=ctx.author.avatar_url_as(format='png', size=256))
embed.add_field(
name="First of all, please type `//help`",
value="Welcome to world of Tip Magi !")
embed.set_thumbnail(url=self.bot.user.avatar_url_as(format='png', size=1024))
embed.set_footer(text="Tip Magi {0} [Owner: {1}]".format(config.VERSION, self.bot.get_user(config.OWNER_ID)),
icon_url=self.bot.user.avatar_url_as(format='png', size=256))
await ctx.channel.send(embed=embed)
else:
pass
account = str(ctx.author.id)
balance = Decimal(client.getbalance(account, config.CONFIRM))
if address is None:
embed = discord.Embed(color=0xffd800)
embed.set_author(
name=ctx.author.display_name,
icon_url=ctx.author.avatar_url_as(format='png', size=256))
embed.add_field(
name="Please check `//help` ",
value=" :mag: ")
embed.set_footer(text="Tip Magi {0} [Owner: {1}]".format(config.VERSION, self.bot.get_user(config.OWNER_ID)),
icon_url=self.bot.user.avatar_url_as(format='png', size=256))
            await ctx.channel.send(embed=embed)
            return
if balance < Decimal('0.5'):
embed = discord.Embed(color=0xff0000)
embed.set_author(
name=ctx.author.display_name,
icon_url=ctx.author.avatar_url_as(format='png', size=256))
embed.add_field(
name="Amount must be at least 0.5 XMG.",
value="Your balances : ```{0} XMG```".format(client.getbalance(account, config.CONFIRM)))
embed.set_footer(text="Tip Magi {0} [Owner: {1}]".format(config.VERSION, self.bot.get_user(config.OWNER_ID)),
icon_url=self.bot.user.avatar_url_as(format='png', size=256))
await ctx.channel.send(embed=embed)
else:
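            # withdraw everything except the configured fee; validate the destination address before sending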
amount = balance - Decimal(str(config.FEE))
validate = client.validateaddress(address)
if not validate['isvalid']:
embed = discord.Embed(color=0xff0000)
embed.set_author(
name=ctx.author.display_name,
icon_url=ctx.author.avatar_url_as(format='png', size=256))
embed.add_field(
name="invalid address.",
value="`{0}`".format(str(address)))
embed.set_footer(text="Tip Magi {0} [Owner: {1}]".format(config.VERSION, self.bot.get_user(config.OWNER_ID)),
icon_url=self.bot.user.avatar_url_as(format='png', size=256))
await ctx.channel.send(embed=embed)
else:
txid = client.sendfrom(account, address, float(amount))
tx = client.gettransaction(txid)
txfee = tx['fee']
client.move(account, "tipmagi_wallet", Decimal(str(config.FEE)))
client.move("tipmagi_wallet", account, -txfee)
embed = discord.Embed(
title="**Block explorer**",
url='https://m-chain.info/tx/{0}'.format(txid),
color=0x0043ff)
embed.set_author(
name=ctx.author.display_name,
icon_url=ctx.author.avatar_url_as(format='png', size=256))
embed.add_field(
name="Withdrawal complete `{0} XMG`\nwithdraw fee is `{1} XMG`\nPlease check the transaction at the above link.".format(amount, str(config.FEE)),
value="Your balances : `{0} XMG`".format(client.getbalance(account, config.CONFIRM)))
embed.set_footer(text="Tip Magi {0} [Owner: {1}]".format(config.VERSION, self.bot.get_user(config.OWNER_ID)),
icon_url=self.bot.user.avatar_url_as(format='png', size=256))
await ctx.channel.send(embed=embed)
def setup(bot):
bot.add_cog(Withdrawall(bot))
avg_line_length: 48.081081 | max_line_length: 173 | alphanum_fraction: 0.526888
hexsha: b1d4f8d6a8da5a45817f42c56a9701acae69b8e7 | size: 815 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: ardydedase/flask-postgres-api @ d9edb69988f89f480d595a8b111fafeb7ae81234 | path: src/resources/user.py | licenses: ["MIT"]
max_stars_count: 3 (2020-10-08T03:06:51.000Z .. 2021-04-28T12:31:46.000Z) | max_issues_count: null | max_forks_count: 6 (2020-08-11T10:58:30.000Z .. 2021-09-09T03:57:37.000Z)
content:
from flask import request, jsonify
from flask_restful import Resource
from repositories import UserRepository
class User(Resource):
def get(self, username: str):
# TODO: error handler
# move to another resource
user = UserRepository.get(username)
return user, 200
class UserList(Resource):
def post(self):
"""
Create user
"""
request_json = request.get_json(silent=True)
username: str = request_json['username']
avatar_url: str = request_json.get('avatar_url', '')
try:
user = UserRepository.create(username, avatar_url)
return user, 200
except Exception as e:
response = jsonify(e.to_dict())
response.status_code = e.status_code
return response
avg_line_length: 27.166667 | max_line_length: 62 | alphanum_fraction: 0.619632
hexsha: b2e4ad7ffe2973a8153b0d4c38ef37164bd70a23 | size: 5,799 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: jzx1230/myloonflow @ 879a6d578b4fc17baf0f02e1ae5431931a6d2237 | path: service/workflow/workflow_base_service.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
import json
from django.db.models import Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from apps.workflow.models import Workflow
from service.base_service import BaseService
from service.common.log_service import auto_log
from service.account.account_base_service import AccountBaseService
class WorkflowBaseService(BaseService):
"""
    Workflow service
"""
def __init__(self):
pass
@classmethod
@auto_log
def get_workflow_list(cls, name, page, per_page, workflow_id_list):
"""
        Get the list of workflows
:param name:
:param page:
:param per_page:
        :param workflow_id_list: list of workflow ids
:return:
"""
query_params = Q(is_deleted=False)
if name:
query_params &= Q(name__contains=name)
query_params &= Q(id__in=workflow_id_list)
workflow_querset = Workflow.objects.filter(query_params).order_by('id')
paginator = Paginator(workflow_querset, per_page)
try:
workflow_result_paginator = paginator.page(page)
except PageNotAnInteger:
workflow_result_paginator = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results
workflow_result_paginator = paginator.page(paginator.num_pages)
workflow_result_object_list = workflow_result_paginator.object_list
workflow_result_restful_list = []
for workflow_result_object in workflow_result_object_list:
workflow_result_restful_list.append(dict(id=workflow_result_object.id, name=workflow_result_object.name, description=workflow_result_object.description,
creator=workflow_result_object.creator, gmt_created=str(workflow_result_object.gmt_created)[:19]))
return workflow_result_restful_list, dict(per_page=per_page, page=page, total=paginator.count)
@classmethod
@auto_log
def check_new_permission(cls, username, workflow_id):
"""
        Check whether the user has permission to create a new ticket
:param username:
:param workflow_id:
:return:
"""
        # get the workflow's limit expression
workflow_obj, msg = cls.get_by_id(workflow_id)
if not workflow_obj:
return False, msg
limit_expression = workflow_obj.limit_expression
if not limit_expression:
return True, 'no limit_expression set'
        # limit period ({"period":24} means 24 hours), limit count ({"count":1} means only 1 submission allowed within the period), limit level ({"level":1} applies the period/count limit to 1: a single user or 2: globally, default per-user); allow specific submitters ({"allow_persons":"zhangsan,lisi"} only allows zhangsan and lisi to submit tickets, {"allow_depts":"1,2"} only allows users in departments 1 and 2 to submit, {"allow_roles":"1,2"} only allows users with roles 1 and 2 to submit)
limit_expression_dict = json.loads(limit_expression)
limit_period = limit_expression_dict.get('period')
limit_count = limit_expression_dict.get('limit_count')
limit_allow_persons = limit_expression_dict.get('allow_persons')
limit_allow_depts = limit_expression_dict.get('allow_depts')
limit_allow_roles = limit_expression_dict.get('allow_roles')
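        # apply the rate limit first, then the person / department / role allow-lists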
if limit_period:
from service.ticket.ticket_base_service import TicketBaseService
if limit_expression_dict.get('level') == 1:
count_result, msg = TicketBaseService.get_ticket_count_by_args(workflow_id=workflow_id, username=username, period=limit_period)
elif limit_expression_dict.get('level') == 2:
count_result, msg = TicketBaseService.get_ticket_count_by_args(workflow_id=workflow_id, period=limit_period)
if count_result is False:
return False, msg
if count_result > limit_expression_dict.get('count'):
return False, '{} tickets can be created in {}hours when workflow_id is {}'.format(limit_count, limit_period, workflow_id)
if limit_allow_persons:
if username not in limit_allow_persons.split(','):
                return False, '{} can not create ticket base on workflow_id:{}'.format(username, workflow_id)
if limit_allow_depts:
            # get the user's departments, including parent departments
user_all_dept_id_list, msg = AccountBaseService.get_user_up_dept_id_list()
if user_all_dept_id_list is False:
return False, msg
            # it is enough that any department in user_all_dept_id_list falls within the allowed set
            limit_allow_dept_str_list = limit_allow_depts.split(',')
            limit_allow_dept_id_list = [int(limit_allow_dept_str) for limit_allow_dept_str in limit_allow_dept_str_list]
            limit_allow_dept_id_list = list(set(limit_allow_dept_id_list))  # de-duplicate
total_list = user_all_dept_id_list + limit_allow_dept_id_list
if len(total_list) == len(set(total_list)):
return False, 'user is not in allow dept'
if limit_allow_roles:
            # get all of the user's roles
user_role_list, msg = AccountBaseService.get_user_role_id_list(username)
if user_role_list is False:
return False, msg
limit_allow_role_str_list = limit_allow_roles.split(',')
limit_allow_role_id_list = [int(limit_allow_role_str) for limit_allow_role_str in limit_allow_role_str_list]
limit_allow_role_id_list = list(set(limit_allow_role_id_list))
total_list = limit_allow_role_id_list + user_role_list
if len(total_list) == len(set(total_list)):
return False, 'user is not in allow role'
return True, ''
@classmethod
@auto_log
def get_by_id(cls, workflow_id):
"""
        Get a workflow by id
:param workflow_id:
:return:
"""
workflow_obj = Workflow.objects.filter(is_deleted=0, id=workflow_id).first()
if not workflow_obj:
            return False, 'workflow does not exist'
return workflow_obj, ''
avg_line_length: 46.392 | max_line_length: 247 | alphanum_fraction: 0.67322
hexsha: 9064be8d8119ed4a5c66f787b6e95df1fd135987 | size: 526 | ext: py | lang: Python
max_stars repo: topdeveloper424/DataAnalyzer-Flask- @ 94724d77d993617d362a9c0cfc3f6be458089723 | path: analyzeapp/extensions.py | licenses: ["BSD-2-Clause"] | count: 1 (2019-05-11T16:34:51.000Z .. 2019-05-11T16:34:51.000Z)
max_issues repo: AShark1001/DataAnalyze @ 2e1e28d4ef3802693617c557d9c573dd4c8cf0e0 | path: analyzeapp/extensions.py | licenses: ["BSD-2-Clause"] | count: 2 (2019-07-05T14:59:22.000Z .. 2019-07-05T15:26:52.000Z)
max_forks repo: AShark1001/DataAnalyze @ 2e1e28d4ef3802693617c557d9c573dd4c8cf0e0 | path: analyzeapp/extensions.py | licenses: ["BSD-2-Clause"] | count: null
content:
from flask_cache import Cache
from flask_debugtoolbar import DebugToolbarExtension
from flask_login import LoginManager
from flask_assets import Environment
from analyzeapp.models import User
# Setup flask cache
cache = Cache()
# init flask assets
assets_env = Environment()
debug_toolbar = DebugToolbarExtension()
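# configure Flask-Login: unauthenticated users are redirected to the main.login view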
login_manager = LoginManager()
login_manager.login_view = "main.login"
login_manager.login_message_category = "warning"
@login_manager.user_loader
def load_user(userid):
return User.query.get(userid)
avg_line_length: 21.916667 | max_line_length: 52 | alphanum_fraction: 0.819392
hexsha: 679c081a0adeb646031205f3413b76d4ddf82548 | size: 4,279 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: kratsg/coffea @ ec70b898677e03eecc5003177b26b3a42692a4b3 | path: tests/test_dask.py | licenses: ["BSD-3-Clause"]
max_stars_count: 77 (2019-06-09T14:23:33.000Z .. 2022-03-22T21:34:01.000Z) | max_issues_count: 353 (2019-06-05T23:54:39.000Z .. 2022-03-31T21:21:47.000Z) | max_forks_count: 71 (2019-06-07T02:04:11.000Z .. 2022-03-05T21:03:45.000Z)
content:
from __future__ import print_function, division
from coffea import processor
import pytest
def do_dask_job(client, filelist, compression=0):
from coffea.processor.test_items import NanoTestProcessor
executor = processor.DaskExecutor(client=client, compression=compression)
run = processor.Runner(executor=executor)
hists = run(filelist, "Events", processor_instance=NanoTestProcessor())
assert hists["cutflow"]["ZJets_pt"] == 18
assert hists["cutflow"]["ZJets_mass"] == 6
assert hists["cutflow"]["Data_pt"] == 84
assert hists["cutflow"]["Data_mass"] == 66
def do_dask_cached(client, filelist, cachestrategy=None):
from coffea.nanoevents import schemas
from coffea.processor.test_items import NanoEventsProcessor
from coffea.processor.dask import register_columncache
register_columncache(client)
worker_affinity = True if cachestrategy is not None else False
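    # worker affinity is only enabled together with a cache strategy, presumably so repeated chunks land on the worker that already cached their columns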
executor = processor.DaskExecutor(client=client, worker_affinity=worker_affinity)
run = processor.Runner(
executor=executor,
schema=schemas.NanoAODSchema,
cachestrategy=cachestrategy,
savemetrics=True,
)
hists, metrics = run(
filelist,
"Events",
processor_instance=NanoEventsProcessor(
canaries=[
"a9490124-3648-11ea-89e9-f5b55c90beef/%2FEvents%3B1/0-40/nMuon%2C%21load%2C%21counts2offsets%2C%21skip/offsets",
"a9490124-3648-11ea-89e9-f5b55c90beef/%2FEvents%3B1/0-40/Muon_phi%2C%21load%2C%21content",
"a9490124-3648-11ea-89e9-f5b55c90beef/%2FEvents%3B1/0-40/Muon_pt%2C%21load%2C%21content",
"a9490124-3648-11ea-89e9-f5b55c90beef/%2FEvents%3B1/0-40/Muon_eta%2C%21load%2C%21content",
"a9490124-3648-11ea-89e9-f5b55c90beef/%2FEvents%3B1/0-40/Muon_mass%2C%21load%2C%21content",
"a9490124-3648-11ea-89e9-f5b55c90beef/%2FEvents%3B1/0-40/Muon_charge%2C%21load%2C%21content",
]
),
)
assert hists["cutflow"]["ZJets_pt"] == 18
assert hists["cutflow"]["ZJets_mass"] == 6
assert hists["cutflow"]["Data_pt"] == 84
assert hists["cutflow"]["Data_mass"] == 66
return hists["worker"]
def test_dask_job():
distributed = pytest.importorskip("distributed", minversion="2.6.0")
client = distributed.Client(dashboard_address=None)
import os
import os.path as osp
filelist = {
"ZJets": [osp.join(os.getcwd(), "tests/samples/nano_dy.root")],
"Data": [osp.join(os.getcwd(), "tests/samples/nano_dimuon.root")],
}
do_dask_job(client, filelist)
do_dask_job(client, filelist, compression=2)
filelist = {
"ZJets": {
"treename": "Events",
"files": [osp.join(os.getcwd(), "tests/samples/nano_dy.root")],
"metadata": {"checkusermeta": True, "someusermeta": "hello"},
},
"Data": {
"treename": "Events",
"files": [osp.join(os.getcwd(), "tests/samples/nano_dimuon.root")],
"metadata": {"checkusermeta": True, "someusermeta2": "world"},
},
}
do_dask_job(client, filelist)
client.close()
def test_dask_cached():
distributed = pytest.importorskip("distributed", minversion="2.6.0")
client = distributed.Client(dashboard_address=None)
import os
import os.path as osp
filelist = {
"ZJets": [osp.join(os.getcwd(), "tests/samples/nano_dy.root")],
"Data": [osp.join(os.getcwd(), "tests/samples/nano_dimuon.root")],
}
do_dask_cached(client, filelist)
workers1 = do_dask_cached(client, filelist, "dask-worker")
assert len(workers1) > 0
workers2 = do_dask_cached(client, filelist, "dask-worker")
assert workers1 == workers2
filelist = {
"ZJets": {
"treename": "Events",
"files": [osp.join(os.getcwd(), "tests/samples/nano_dy.root")],
"metadata": {"checkusermeta": True, "someusermeta": "hello"},
},
"Data": {
"treename": "Events",
"files": [osp.join(os.getcwd(), "tests/samples/nano_dimuon.root")],
"metadata": {"checkusermeta": True, "someusermeta2": "world"},
},
}
do_dask_cached(client, filelist)
client.close()
avg_line_length: 33.960317 | max_line_length: 128 | alphanum_fraction: 0.641739
hexsha: e959b18eefe29c9ac202491f37df80a502fc05fd | size: 16,339 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: Andrew-Crosby/dagster @ e646625a687dc656bdd855d88b868de957b37b62 | path: helm/dagster/schema/schema_tests/test_user_deployments.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
import json
import subprocess
from typing import List
import pytest
from kubernetes.client import models
from schema.charts.dagster.values import DagsterHelmValues
from schema.charts.dagster_user_deployments.subschema.user_deployments import UserDeployments
from schema.charts.utils import kubernetes
from .helm_template import HelmTemplate
from .utils import create_complex_user_deployment, create_simple_user_deployment
@pytest.fixture(name="template")
def helm_template() -> HelmTemplate:
return HelmTemplate(
output="charts/dagster-user-deployments/templates/deployment-user.yaml",
model=models.V1Deployment,
)
@pytest.fixture(name="full_template")
def full_helm_template() -> HelmTemplate:
return HelmTemplate()
def assert_user_deployment_template(
t: HelmTemplate, templates: List[models.V1Deployment], values: DagsterHelmValues
):
assert len(templates) == len(values.dagsterUserDeployments.deployments)
for template, deployment_values in zip(templates, values.dagsterUserDeployments.deployments):
# Assert simple stuff
assert template.metadata.labels["deployment"] == deployment_values.name
assert len(template.spec.template.spec.containers) == 1
assert template.spec.template.spec.containers[0].image == deployment_values.image.name
assert (
template.spec.template.spec.containers[0].image_pull_policy
== deployment_values.image.pullPolicy
)
# Assert annotations
if deployment_values.annotations:
template_deployment_annotations = t.api_client.sanitize_for_serialization(
template.metadata.annotations
)
template_deployment_pod_annotations = t.api_client.sanitize_for_serialization(
template.spec.template.metadata.annotations
)
annotations_values = json.loads(deployment_values.annotations.json(exclude_none=True))
assert template_deployment_annotations == annotations_values
assert template_deployment_pod_annotations.items() >= annotations_values.items()
# Assert node selector
if deployment_values.nodeSelector:
template_node_selector = t.api_client.sanitize_for_serialization(
template.spec.template.spec.node_selector
)
node_selector_values = json.loads(
deployment_values.nodeSelector.json(exclude_none=True)
)
assert template_node_selector == node_selector_values
# Assert affinity
if deployment_values.affinity:
template_affinity = t.api_client.sanitize_for_serialization(
template.spec.template.spec.affinity
)
affinity_values = json.loads(deployment_values.affinity.json(exclude_none=True))
assert template_affinity == affinity_values
# Assert tolerations
if deployment_values.tolerations:
template_tolerations = t.api_client.sanitize_for_serialization(
template.spec.template.spec.tolerations
)
tolerations_values = json.loads(deployment_values.tolerations.json(exclude_none=True))
assert template_tolerations == tolerations_values
# Assert pod security context
if deployment_values.podSecurityContext:
template_pod_security_context = t.api_client.sanitize_for_serialization(
template.spec.template.spec.security_context
)
pod_security_context_values = json.loads(
deployment_values.podSecurityContext.json(exclude_none=True)
)
assert template_pod_security_context == pod_security_context_values
# Assert security context
if deployment_values.securityContext:
template_container_security_context = t.api_client.sanitize_for_serialization(
template.spec.template.spec.containers[0].security_context
)
security_context_values = json.loads(
deployment_values.securityContext.json(exclude_none=True)
)
assert template_container_security_context == security_context_values
# Assert resources
if deployment_values.resources:
template_resources = t.api_client.sanitize_for_serialization(
template.spec.template.spec.containers[0].resources
)
resource_values = json.loads(deployment_values.resources.json(exclude_none=True))
assert template_resources == resource_values
@pytest.mark.parametrize(
"helm_values",
[
DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=False,
enableSubchart=False,
deployments=[create_simple_user_deployment("simple-deployment-one")],
)
),
DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=False,
enableSubchart=True,
deployments=[create_simple_user_deployment("simple-deployment-one")],
)
),
DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=True,
enableSubchart=False,
deployments=[create_simple_user_deployment("simple-deployment-one")],
)
),
],
ids=[
"user deployments disabled, subchart disabled",
"user deployments disabled, subchart enabled",
"user deployments enabled, subchart disabled",
],
)
def test_deployments_do_not_render(helm_values: DagsterHelmValues, template: HelmTemplate, capsys):
with pytest.raises(subprocess.CalledProcessError):
template.render(helm_values)
_, err = capsys.readouterr()
assert "Error: could not find template" in err
@pytest.mark.parametrize(
"helm_values",
[
DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=True,
enableSubchart=True,
deployments=[create_simple_user_deployment("simple-deployment-one")],
)
),
DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=True,
enableSubchart=True,
deployments=[create_complex_user_deployment("complex-deployment-one")],
)
),
DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=True,
enableSubchart=True,
deployments=[
create_simple_user_deployment("simple-deployment-one"),
create_simple_user_deployment("simple-deployment-two"),
],
)
),
DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=True,
enableSubchart=True,
deployments=[
create_complex_user_deployment("complex-deployment-one"),
create_complex_user_deployment("complex-deployment-two"),
create_simple_user_deployment("simple-deployment-three"),
],
)
),
],
ids=[
"single user deployment",
"multi user deployment",
"complex, single user deployment",
"complex, multi user deployment",
],
)
def test_deployments_render(helm_values: DagsterHelmValues, template: HelmTemplate):
user_deployments = template.render(helm_values)
assert_user_deployment_template(template, user_deployments, helm_values)
def test_chart_does_not_render(full_template: HelmTemplate, capsys):
helm_values = DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=False,
enableSubchart=True,
deployments=[create_simple_user_deployment("simple-deployment-one")],
)
)
with pytest.raises(subprocess.CalledProcessError):
full_template.render(helm_values)
_, err = capsys.readouterr()
assert (
"dagster-user-deployments subchart cannot be enabled if workspace.yaml is not created."
in err
)
@pytest.mark.parametrize(
"helm_values",
[
DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=True,
enableSubchart=False,
deployments=[
create_simple_user_deployment("simple-deployment-one"),
],
)
),
DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=True,
enableSubchart=False,
deployments=[
create_complex_user_deployment("complex-deployment-one"),
create_complex_user_deployment("complex-deployment-two"),
create_simple_user_deployment("simple-deployment-three"),
],
)
),
],
ids=[
"single user deployment enabled, subchart disabled",
"multiple user deployments enabled, subchart disabled",
],
)
def test_chart_does_render(helm_values: DagsterHelmValues, full_template: HelmTemplate):
templates = full_template.render(helm_values)
assert templates
@pytest.mark.parametrize(
"helm_values",
[
DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=True,
enableSubchart=True,
deployments=[
create_simple_user_deployment("simple-deployment-one"),
],
)
),
DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=True,
enableSubchart=True,
deployments=[
create_complex_user_deployment("complex-deployment-one"),
create_complex_user_deployment("complex-deployment-two"),
create_simple_user_deployment("simple-deployment-three"),
],
)
),
],
ids=[
"single user deployment enabled",
"multiple user deployments enabled",
],
)
def test_user_deployment_checksum_unchanged(helm_values: DagsterHelmValues, template: HelmTemplate):
pre_upgrade_templates = template.render(helm_values)
post_upgrade_templates = template.render(helm_values)
# User deployment templates with the same Helm values should not redeploy in a Helm upgrade
for pre_upgrade_user_deployment, post_upgrade_user_deployment in zip(
pre_upgrade_templates, post_upgrade_templates
):
pre_upgrade_checksum = pre_upgrade_user_deployment.spec.template.metadata.annotations[
"checksum/dagster-user-deployment"
]
post_upgrade_checksum = post_upgrade_user_deployment.spec.template.metadata.annotations[
"checksum/dagster-user-deployment"
]
assert pre_upgrade_checksum == post_upgrade_checksum
def test_user_deployment_checksum_changes(template: HelmTemplate):
pre_upgrade_helm_values = DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=True,
enableSubchart=True,
deployments=[
create_simple_user_deployment("deployment-one"),
create_simple_user_deployment("deployment-two"),
],
)
)
post_upgrade_helm_values = DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=True,
enableSubchart=True,
deployments=[
create_complex_user_deployment("deployment-one"),
create_complex_user_deployment("deployment-two"),
],
)
)
pre_upgrade_templates = template.render(pre_upgrade_helm_values)
post_upgrade_templates = template.render(post_upgrade_helm_values)
    # User deployment templates rendered from different Helm values should produce different checksums, forcing a redeploy
for pre_upgrade_user_deployment, post_upgrade_user_deployment in zip(
pre_upgrade_templates, post_upgrade_templates
):
pre_upgrade_checksum = pre_upgrade_user_deployment.spec.template.metadata.annotations[
"checksum/dagster-user-deployment"
]
post_upgrade_checksum = post_upgrade_user_deployment.spec.template.metadata.annotations[
"checksum/dagster-user-deployment"
]
assert pre_upgrade_checksum != post_upgrade_checksum
@pytest.mark.parametrize("enabled", [True, False])
def test_startup_probe_enabled(template: HelmTemplate, enabled: bool):
deployment = create_simple_user_deployment("foo")
deployment.startupProbe = kubernetes.StartupProbe.construct(enabled=enabled)
helm_values = DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments.construct(deployments=[deployment])
)
dagster_user_deployment = template.render(helm_values)
assert len(dagster_user_deployment) == 1
dagster_user_deployment = dagster_user_deployment[0]
assert len(dagster_user_deployment.spec.template.spec.containers) == 1
container = dagster_user_deployment.spec.template.spec.containers[0]
assert (container.startup_probe is not None) == enabled
def test_startup_probe_exec(template: HelmTemplate):
deployment = create_simple_user_deployment("foo")
deployment.startupProbe = kubernetes.StartupProbe.construct(
enabled=True, exec=dict(command=["my", "command"])
)
helm_values = DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments.construct(deployments=[deployment])
)
dagster_user_deployment = template.render(helm_values)
assert len(dagster_user_deployment) == 1
dagster_user_deployment = dagster_user_deployment[0]
assert len(dagster_user_deployment.spec.template.spec.containers) == 1
container = dagster_user_deployment.spec.template.spec.containers[0]
assert container.startup_probe._exec.command == [ # pylint:disable=protected-access
"my",
"command",
]
def test_startup_probe_default_exec(template: HelmTemplate):
deployment = create_simple_user_deployment("foo")
deployment.startupProbe = kubernetes.StartupProbe.construct(enabled=True)
helm_values = DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments.construct(deployments=[deployment])
)
dagster_user_deployment = template.render(helm_values)
assert len(dagster_user_deployment) == 1
dagster_user_deployment = dagster_user_deployment[0]
assert len(dagster_user_deployment.spec.template.spec.containers) == 1
container = dagster_user_deployment.spec.template.spec.containers[0]
assert container.startup_probe._exec.command == [ # pylint: disable=protected-access
"dagster",
"api",
"grpc-health-check",
"-p",
str(deployment.port),
]
@pytest.mark.parametrize("chart_version", ["0.11.0", "0.11.1"])
def test_user_deployment_default_image_tag_is_chart_version(
template: HelmTemplate, chart_version: str
):
helm_values = DagsterHelmValues.construct()
user_deployments = template.render(helm_values, chart_version=chart_version)
assert len(user_deployments) == 1
image = user_deployments[0].spec.template.spec.containers[0].image
_, image_tag = image.split(":")
assert image_tag == chart_version
def test_user_deployment_image(template: HelmTemplate):
deployment = create_simple_user_deployment("foo")
helm_values = DagsterHelmValues.construct(
dagsterUserDeployments=UserDeployments(
enabled=True,
enableSubchart=True,
deployments=[deployment],
)
)
user_deployments = template.render(helm_values)
assert len(user_deployments) == 1
image = user_deployments[0].spec.template.spec.containers[0].image
image_name, image_tag = image.split(":")
assert image_name == deployment.image.repository
assert image_tag == deployment.image.tag
avg_line_length: 36.634529 | max_line_length: 100 | alphanum_fraction: 0.672624
hexsha: 6787b74298f1830413742e591bfd17b63b3bdaff | size: 430 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: KentWangYQ/mongo2es @ 8952640e8ac3f2b1aa6845082fce04b7c4f7bd1e | path: module/elasticsearch/cursor.py | licenses: ["Apache-2.0"]
max_stars_count: 5 (2018-12-24T10:45:56.000Z .. 2019-07-29T07:26:28.000Z) | max_issues_count: null | max_forks_count: 2 (2019-07-30T06:27:49.000Z .. 2021-09-24T08:21:52.000Z)
content:
# -*- coding: utf-8 -*-
class Cursor(object):
def __init__(self, total=0, limit=100, skip=0, count=0, filter=None, projection=None, pop_fields=None,
pipeline=None):
self.total = total
self.limit = limit
self.skip = skip
self.count = count
self.filter = filter
self.projection = projection
self.pop_fields = pop_fields
self.pipeline = pipeline
avg_line_length: 28.666667 | max_line_length: 106 | alphanum_fraction: 0.593023
hexsha: b8f8e3968916d639a8a7427a3f0772d955054fc0 | size: 49,540 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: intgr/django-cms @ 92edf033ccc0938e41f3752935516572c3623695 | path: cms/tests/placeholder.py | licenses: ["BSD-3-Clause"]
max_stars_count: 1 (2015-09-28T10:07:41.000Z .. 2015-09-28T10:07:41.000Z) | max_issues_count: null | max_forks_count: null
content:
# -*- coding: utf-8 -*-
from __future__ import with_statement
import itertools
from cms.toolbar.toolbar import CMSToolbar
from sekizai.context import SekizaiContext
import warnings
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.models import Permission
from django.contrib.messages.storage import default_storage
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import models
from django.http import HttpResponseForbidden, HttpResponse
from django.template import TemplateSyntaxError, Template
from django.template.context import Context, RequestContext
from django.test import TestCase
from django.utils.numberformat import format
from djangocms_link.cms_plugins import LinkPlugin
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from djangocms_text_ckeditor.models import Text
from cms import constants
from cms.api import add_plugin, create_page, create_title
from cms.exceptions import DuplicatePlaceholderWarning
from cms.models.fields import PlaceholderField
from cms.models.placeholdermodel import Placeholder
from cms.plugin_pool import plugin_pool
from cms.plugin_rendering import render_placeholder
from cms.admin.placeholderadmin import PlaceholderAdmin, PlaceholderAdminMixin
from cms.utils.compat.tests import UnittestCompatMixin
from cms.test_utils.fixtures.fakemlng import FakemlngFixtures
from cms.test_utils.project.fakemlng.models import Translations
from cms.test_utils.project.placeholderapp.models import (
Example1,
TwoPlaceholderExample,
DynamicPlaceholderSlotExample,
MultilingualExample1
)
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.context_managers import (SettingsOverride, UserLoginContext)
from cms.test_utils.util.mock import AttributeObject
from cms.utils.compat.dj import force_unicode
from cms.utils.placeholder import PlaceholderNoAction, MLNGPlaceholderActions, get_placeholder_conf
from cms.utils.plugins import get_placeholders, assign_plugins
from cms.compat import get_user_model
from cms.test_utils.project.objectpermissionsapp.models import UserObjectPermission
class PlaceholderTestCase(CMSTestCase, UnittestCompatMixin):
def setUp(self):
u = self._create_user("test", True, True)
self._login_context = self.login_user_context(u)
self._login_context.__enter__()
def tearDown(self):
self._login_context.__exit__(None, None, None)
def test_placeholder_scanning_extend(self):
placeholders = get_placeholders('placeholder_tests/test_one.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'three']))
def test_placeholder_scanning_sekizai_extend(self):
placeholders = get_placeholders('placeholder_tests/test_one_sekizai.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'three']))
def test_placeholder_scanning_include(self):
placeholders = get_placeholders('placeholder_tests/test_two.html')
self.assertEqual(sorted(placeholders), sorted([u'child', u'three']))
def test_placeholder_scanning_double_extend(self):
placeholders = get_placeholders('placeholder_tests/test_three.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'new_three']))
def test_placeholder_scanning_sekizai_double_extend(self):
placeholders = get_placeholders('placeholder_tests/test_three_sekizai.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'new_three']))
def test_placeholder_scanning_complex(self):
placeholders = get_placeholders('placeholder_tests/test_four.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'child', u'four']))
def test_placeholder_scanning_super(self):
placeholders = get_placeholders('placeholder_tests/test_five.html')
self.assertEqual(sorted(placeholders), sorted([u'one', u'extra_one', u'two', u'three']))
def test_placeholder_scanning_nested(self):
placeholders = get_placeholders('placeholder_tests/test_six.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'new_two', u'new_three']))
def test_placeholder_scanning_duplicate(self):
placeholders = self.assertWarns(DuplicatePlaceholderWarning,
'Duplicate {% placeholder "one" %} in template placeholder_tests/test_seven.html.',
get_placeholders, 'placeholder_tests/test_seven.html')
self.assertEqual(sorted(placeholders), sorted([u'one']))
def test_placeholder_scanning_extend_outside_block(self):
placeholders = get_placeholders('placeholder_tests/outside.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside']))
def test_placeholder_scanning_sekizai_extend_outside_block(self):
placeholders = get_placeholders('placeholder_tests/outside_sekizai.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside']))
def test_placeholder_scanning_extend_outside_block_nested(self):
placeholders = get_placeholders('placeholder_tests/outside_nested.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside']))
def test_placeholder_scanning_sekizai_extend_outside_block_nested(self):
placeholders = get_placeholders('placeholder_tests/outside_nested_sekizai.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside']))
def test_fieldsets_requests(self):
response = self.client.get(reverse('admin:placeholderapp_example1_add'))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('admin:placeholderapp_twoplaceholderexample_add'))
self.assertEqual(response.status_code, 200)
def test_page_only_plugins(self):
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
response = self.client.get(reverse('admin:placeholderapp_example1_change', args=(ex.pk,)))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'InheritPagePlaceholderPlugin')
def test_inter_placeholder_plugin_move(self):
ex = TwoPlaceholderExample(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
ph1 = ex.placeholder_1
ph2 = ex.placeholder_2
ph1_pl1 = add_plugin(ph1, TextPlugin, 'en', body='ph1 plugin1').cmsplugin_ptr
ph1_pl2 = add_plugin(ph1, TextPlugin, 'en', body='ph1 plugin2').cmsplugin_ptr
ph1_pl3 = add_plugin(ph1, TextPlugin, 'en', body='ph1 plugin3').cmsplugin_ptr
ph2_pl1 = add_plugin(ph2, TextPlugin, 'en', body='ph2 plugin1').cmsplugin_ptr
ph2_pl2 = add_plugin(ph2, TextPlugin, 'en', body='ph2 plugin2').cmsplugin_ptr
ph2_pl3 = add_plugin(ph2, TextPlugin, 'en', body='ph2 plugin3').cmsplugin_ptr
response = self.client.post(reverse('admin:placeholderapp_twoplaceholderexample_move_plugin'), {
'placeholder_id': str(ph2.pk),
'plugin_id': str(ph1_pl2.pk),
'plugin_order[]': [str(p.pk) for p in [ph2_pl3, ph2_pl1, ph2_pl2, ph1_pl2]]
})
self.assertEqual(response.status_code, 200)
self.assertEqual([ph1_pl1, ph1_pl3], list(ph1.cmsplugin_set.order_by('position')))
self.assertEqual([ph2_pl3, ph2_pl1, ph2_pl2, ph1_pl2, ], list(ph2.cmsplugin_set.order_by('position')))
def test_nested_plugin_escapejs(self):
"""
Checks #1366 error condition.
When adding/editing a plugin whose icon_src() method returns a URL
containing an hyphen, the hyphen is escaped by django escapejs resulting
in a incorrect URL
"""
with SettingsOverride(CMS_PERMISSION=False):
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
ph1 = ex.placeholder
###
# add the test plugin
###
test_plugin = add_plugin(ph1, u"EmptyPlugin", u"en")
test_plugin.save()
pl_url = "%sedit-plugin/%s/" % (
reverse('admin:placeholderapp_example1_change', args=(ex.pk,)),
test_plugin.pk)
response = self.client.post(pl_url, {})
self.assertContains(response, "CMS.API.Helpers.reloadBrowser")
def test_nested_plugin_escapejs_page(self):
"""
Sibling test of the above, on a page.
#1366 does not apply to placeholder defined in a page
"""
with SettingsOverride(CMS_PERMISSION=False):
page = create_page('page', 'col_two.html', 'en')
ph1 = page.placeholders.get(slot='col_left')
###
# add the test plugin
###
test_plugin = add_plugin(ph1, u"EmptyPlugin", u"en")
test_plugin.save()
pl_url = "%sedit-plugin/%s/" % (
reverse('admin:cms_page_change', args=(page.pk,)),
test_plugin.pk)
response = self.client.post(pl_url, {})
self.assertContains(response, "CMS.API.Helpers.reloadBrowser")
def test_placeholder_scanning_fail(self):
self.assertRaises(TemplateSyntaxError, get_placeholders, 'placeholder_tests/test_eleven.html')
def test_placeholder_tag(self):
template = Template("{% load cms_tags %}{% render_placeholder placeholder %}")
ctx = Context()
self.assertEqual(template.render(ctx), "")
request = self.get_request('/')
rctx = RequestContext(request)
self.assertEqual(template.render(rctx), "")
placeholder = Placeholder.objects.create(slot="test")
rctx['placeholder'] = placeholder
self.assertEqual(template.render(rctx), "")
self.assertEqual(placeholder.cmsplugin_set.count(), 0)
add_plugin(placeholder, "TextPlugin", settings.LANGUAGES[0][0], body="test")
self.assertEqual(placeholder.cmsplugin_set.count(), 1)
rctx = RequestContext(request)
placeholder = self.reload(placeholder)
rctx['placeholder'] = placeholder
self.assertEqual(template.render(rctx).strip(), "test")
def test_placeholder_tag_language(self):
template = Template("{% load cms_tags %}{% render_placeholder placeholder language language %}")
placeholder = Placeholder.objects.create(slot="test")
add_plugin(placeholder, "TextPlugin", 'en', body="English")
add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
request = self.get_request('/')
rctx = RequestContext(request)
rctx['placeholder'] = placeholder
rctx['language'] = 'en'
self.assertEqual(template.render(rctx).strip(), "English")
del placeholder._plugins_cache
rctx['language'] = 'de'
self.assertEqual(template.render(rctx).strip(), "Deutsch")
def test_get_placeholder_conf(self):
TEST_CONF = {
'main': {
'name': 'main content',
'plugins': ['TextPlugin', 'LinkPlugin'],
'default_plugins':[
{
'plugin_type':'TextPlugin',
'values':{
'body':'<p>Some default text</p>'
},
},
],
},
'layout/home.html main': {
'name': u'main content with FilerImagePlugin and limit',
'plugins': ['TextPlugin', 'FilerImagePlugin', 'LinkPlugin',],
'inherit':'main',
'limits': {'global': 1,},
},
'layout/other.html main': {
'name': u'main content with FilerImagePlugin and no limit',
'inherit':'layout/home.html main',
'limits': {},
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=TEST_CONF):
#test no inheritance
returned = get_placeholder_conf('plugins', 'main')
self.assertEqual(returned, TEST_CONF['main']['plugins'])
#test no inherited value with inheritance enabled
returned = get_placeholder_conf('plugins', 'main', 'layout/home.html')
self.assertEqual(returned, TEST_CONF['layout/home.html main']['plugins'])
#test direct inherited value
returned = get_placeholder_conf('plugins', 'main', 'layout/other.html')
self.assertEqual(returned, TEST_CONF['layout/home.html main']['plugins'])
#test grandparent inherited value
returned = get_placeholder_conf('default_plugins', 'main', 'layout/other.html')
self.assertEqual(returned, TEST_CONF['main']['default_plugins'])
def test_placeholder_context_leaking(self):
TEST_CONF = {'test': {'extra_context': {'width': 10}}}
ph = Placeholder.objects.create(slot='test')
class NoPushPopContext(Context):
def push(self):
pass
pop = push
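            # push/pop are intentionally no-ops so that variables set while
            # rendering stay in this context and can be asserted on afterwards.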
context = NoPushPopContext()
context['request'] = self.get_request()
with SettingsOverride(CMS_PLACEHOLDER_CONF=TEST_CONF):
render_placeholder(ph, context)
self.assertTrue('width' in context)
self.assertEqual(context['width'], 10)
ph.render(context, None)
self.assertTrue('width' in context)
self.assertEqual(context['width'], 10)
def test_placeholder_scanning_nested_super(self):
placeholders = get_placeholders('placeholder_tests/nested_super_level1.html')
self.assertEqual(sorted(placeholders), sorted([u'level1', u'level2', u'level3', u'level4']))
def test_placeholder_field_no_related_name(self):
self.assertRaises(ValueError, PlaceholderField, 'placeholder', related_name='+')
def test_placeholder_field_valid_slotname(self):
self.assertRaises(ImproperlyConfigured, PlaceholderField, 10)
def test_placeholder_field_dynamic_slot_generation(self):
instance = DynamicPlaceholderSlotExample.objects.create(char_1='slot1', char_2='slot2')
self.assertEqual(instance.char_1, instance.placeholder_1.slot)
self.assertEqual(instance.char_2, instance.placeholder_2.slot)
def test_placeholder_field_dynamic_slot_update(self):
instance = DynamicPlaceholderSlotExample.objects.create(char_1='slot1', char_2='slot2')
# Plugin counts
old_placeholder_1_plugin_count = len(instance.placeholder_1.get_plugins())
old_placeholder_2_plugin_count = len(instance.placeholder_2.get_plugins())
# Switch around the slot names
instance.char_1, instance.char_2 = instance.char_2, instance.char_1
# Store the ids before save, to test that a new placeholder is NOT created.
placeholder_1_id = instance.placeholder_1.pk
placeholder_2_id = instance.placeholder_2.pk
# Save instance
instance.save()
current_placeholder_1_plugin_count = len(instance.placeholder_1.get_plugins())
current_placeholder_2_plugin_count = len(instance.placeholder_2.get_plugins())
# Now test that the placeholder slots have changed
self.assertEqual(instance.char_2, 'slot1')
self.assertEqual(instance.char_1, 'slot2')
# Test that a new placeholder was never created
self.assertEqual(instance.placeholder_1.pk, placeholder_1_id)
self.assertEqual(instance.placeholder_2.pk, placeholder_2_id)
# And test the plugin counts remain the same
self.assertEqual(old_placeholder_1_plugin_count, current_placeholder_1_plugin_count)
self.assertEqual(old_placeholder_2_plugin_count, current_placeholder_2_plugin_count)
def test_plugins_language_fallback(self):
""" Tests language_fallback placeholder configuration """
page_en = create_page('page_en', 'col_two.html', 'en')
title_de = create_title("de", "page_de", page_en)
placeholder_en = page_en.placeholders.get(slot='col_left')
placeholder_de = title_de.page.placeholders.get(slot='col_left')
add_plugin(placeholder_en, TextPlugin, 'en', body='en body')
class NoPushPopContext(SekizaiContext):
def push(self):
pass
pop = push
context_en = NoPushPopContext()
context_en['request'] = self.get_request(language="en", page=page_en)
context_de = NoPushPopContext()
context_de['request'] = self.get_request(language="de", page=page_en)
        # First test the default (non-fallback) behavior
## English page should have the text plugin
content_en = render_placeholder(placeholder_en, context_en)
self.assertRegexpMatches(content_en, "^en body$")
## Deutsch page should have no text
content_de = render_placeholder(placeholder_de, context_de)
self.assertNotRegex(content_de, "^en body$")
self.assertEqual(len(content_de), 0)
conf = {
'col_left': {
'language_fallback': True,
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
## Deutsch page should have no text
del(placeholder_de._plugins_cache)
cache.clear()
content_de = render_placeholder(placeholder_de, context_de)
self.assertRegexpMatches(content_de, "^en body$")
context_de2 = NoPushPopContext()
request = self.get_request(language="de", page=page_en)
request.user = self.get_superuser()
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
context_de2['request'] = request
del(placeholder_de._plugins_cache)
cache.clear()
content_de2 = render_placeholder(placeholder_de, context_de2)
self.assertFalse("en body" in content_de2)
# remove the cached plugins instances
del(placeholder_de._plugins_cache)
cache.clear()
# Then we add a plugin to check for proper rendering
add_plugin(placeholder_de, TextPlugin, 'de', body='de body')
content_de = render_placeholder(placeholder_de, context_de)
self.assertRegexpMatches(content_de, "^de body$")
def test_plugins_non_default_language_fallback(self):
""" Tests language_fallback placeholder configuration """
page_en = create_page('page_en', 'col_two.html', 'en')
create_title("de", "page_de", page_en)
placeholder_en = page_en.placeholders.get(slot='col_left')
placeholder_de = page_en.placeholders.get(slot='col_left')
add_plugin(placeholder_de, TextPlugin, 'de', body='de body')
class NoPushPopContext(Context):
def push(self):
pass
pop = push
context_en = NoPushPopContext()
context_en['request'] = self.get_request(language="en", page=page_en)
context_de = NoPushPopContext()
context_de['request'] = self.get_request(language="de", page=page_en)
        # First test the default (non-fallback) behavior
## Deutsch page should have the text plugin
content_de = render_placeholder(placeholder_en, context_de)
self.assertRegexpMatches(content_de, "^de body$")
del(placeholder_en._plugins_cache)
cache.clear()
## English page should have no text
content_en = render_placeholder(placeholder_en, context_en)
self.assertNotRegex(content_en, "^de body$")
self.assertEqual(len(content_en), 0)
del(placeholder_en._plugins_cache)
cache.clear()
conf = {
'col_left': {
'language_fallback': True,
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
## English page should have deutsch text
content_en = render_placeholder(placeholder_en, context_en)
self.assertRegexpMatches(content_en, "^de body$")
# remove the cached plugins instances
del(placeholder_en._plugins_cache)
cache.clear()
# Then we add a plugin to check for proper rendering
add_plugin(placeholder_en, TextPlugin, 'en', body='en body')
content_en = render_placeholder(placeholder_en, context_en)
self.assertRegexpMatches(content_en, "^en body$")
def test_plugins_discarded_with_language_fallback(self):
"""
        Tests a side effect of language fallback: if a fallback-enabled placeholder
        exists, it discards all other existing plugins.
"""
page_en = create_page('page_en', 'col_two.html', 'en')
create_title("de", "page_de", page_en)
placeholder_sidebar_en = page_en.placeholders.get(slot='col_sidebar')
placeholder_en = page_en.placeholders.get(slot='col_left')
add_plugin(placeholder_sidebar_en, TextPlugin, 'en', body='en body')
class NoPushPopContext(Context):
def push(self):
pass
pop = push
context_en = NoPushPopContext()
context_en['request'] = self.get_request(language="en", page=page_en)
conf = {
'col_left': {
'language_fallback': True,
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
# call assign plugins first, as this is what is done in real cms life
# for all placeholders in a page at once
assign_plugins(context_en['request'],
[placeholder_sidebar_en, placeholder_en], 'col_two.html')
            # check that the normal, non-fallback-enabled placeholder still has content
content_en = render_placeholder(placeholder_sidebar_en, context_en)
self.assertRegexpMatches(content_en, "^en body$")
# remove the cached plugins instances
del(placeholder_sidebar_en._plugins_cache)
cache.clear()
def test_plugins_prepopulate(self):
""" Tests prepopulate placeholder configuration """
class NoPushPopContext(Context):
def push(self):
pass
pop = push
conf = {
'col_left': {
'default_plugins' : [
{
'plugin_type':'TextPlugin',
'values':{'body':'<p>en default body 1</p>'},
},
{
'plugin_type':'TextPlugin',
'values':{'body':'<p>en default body 2</p>'},
},
]
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
page = create_page('page_en', 'col_two.html', 'en')
placeholder = page.placeholders.get(slot='col_left')
context = NoPushPopContext()
context['request'] = self.get_request(language="en", page=page)
# Our page should have "en default body 1" AND "en default body 2"
content = render_placeholder(placeholder, context)
self.assertRegexpMatches(content, "^<p>en default body 1</p>\s*<p>en default body 2</p>$")
def test_plugins_children_prepopulate(self):
"""
Validate a default textplugin with a nested default link plugin
"""
class NoPushPopContext(Context):
def push(self):
pass
pop = push
conf = {
'col_left': {
'default_plugins': [
{
'plugin_type': 'TextPlugin',
'values': {
'body': '<p>body %(_tag_child_1)s and %(_tag_child_2)s</p>'
},
'children': [
{
'plugin_type': 'LinkPlugin',
'values': {
'name': 'django',
'url': 'https://www.djangoproject.com/'
},
},
{
'plugin_type': 'LinkPlugin',
'values': {
'name': 'django-cms',
'url': 'https://www.django-cms.org'
},
},
]
},
]
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
page = create_page('page_en', 'col_two.html', 'en')
placeholder = page.placeholders.get(slot='col_left')
context = NoPushPopContext()
context['request'] = self.get_request(language="en", page=page)
render_placeholder(placeholder, context)
plugins = placeholder.get_plugins_list()
self.assertEqual(len(plugins), 3)
self.assertEqual(plugins[0].plugin_type, 'TextPlugin')
self.assertEqual(plugins[1].plugin_type, 'LinkPlugin')
self.assertEqual(plugins[2].plugin_type, 'LinkPlugin')
self.assertTrue(plugins[1].parent == plugins[2].parent and plugins[1].parent == plugins[0])
def test_placeholder_pk_thousands_format(self):
page = create_page("page", "nav_playground.html", "en", published=True)
for placeholder in page.placeholders.all():
page.placeholders.remove(placeholder)
placeholder.pk += 1000
placeholder.save()
page.placeholders.add(placeholder)
page.reload()
for placeholder in page.placeholders.all():
add_plugin(placeholder, "TextPlugin", "en", body="body",
id=placeholder.pk)
with SettingsOverride(USE_THOUSAND_SEPARATOR=True, USE_L10N=True):
# Superuser
user = self.get_superuser()
self.client.login(username=getattr(user, get_user_model().USERNAME_FIELD),
password=getattr(user, get_user_model().USERNAME_FIELD))
response = self.client.get("/en/?edit")
for placeholder in page.placeholders.all():
self.assertContains(
response, "'placeholder_id': '%s'" % placeholder.pk)
self.assertNotContains(
response, "'placeholder_id': '%s'" % format(
placeholder.pk, ".", grouping=3, thousand_sep=","))
self.assertNotContains(
response, "'plugin_id': '%s'" % format(
placeholder.pk, ".", grouping=3, thousand_sep=","))
self.assertNotContains(
response, "'clipboard': '%s'" % format(
response.context['request'].toolbar.clipboard.pk, ".",
grouping=3, thousand_sep=","))
def test_placeholder_languages_model(self):
"""
Checks the retrieval of filled languages for a placeholder in a django
model
"""
avail_langs = set([u'en', u'de', u'fr'])
# Setup instance
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
###
# add the test plugin
###
for lang in avail_langs:
add_plugin(ex.placeholder, u"EmptyPlugin", lang)
# reload instance from database
ex = Example1.objects.get(pk=ex.pk)
#get languages
langs = [lang['code'] for lang in ex.placeholder.get_filled_languages()]
self.assertEqual(avail_langs, set(langs))
def test_placeholder_languages_page(self):
"""
        Checks the retrieval of filled languages for a placeholder in a django
        page
"""
avail_langs = set([u'en', u'de', u'fr'])
# Setup instances
page = create_page('test page', 'col_two.html', u'en')
for lang in avail_langs:
if lang != u'en':
create_title(lang, 'test page %s' % lang, page)
placeholder = page.placeholders.get(slot='col_sidebar')
###
# add the test plugin
###
for lang in avail_langs:
add_plugin(placeholder, u"EmptyPlugin", lang)
# reload placeholder from database
placeholder = page.placeholders.get(slot='col_sidebar')
# get languages
langs = [lang['code'] for lang in placeholder.get_filled_languages()]
self.assertEqual(avail_langs, set(langs))
def test_deprecated_PlaceholderAdmin(self):
admin_site = admin.sites.AdminSite()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
pa = PlaceholderAdmin(Placeholder, admin_site)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
self.assertTrue("PlaceholderAdminMixin with admin.ModelAdmin" in str(w[-1].message))
self.assertIsInstance(pa, admin.ModelAdmin, 'PlaceholderAdmin not admin.ModelAdmin')
self.assertIsInstance(pa, PlaceholderAdminMixin, 'PlaceholderAdmin not PlaceholderAdminMixin')
class PlaceholderActionTests(FakemlngFixtures, CMSTestCase):
def test_placeholder_no_action(self):
actions = PlaceholderNoAction()
self.assertEqual(actions.get_copy_languages(), [])
self.assertFalse(actions.copy())
def test_mlng_placeholder_actions_get_copy_languages(self):
actions = MLNGPlaceholderActions()
fr = Translations.objects.get(language_code='fr')
de = Translations.objects.get(language_code='de')
en = Translations.objects.get(language_code='en')
fieldname = 'placeholder'
fr_copy_languages = actions.get_copy_languages(
fr.placeholder, Translations, fieldname
)
de_copy_languages = actions.get_copy_languages(
de.placeholder, Translations, fieldname
)
en_copy_languages = actions.get_copy_languages(
en.placeholder, Translations, fieldname
)
EN = ('en', 'English')
FR = ('fr', 'French')
self.assertEqual(set(fr_copy_languages), set([EN]))
self.assertEqual(set(de_copy_languages), set([EN, FR]))
self.assertEqual(set(en_copy_languages), set([FR]))
def test_mlng_placeholder_actions_copy(self):
actions = MLNGPlaceholderActions()
fr = Translations.objects.get(language_code='fr')
de = Translations.objects.get(language_code='de')
self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1)
self.assertEqual(de.placeholder.cmsplugin_set.count(), 0)
new_plugins = actions.copy(de.placeholder, 'fr', 'placeholder', Translations, 'de')
self.assertEqual(len(new_plugins), 1)
de = self.reload(de)
fr = self.reload(fr)
self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1)
self.assertEqual(de.placeholder.cmsplugin_set.count(), 1)
def test_mlng_placeholder_actions_empty_copy(self):
actions = MLNGPlaceholderActions()
fr = Translations.objects.get(language_code='fr')
de = Translations.objects.get(language_code='de')
self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1)
self.assertEqual(de.placeholder.cmsplugin_set.count(), 0)
new_plugins = actions.copy(fr.placeholder, 'de', 'placeholder', Translations, 'fr')
self.assertEqual(len(new_plugins), 0)
de = self.reload(de)
fr = self.reload(fr)
self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1)
self.assertEqual(de.placeholder.cmsplugin_set.count(), 0)
def test_mlng_placeholder_actions_no_placeholder(self):
actions = MLNGPlaceholderActions()
Translations.objects.filter(language_code='nl').update(placeholder=None)
de = Translations.objects.get(language_code='de')
nl = Translations.objects.get(language_code='nl')
self.assertEqual(nl.placeholder, None)
self.assertEqual(de.placeholder.cmsplugin_set.count(), 0)
okay = actions.copy(de.placeholder, 'nl', 'placeholder', Translations, 'de')
self.assertEqual(okay, False)
de = self.reload(de)
nl = self.reload(nl)
nl = Translations.objects.get(language_code='nl')
de = Translations.objects.get(language_code='de')
class PlaceholderModelTests(CMSTestCase):
def get_mock_user(self, superuser):
return AttributeObject(
is_superuser=superuser,
has_perm=lambda string: False,
)
def get_mock_request(self, superuser=True):
return AttributeObject(
superuser=superuser,
user=self.get_mock_user(superuser)
)
def test_check_placeholder_permissions_ok_for_superuser(self):
ph = Placeholder.objects.create(slot='test', default_width=300)
result = ph.has_change_permission(self.get_mock_request(True))
self.assertTrue(result)
def test_check_placeholder_permissions_nok_for_user(self):
ph = Placeholder.objects.create(slot='test', default_width=300)
result = ph.has_change_permission(self.get_mock_request(False))
self.assertFalse(result)
def test_check_unicode_rendering(self):
ph = Placeholder.objects.create(slot='test', default_width=300)
result = force_unicode(ph)
self.assertEqual(result, u'test')
    def test_exercise_get_attached_model(self):
ph = Placeholder.objects.create(slot='test', default_width=300)
result = ph._get_attached_model()
self.assertEqual(result, None) # Simple PH - no model
    def test_exercise_get_attached_field_name(self):
ph = Placeholder.objects.create(slot='test', default_width=300)
result = ph._get_attached_field_name()
self.assertEqual(result, None) # Simple PH - no field name
    def test_exercise_get_attached_models_notplugins(self):
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
ph = ex.placeholder
result = list(ph._get_attached_models())
self.assertEqual(result, [Example1]) # Simple PH - Example1 model
add_plugin(ph, TextPlugin, 'en', body='en body')
result = list(ph._get_attached_models())
self.assertEqual(result, [Example1]) # Simple PH still one Example1 model
    def test_exercise_get_attached_fields_notplugins(self):
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four',
)
ex.save()
ph = ex.placeholder
result = [f.name for f in list(ph._get_attached_fields())]
self.assertEqual(result, ['placeholder']) # Simple PH - placeholder field name
add_plugin(ph, TextPlugin, 'en', body='en body')
result = [f.name for f in list(ph._get_attached_fields())]
self.assertEqual(result, ['placeholder']) # Simple PH - still one placeholder field name
class PlaceholderAdminTestBase(CMSTestCase):
def get_placeholder(self):
return Placeholder.objects.create(slot='test')
def get_admin(self):
admin.autodiscover()
return admin.site._registry[Example1]
def get_post_request(self, data):
return self.get_request(post_data=data)
class PlaceholderAdminTest(PlaceholderAdminTestBase):
placeholderconf = {'test': {
'limits': {
'global': 2,
'TextPlugin': 1,
}
}
}
def test_global_limit(self):
placeholder = self.get_placeholder()
admin_instance = self.get_admin()
data = {
'plugin_type': 'LinkPlugin',
'placeholder_id': placeholder.pk,
'plugin_language': 'en',
}
superuser = self.get_superuser()
with UserLoginContext(self, superuser):
with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
request = self.get_post_request(data)
response = admin_instance.add_plugin(request) # first
self.assertEqual(response.status_code, 200)
response = admin_instance.add_plugin(request) # second
self.assertEqual(response.status_code, 200)
response = admin_instance.add_plugin(request) # third
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, b"This placeholder already has the maximum number of plugins (2).")
def test_type_limit(self):
placeholder = self.get_placeholder()
admin_instance = self.get_admin()
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': placeholder.pk,
'plugin_language': 'en',
}
superuser = self.get_superuser()
with UserLoginContext(self, superuser):
with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
request = self.get_post_request(data)
response = admin_instance.add_plugin(request) # first
self.assertEqual(response.status_code, 200)
response = admin_instance.add_plugin(request) # second
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
b"This placeholder already has the maximum number (1) of allowed Text plugins.")
def test_global_limit_on_plugin_move(self):
admin_instance = self.get_admin()
superuser = self.get_superuser()
source_placeholder = Placeholder.objects.create(slot='source')
target_placeholder = self.get_placeholder()
data = {
'placeholder': source_placeholder,
'plugin_type': 'LinkPlugin',
'language': 'en',
}
plugin_1 = add_plugin(**data)
plugin_2 = add_plugin(**data)
plugin_3 = add_plugin(**data)
with UserLoginContext(self, superuser):
with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_1.pk})
response = admin_instance.move_plugin(request) # first
self.assertEqual(response.status_code, 200)
request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_2.pk})
response = admin_instance.move_plugin(request) # second
self.assertEqual(response.status_code, 200)
request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_3.pk})
response = admin_instance.move_plugin(request) # third
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, b"This placeholder already has the maximum number of plugins (2).")
def test_type_limit_on_plugin_move(self):
admin_instance = self.get_admin()
superuser = self.get_superuser()
source_placeholder = Placeholder.objects.create(slot='source')
target_placeholder = self.get_placeholder()
data = {
'placeholder': source_placeholder,
'plugin_type': 'TextPlugin',
'language': 'en',
}
plugin_1 = add_plugin(**data)
plugin_2 = add_plugin(**data)
with UserLoginContext(self, superuser):
with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_1.pk})
response = admin_instance.move_plugin(request) # first
self.assertEqual(response.status_code, 200)
request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_2.pk})
response = admin_instance.move_plugin(request) # second
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
b"This placeholder already has the maximum number (1) of allowed Text plugins.")
def test_edit_plugin_and_cancel(self):
placeholder = self.get_placeholder()
admin_instance = self.get_admin()
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': placeholder.pk,
'plugin_language': 'en',
}
superuser = self.get_superuser()
with UserLoginContext(self, superuser):
with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
request = self.get_post_request(data)
response = admin_instance.add_plugin(request)
self.assertEqual(response.status_code, 200)
plugin_id = int(str(response.content).split('edit-plugin/')[1].split("/")[0])
data = {
'body': 'Hello World',
}
request = self.get_post_request(data)
response = admin_instance.edit_plugin(request, plugin_id)
self.assertEqual(response.status_code, 200)
text_plugin = Text.objects.get(pk=plugin_id)
self.assertEqual('Hello World', text_plugin.body)
# edit again, but this time press cancel
data = {
'body': 'Hello World!!',
'_cancel': True,
}
request = self.get_post_request(data)
response = admin_instance.edit_plugin(request, plugin_id)
self.assertEqual(response.status_code, 200)
text_plugin = Text.objects.get(pk=plugin_id)
self.assertEqual('Hello World', text_plugin.body)
class PlaceholderPluginPermissionTests(PlaceholderAdminTestBase):
def _testuser(self):
User = get_user_model()
u = User(is_staff=True, is_active=True, is_superuser=False)
setattr(u, u.USERNAME_FIELD, "test")
u.set_password("test")
u.save()
return u
def _create_example(self):
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
self._placeholder = ex.placeholder
self.example_object = ex
def _create_plugin(self):
self._plugin = add_plugin(self._placeholder, 'TextPlugin', 'en')
def _give_permission(self, user, model, permission_type, save=True):
codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
user.user_permissions.add(Permission.objects.get(codename=codename))
def _delete_permission(self, user, model, permission_type, save=True):
codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
user.user_permissions.remove(Permission.objects.get(codename=codename))
def _give_object_permission(self, user, object, permission_type, save=True):
codename = '%s_%s' % (permission_type, object.__class__._meta.object_name.lower())
UserObjectPermission.objects.assign_perm(codename, user=user, obj=object)
def _delete_object_permission(self, user, object, permission_type, save=True):
codename = '%s_%s' % (permission_type, object.__class__._meta.object_name.lower())
UserObjectPermission.objects.remove_perm(codename, user=user, obj=object)
def _post_request(self, user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': self._placeholder.pk,
'plugin_language': 'en',
}
request = self.get_post_request(data)
request.user = self.reload(user)
request._messages = default_storage(request)
return request
def test_plugin_add_requires_permissions(self):
"""User wants to add a plugin to the example app placeholder but has no permissions"""
self._test_plugin_action_requires_permissions('add')
def test_plugin_edit_requires_permissions(self):
"""User wants to edit a plugin to the example app placeholder but has no permissions"""
self._test_plugin_action_requires_permissions('change')
def _test_plugin_action_requires_permissions(self, key):
self._create_example()
if key == 'change':
self._create_plugin()
normal_guy = self._testuser()
admin_instance = self.get_admin()
# check all combinations of plugin, app and object permission
for perms in itertools.product(*[[False, True]]*3):
self._set_perms(normal_guy, [Text, Example1, self.example_object], perms, key)
request = self._post_request(normal_guy)
if key == 'add':
response = admin_instance.add_plugin(request)
elif key == 'change':
response = admin_instance.edit_plugin(request, self._plugin.id)
should_pass = perms[0] and (perms[1] or perms[2])
expected_status_code = HttpResponse.status_code if should_pass else HttpResponseForbidden.status_code
self.assertEqual(response.status_code, expected_status_code)
# cleanup
self._set_perms(normal_guy, [Text, Example1, self.example_object], (False,)*3, key)
def _set_perms(self, user, objects, perms, key):
for obj, perm in zip(objects, perms):
action = 'give' if perm else 'delete'
object_key = '_object' if isinstance(obj, models.Model) else ''
method_name = '_%s%s_permission' % (action, object_key)
getattr(self, method_name)(user, obj, key)
class PlaceholderConfTests(TestCase):
def test_get_all_plugins_single_page(self):
page = create_page('page', 'col_two.html', 'en')
placeholder = page.placeholders.get(slot='col_left')
conf = {
'col_two': {
'plugins': ['TextPlugin', 'LinkPlugin'],
},
'col_two.html col_left': {
'plugins': ['LinkPlugin'],
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
plugins = plugin_pool.get_all_plugins(placeholder, page)
self.assertEqual(len(plugins), 1, plugins)
self.assertEqual(plugins[0], LinkPlugin)
def test_get_all_plugins_inherit(self):
parent = create_page('parent', 'col_two.html', 'en')
page = create_page('page', constants.TEMPLATE_INHERITANCE_MAGIC, 'en', parent=parent)
placeholder = page.placeholders.get(slot='col_left')
conf = {
'col_two': {
'plugins': ['TextPlugin', 'LinkPlugin'],
},
'col_two.html col_left': {
'plugins': ['LinkPlugin'],
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
plugins = plugin_pool.get_all_plugins(placeholder, page)
self.assertEqual(len(plugins), 1, plugins)
self.assertEqual(plugins[0], LinkPlugin)
class PlaceholderI18NTest(CMSTestCase):
def _testuser(self):
User = get_user_model()
u = User(is_staff=True, is_active=True, is_superuser=True)
setattr(u, u.USERNAME_FIELD, "test")
u.set_password("test")
u.save()
return u
def test_hvad_tabs(self):
ex = MultilingualExample1(
char_1='one',
char_2='two',
)
ex.save()
self._testuser()
self.client.login(username='test', password='test')
response = self.client.get('/de/admin/placeholderapp/multilingualexample1/%d/' % ex.pk)
self.assertContains(response, '<input type="hidden" class="language_button selected" name="de" />')
def test_no_tabs(self):
ex = Example1(
char_1='one',
char_2='two',
char_3='one',
char_4='two',
)
ex.save()
self._testuser()
self.client.login(username='test', password='test')
response = self.client.get('/de/admin/placeholderapp/example1/%d/' % ex.pk)
self.assertNotContains(response, '<input type="hidden" class="language_button selected" name="de" />')
def test_placeholder_tabs(self):
ex = TwoPlaceholderExample(
char_1='one',
char_2='two',
char_3='one',
char_4='two',
)
ex.save()
self._testuser()
self.client.login(username='test', password='test')
response = self.client.get('/de/admin/placeholderapp/twoplaceholderexample/%d/' % ex.pk)
self.assertNotContains(response,
"""<input type="button" onclick="trigger_lang_button(this,'./?language=en');" class="language_button selected" id="debutton" name="en" value="English">""")
| 43.801945
| 186
| 0.628603
|
42302c2ca9b88636d9a09fbcc71c7c9dc33211c7
| 9,690
|
py
|
Python
|
src/launch_aggregation_term_evaluation.py
|
Jie317/WordEmbeddingsAnalysis
|
e412782aa135a452894b0ab1f0cc4e5df037a0d8
|
[
"Apache-2.0"
] | 1
|
2017-08-11T13:23:08.000Z
|
2017-08-11T13:23:08.000Z
|
src/launch_aggregation_term_evaluation.py
|
Jie317/WordEmbeddingsEvaluation
|
e412782aa135a452894b0ab1f0cc4e5df037a0d8
|
[
"Apache-2.0"
] | null | null | null |
src/launch_aggregation_term_evaluation.py
|
Jie317/WordEmbeddingsEvaluation
|
e412782aa135a452894b0ab1f0cc4e5df037a0d8
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python
import glob, time, sys, os
import numpy as np
from sklearn.cross_validation import KFold
from tools.neural_network import simple_neural_network
from tools.tflearn_neural_network import tflearn_DNN_training
help_s = '''
A script to aggregate the models by applying a set of weights to each model. The weights are trained by a single-neuron model which consists of an input layer and an output layer. The result is given as mean squared error, which is stored in the global result csv file beside the folder "details".
Inputs:
	[1] directory of "details", which is located in the subdirectory of the timestamped folder under the "result" folder
		For example, "../results/sch_single_neuron_0811211927__0811212032_complete/evaluation_complete/details/"
		Attention: run "launch_computation_and_evaluation.py" first to generate this directory
	[2] iteration number during the training
		put 0 to apply the default value: 100000
	[3] learning rate during the training
		put 0 to apply the default value: 0.05
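Example invocation (path is the one above; 0 selects the defaults):
	python launch_aggregation_term_evaluation.py ../results/sch_single_neuron_0811211927__0811212032_complete/evaluation_complete/details/ 0 0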
Version 0.1 by Jie He @LGI2P, EMA
'''
def get_sm(level): # return sample matrix and concepts number
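	# Builds one sample [X, y] per term: X is the flattened matrix of similarity
	# scores (one row per candidate concept, one column per model, ordered as mns)
	# and y marks with 1 the concept actually paired with the term.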
# extract the vocabularies (terms and concepts)
voc_concepts = set()
voc_terms = set()
pairs_dict = {}
with open(details_dir + 'Level_%d/intersection.pair' % level) as com_pairs:
for l in com_pairs:
voc_terms.add(l.split()[0])
voc_concepts.add(l.split()[1])
pairs_dict[l.split()[0]] = l.split()[1]
print '\n\n>>>>>>>>>>Level %d\nvoc terms: %d\n' % (level,len(voc_terms)), voc_terms
	print '\nvoc concepts: %d\n' % len(voc_concepts), voc_concepts,'\n'
# extract all the original pair sims from all the models in this level
sims_all_models = {} # key is the model name, to store all the sim info for all the terms in all the models
for m in mns:
model_sims = {} # key is the pairs, value is the sims, for each model
with open(details_dir + 'Level_%d/%s_term_concepts.sims' % (level, m),'r') as f:
for line in f: model_sims[line.split('\t')[0]] = float(line.split('\t')[1])
sims_all_models[m] = model_sims
# extract sample items
sample_items = [] # matrix where the sample items are stored (pair, input and output)
for term in voc_terms:
for concept in voc_concepts:
# generate samples (the last element is output)
sample_unit = []
sample_unit = [term, concept, pairs_dict[term]] + [sims_all_models[m][term+' '+concept] for m in mns] # the order of values remains the same as that of model_names
# add the classification label(output)
if pairs_dict[term] == concept: sample_unit.append(1)
else: sample_unit.append(0)
sample_items.append(sample_unit)
# get sample matrix
sm_raw = [r[3:] for r in sample_items]
# convert to standard sample.
sm_std = []
cn = len(voc_concepts)
tn = len(voc_terms)
for i in xrange(tn):
sm_temp = sm_raw[i*cn:(i+1)*cn]
X = np.array(sm_temp, dtype='|S4').astype(np.float)[:, :-1]
X = X.ravel()
y = np.array(sm_temp, dtype='|S4').astype(np.float)[:, -1]
sm_std.append([X, y])
return sm_std, cn
def write_sample_items_to_file(cn, sample_items): # return none
fpa = details_dir+'Level_%d/samples.item' % level
with open(fpa,'w+') as output_samples:
output_samples.write('Term\tConcepts\tCorrect Concept\t')
for mn in mns: output_samples.write(mn+'\t')
output_samples.write('%d\n' % cn)
for l in sample_items:
for value in l: output_samples.write('%s\t' % value)
output_samples.write('\n')
def start_training_kfCV_for_TC_evalutation(level,k=3): # return weights matrix
kv = KFold(len(sm), n_folds=k, shuffle=True, random_state=None)
count = 0
test = []
test_w = []
test_m = {}
ws = []
for n in mns: test_m[n] = []
for tr_index, te_index in kv:
count += 1
print '\nLevel %d, K_fold %d\ntrain index:\n' % (level,count),tr_index, '\ntest index: \n', te_index
train_set = []
test_set = []
for tr in tr_index:
train_set.append(sm[tr])
for te in te_index:
test_set.append(sm[te])
X = np.array([r[0] for r in train_set])
y = np.array([r[1] for r in train_set])
X_t = np.array([r[0] for r in test_set])
y_t = np.array([r[1] for r in test_set])
weights = tflearn_DNN_training(X, y, mns)
#weights, _, _, _, _, _ = simple_neural_network(X, y, X_t, y_t, mns, it=it, alpha=alpha, hd=HIDDEN_DIM) ###### train the model and get the weights
#print "weights:\n",weights
weights = normalize(weights) # rescale the weights by Frobenius Norm to make them comparable between the 4 levels
#print "weights rescaled:\n",weights
ws.append(weights.T[0].tolist()) # add "[0]" to convert the 2-d array into a 1-d array
# test with and without the trained weights
'''test.append(test_TC_evaluation(X_t, y_t))
for i,n in enumerate(mns):
test_m[n].append(test_TC_evaluation(np.array(X_t[:,i]).reshape((len(X_t),1)),y_t))'''
#test_w.append(test_TC_evaluation(X_t, y_t, w=weights))
return ws, test, test_w, test_m
def test_TC_evaluation(X_t, y_t, w=None): # return test result (prediction ratio, or evaluation performance)
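	# A term counts as correctly predicted when the candidate concept with the
	# highest (weighted) summed similarity is the one actually paired with it.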
if w is None: w = [[1.] for n in range(len(X_t[0]))]
w_array = np.array(w)
correct_count = 0
#print 'Weights in test:\n',w_array
#print 'X_t:\n',X_t
sum_sims = X_t.dot(w_array)
#print 'Sum_sims:\n',sum_sims
for i in range(len(X_t)/cn):
t_tem = sum_sims[i*cn:(i+1)*cn]
c_tem = y_t[i*cn:(i+1)*cn]
#print np.argmax(t_tem)
if c_tem[np.argmax(t_tem)] == 1:
correct_count += 1
return [correct_count, len(X_t)/cn, float(correct_count)/(len(X_t)/cn)]
def normalize(v): # apply Frobenius Norm
norm=np.linalg.norm(v)
if norm==0: return v
return v/norm
def save_oprms():
g_re.write('\n>>>>Model Prediction Accuracy\n')
for e in head_pr_m: g_re.write(e+'\t')
g_re.write('\n')
for l in range(1,5):
h_row0 = [['Level %d' % l] for n in mns] + [['Average']]
h_row1 = [[n] for n in mns] + [['-']]
#print 'oprms:\n',oprms
oprms[l] = np.hstack((np.hstack((h_row0, h_row1)), np.vstack((oprms[l], np.mean(oprms[l],axis=0)))))
np.savetxt(g_re, oprms[l], delimiter='\t', fmt='%s')
def save_fold_details():
np.savetxt(g_re, np.array(ws), delimiter='\t', header='\n>>>>>>>>K-fold details: Level %d\nWeight matrix (K=3)' % level, fmt='%.5f')
np.savetxt(g_re, np.array(test), delimiter='\t', header='Test without weights', fmt='%.3f')
np.savetxt(g_re, np.array(test_w), delimiter='\t', header='Test with weights', fmt='%.3f')
for n in mns: np.savetxt(g_re, np.array(test_m[n]), delimiter='\t', header='Test for model %s' % n, fmt='%.3f')
if len(sys.argv) != 4:
print help_s
exit()
# parameters
it = 100000 # iteration number to update the weights
alpha = 0.05 # learning rate to rescale the update value
HIDDEN_DIM = 1 # fixed to 1 to degrade the neural model to single neuron model
details_dir = sys.argv[1]
if sys.argv[2] != '0': it = int(sys.argv[2])
if sys.argv[3] != '0': alpha = float(sys.argv[3])
if not os.path.exists('../results/'): os.makedirs('../results/')
global_result_path = glob.glob(details_dir[:-8]+'*evaluation_result.csv')[0]
print '\n---------------- Starting aggregation --------------\n'
# get model names (and fix their order by setting the variable type as list)
mns = []
for fpath in glob.glob(details_dir + 'Level_1/*term_concepts.sims'):
model_name = fpath.replace(details_dir + 'Level_1/', '').replace('_term_concepts.sims', '')
mns.append(model_name)
wm = [] # weights matrix (4 levels)
oprs = [] # original prediction accuracies (4 levels) ############## Meaningless to calculate the averages for original predictions in each level
prs = [] # prediction accuracies when applying trained weights (4 levels)
oprms = {} # original prediction accuracies for each model (4 levels)
g_re = open(global_result_path,'a+')
for level in range(1,5):
sm, cn = get_sm(level) # get sample matrix and concepts number in current level
ws, test, test_w, test_m = start_training_kfCV_for_TC_evalutation(level)
#print 'ws:\n',ws,type(ws)
wm.append(np.mean(ws,axis=0).tolist())
oprs.append(np.mean(test,axis=0).tolist())
prs.append(np.mean(test_w,axis=0).tolist())
oprms[level] = []
for n in mns: oprms[level].append(np.mean(test_m[n],axis=0).tolist())
#save_fold_details() # save the details of each fold in K-fold Cross Validation
# write conclusion tables to the result file
# make headers and titles to establish the table
h_wm = ['Model Name'] + mns
h_pr = ['Concept Level','Correct Prediction','Pairs Number','Prediction Accuracy']
h_pr_m = ['Concept Level','Model Name','Correct Prediction','Pairs Number','Prediction Accuracy']
h_row_pr = [['Level %d' % l] for l in range(1,5)]+[['Average']]
h_row = [['Level %d' % l] for l in range(1,5)]
h_row_pr_m = [[n] for n in mns]+[['Average']]
# calculate the averages and add headers and other infos to complete the table (to facilitate the write process)
wm_output = np.vstack((h_wm, np.hstack((h_row,wm))))
oprs_output = np.vstack((h_pr, np.hstack((h_row_pr,np.vstack((oprs,np.mean(oprs,axis=0)))))))
prs_output = np.vstack((h_pr, np.hstack((h_row_pr,np.vstack((prs,np.mean(prs,axis=0)))))))
g_re.write('\n>>>>>>>>>>>>>>>>\nTC evaluation task: local time %s\n' % time.strftime("%c"))
np.savetxt(g_re, wm_output, delimiter='\t', header='>>>>>>>>Conclusion (it=%d,alpha=%.2f)\n>>>>Weight matrix (K=3)' % (it,alpha), fmt='%s')
#np.savetxt(g_re, oprs_output, delimiter='\t', header='\n>>>>Original Prediction Accuracy', fmt='%s')
np.savetxt(g_re, prs_output, delimiter='\t', header='\n>>>>Prediction Accuracy With Trained Weights', fmt='%s')
save_oprms() # no need to run it every time unless the original data(samples) are changed
g_re.close()
print '\n---------------- Aggregation ended --------------\n'
| 41.234043
| 300
| 0.685346
|
d1d9907e9013d3d857f3263a78f6005026f33653
| 5,224
|
py
|
Python
|
tests/fixtures/auth0.py
|
techdragon/django-auth0-auth
|
418de8adee28a7dc079af1ed84985239b715a258
|
[
"BSD-2-Clause"
] | 4
|
2017-11-20T02:56:09.000Z
|
2019-06-27T13:20:54.000Z
|
tests/fixtures/auth0.py
|
techdragon/django-auth0-auth
|
418de8adee28a7dc079af1ed84985239b715a258
|
[
"BSD-2-Clause"
] | 1
|
2021-05-10T17:30:21.000Z
|
2021-05-10T17:30:21.000Z
|
tests/fixtures/auth0.py
|
techdragon/django-auth0-auth
|
418de8adee28a7dc079af1ed84985239b715a258
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
import pytest
from test_app.models import Auth0User
from tests.utils.auth0 import (
create_auth0_users_and_confirm,
delete_all_auth0_users as delete_all_auth0_users_via_api,
delete_all_auth0_users_with_confirmation,
pause_and_confirm_total_auth0_users,
)
logger = logging.getLogger(__name__)
DELAY = 15
# TODO: Refactor/remove these fixtures as I'm not testing things the same way anymore.
@pytest.fixture(scope="class")
def one_auth0_user(request):
"""
Pytest fixture providing one Auth0 user for testing.
    Create a new user in Auth0 and attach it to the requesting test case class at runtime.
    This is a little different from most PyTest fixtures because we are working around the fact that
    we are using SeleniumTestCase for some tests, which, as a subclass of TestCase, cannot use normal
    PyTest fixture based parametrization.
:param request:
:return:
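    Example (hypothetical usage on a test-case class):
        @pytest.mark.usefixtures("one_auth0_user")
        class TestWithAuth0User(SeleniumTestCase):
            def test_user_was_created(self):
                assert self.user is not None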
"""
users = create_auth0_users_and_confirm(1)
request.cls.user = users[0]
@pytest.fixture(scope='function')
def delete_all_auth0_users():
logger.info('Deleting all auth0 users.')
delete_all_auth0_users_via_api()
yield
logger.info('Deleting all auth0 users.')
delete_all_auth0_users_via_api()
@pytest.fixture(scope='function')
def delete_all_django_users():
logger.info('Deleting Django User objects.')
Auth0User.objects.all().delete()
yield
logger.info('Deleting Django User objects.')
Auth0User.objects.all().delete()
@pytest.fixture(scope='function')
def cleanup_django_and_auth0():
logger.info('Deleting all auth0 users.')
delete_all_auth0_users_via_api()
logger.info('Deleting Django User objects.')
Auth0User.objects.all().delete()
yield
logger.info('Deleting Django User objects.')
Auth0User.objects.all().delete()
logger.info('Deleting all auth0 users.')
delete_all_auth0_users_via_api()
@pytest.fixture(scope='function')
def one_user():
number_of_users = 1
logger.info('Start of one_user() fixture.')
logger.info('Deleting all auth0 users.')
delete_all_auth0_users_with_confirmation()
logger.info('Deleting Django User objects.')
Auth0User.objects.all().delete()
create_auth0_users_and_confirm(number_of_users)
pause_and_confirm_total_auth0_users(DELAY, number_of_users)
yield
logger.info('Deleting Django User objects.')
Auth0User.objects.all().delete()
logger.info('Deleting all auth0 users.')
delete_all_auth0_users_with_confirmation()
logger.info('End of one_user() fixture.')
@pytest.fixture(scope='function')
def five_users():
number_of_users = 5
logger.info('Start of five_users() fixture.')
logger.info('Deleting all auth0 users.')
delete_all_auth0_users_with_confirmation()
logger.info('Deleting Django User objects.')
Auth0User.objects.all().delete()
create_auth0_users_and_confirm(number_of_users)
pause_and_confirm_total_auth0_users(DELAY, number_of_users)
yield
logger.info('Deleting Django User objects.')
Auth0User.objects.all().delete()
logger.info('Deleting all auth0 users.')
delete_all_auth0_users_with_confirmation()
logger.info('End of five_users() fixture.')
@pytest.fixture(scope='function')
def ten_users():
number_of_users = 10
logger.info('Start of ten_users() fixture.')
logger.info('Deleting all auth0 users.')
delete_all_auth0_users_with_confirmation()
logger.info('Deleting Django User objects.')
Auth0User.objects.all().delete()
create_auth0_users_and_confirm(number_of_users)
pause_and_confirm_total_auth0_users(DELAY, number_of_users)
yield
logger.info('Deleting Django User objects.')
Auth0User.objects.all().delete()
logger.info('Deleting all auth0 users.')
delete_all_auth0_users_with_confirmation()
logger.info('End of ten_users() fixture.')
@pytest.fixture(scope='function')
def with_33_auth0_users():
number_of_users = 33
    logger.info('Start of with_33_auth0_users() fixture.')
logger.info('Deleting all auth0 users.')
delete_all_auth0_users_with_confirmation()
logger.info('Deleting Django User objects.')
Auth0User.objects.all().delete()
create_auth0_users_and_confirm(number_of_users)
pause_and_confirm_total_auth0_users(DELAY, number_of_users)
yield
logger.info('Deleting Django User objects.')
Auth0User.objects.all().delete()
logger.info('Deleting all auth0 users.')
delete_all_auth0_users_with_confirmation()
    logger.info('End of with_33_auth0_users() fixture.')
@pytest.fixture(scope='function')
def with_100_auth0_users():
number_of_users = 100
    logger.info('Start of with_100_auth0_users() fixture.')
logger.info('Deleting all auth0 users.')
delete_all_auth0_users_with_confirmation()
logger.info('Deleting Django User objects.')
Auth0User.objects.all().delete()
create_auth0_users_and_confirm(number_of_users)
pause_and_confirm_total_auth0_users(DELAY, number_of_users)
yield
logger.info('Deleting Django User objects.')
Auth0User.objects.all().delete()
logger.info('Deleting all auth0 users.')
delete_all_auth0_users_with_confirmation()
    logger.info('End of with_100_auth0_users() fixture.')
| 33.487179
| 100
| 0.744257
|
629d6ae4a3d4343c372d5b81ac57e23872abd030
| 696
|
py
|
Python
|
register/views/confirmation.py
|
Ajuajmal/heroku
|
f23aad8c392a273caf0da39cedeec4746ded29dc
|
[
"0BSD"
] | null | null | null |
register/views/confirmation.py
|
Ajuajmal/heroku
|
f23aad8c392a273caf0da39cedeec4746ded29dc
|
[
"0BSD"
] | null | null | null |
register/views/confirmation.py
|
Ajuajmal/heroku
|
f23aad8c392a273caf0da39cedeec4746ded29dc
|
[
"0BSD"
] | null | null | null |
from django.forms import Form
from django.urls import reverse
from register.models.attendee import Attendee
from register.views.core import RegisterStep
class ConfirmationView(RegisterStep):
title = 'Confirmation'
form_class = Form
template_name = 'register/page/confirmation.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
try:
context['attendee'] = self.request.user.attendee
except Attendee.DoesNotExist:
pass
return context
def get_success_url(self):
return reverse('wafer_user_profile',
kwargs={'username': self.request.user.username})
| 26.769231
| 71
| 0.679598
|
37395206fca18fff502ff574c63566be4b57318e
| 12,977
|
py
|
Python
|
rllib/utils/test_utils.py
|
AnesBenmerzoug/ray
|
5921e87ecd4e359fad60dab55f45855456d591e5
|
[
"Apache-2.0"
] | null | null | null |
rllib/utils/test_utils.py
|
AnesBenmerzoug/ray
|
5921e87ecd4e359fad60dab55f45855456d591e5
|
[
"Apache-2.0"
] | null | null | null |
rllib/utils/test_utils.py
|
AnesBenmerzoug/ray
|
5921e87ecd4e359fad60dab55f45855456d591e5
|
[
"Apache-2.0"
] | null | null | null |
import gym
import logging
import numpy as np
from ray.rllib.utils.framework import try_import_tf, try_import_torch
tf1, tf, tfv = try_import_tf()
if tf1:
eager_mode = None
try:
from tensorflow.python.eager.context import eager_mode
except (ImportError, ModuleNotFoundError):
pass
torch, _ = try_import_torch()
logger = logging.getLogger(__name__)
def framework_iterator(config=None,
frameworks=("tf2", "tf", "tfe", "torch"),
session=False):
"""An generator that allows for looping through n frameworks for testing.
Provides the correct config entries ("framework") as well
as the correct eager/non-eager contexts for tfe/tf.
Args:
config (Optional[dict]): An optional config dict to alter in place
depending on the iteration.
frameworks (Tuple[str]): A list/tuple of the frameworks to be tested.
Allowed are: "tf2", "tf", "tfe", "torch", and None.
session (bool): If True and only in the tf-case: Enter a tf.Session()
and yield that as second return value (otherwise yield (fw, None)).
Yields:
        str: If session is False:
            The current framework ("tf2", "tf", "tfe", "torch") used.
        Tuple(str, Union[None, tf.Session]): If session is True:
            A tuple of the current fw and the tf.Session if fw="tf".
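    Examples:
        A minimal usage sketch (illustrative; assumes the chosen frameworks
        are installed):
            config = {}
            for fw in framework_iterator(config, frameworks=("tf", "torch")):
                assert config["framework"] == fw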
"""
config = config or {}
frameworks = [frameworks] if isinstance(frameworks, str) else \
list(frameworks)
# Both tf2 and tfe present -> remove "tfe" or "tf2" depending on version.
if "tf2" in frameworks and "tfe" in frameworks:
frameworks.remove("tfe" if tfv == 2 else "tf2")
for fw in frameworks:
# Skip non-installed frameworks.
if fw == "torch" and not torch:
logger.warning(
"framework_iterator skipping torch (not installed)!")
continue
if fw != "torch" and not tf:
logger.warning("framework_iterator skipping {} (tf not "
"installed)!".format(fw))
continue
elif fw == "tfe" and not eager_mode:
logger.warning("framework_iterator skipping tf-eager (could not "
"import `eager_mode` from tensorflow.python)!")
continue
elif fw == "tf2" and tfv != 2:
logger.warning(
"framework_iterator skipping tf2.x (tf version is < 2.0)!")
continue
assert fw in ["tf2", "tf", "tfe", "torch", None]
# Do we need a test session?
sess = None
if fw == "tf" and session is True:
sess = tf1.Session()
sess.__enter__()
print("framework={}".format(fw))
config["framework"] = fw
eager_ctx = None
# Enable eager mode for tf2 and tfe.
if fw in ["tf2", "tfe"]:
eager_ctx = eager_mode()
eager_ctx.__enter__()
assert tf1.executing_eagerly()
# Make sure, eager mode is off.
elif fw == "tf":
assert not tf1.executing_eagerly()
yield fw if session is False else (fw, sess)
# Exit any context we may have entered.
if eager_ctx:
eager_ctx.__exit__(None, None, None)
elif sess:
sess.__exit__(None, None, None)
def check(x, y, decimals=5, atol=None, rtol=None, false=False):
"""
Checks two structures (dict, tuple, list,
np.array, float, int, etc..) for (almost) numeric identity.
All numbers in the two structures have to match up to `decimal` digits
after the floating point. Uses assertions.
Args:
x (any): The value to be compared (to the expectation: `y`). This
may be a Tensor.
y (any): The expected value to be compared to `x`. This must not
be a tf-Tensor, but may be a tfe/torch-Tensor.
decimals (int): The number of digits after the floating point up to
which all numeric values have to match.
atol (float): Absolute tolerance of the difference between x and y
(overrides `decimals` if given).
rtol (float): Relative tolerance of the difference between x and y
(overrides `decimals` if given).
false (bool): Whether to check that x and y are NOT the same.
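    Examples:
        Illustrative calls (values are made up):
            check([1.0, 2.0], [1.0001, 2.0001], decimals=3)  # passes
            check({"a": 1.0}, {"a": 2.0}, false=True)  # passes: values differ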
"""
# A dict type.
if isinstance(x, dict):
assert isinstance(y, dict), \
"ERROR: If x is dict, y needs to be a dict as well!"
y_keys = set(x.keys())
for key, value in x.items():
assert key in y, \
"ERROR: y does not have x's key='{}'! y={}".format(key, y)
check(
value,
y[key],
decimals=decimals,
atol=atol,
rtol=rtol,
false=false)
y_keys.remove(key)
assert not y_keys, \
"ERROR: y contains keys ({}) that are not in x! y={}".\
format(list(y_keys), y)
# A tuple type.
elif isinstance(x, (tuple, list)):
assert isinstance(y, (tuple, list)),\
"ERROR: If x is tuple, y needs to be a tuple as well!"
assert len(y) == len(x),\
"ERROR: y does not have the same length as x ({} vs {})!".\
format(len(y), len(x))
for i, value in enumerate(x):
check(
value,
y[i],
decimals=decimals,
atol=atol,
rtol=rtol,
false=false)
# Boolean comparison.
elif isinstance(x, (np.bool_, bool)):
if false is True:
assert bool(x) is not bool(y), \
"ERROR: x ({}) is y ({})!".format(x, y)
else:
assert bool(x) is bool(y), \
"ERROR: x ({}) is not y ({})!".format(x, y)
# Nones or primitives.
elif x is None or y is None or isinstance(x, (str, int)):
if false is True:
assert x != y, "ERROR: x ({}) is the same as y ({})!".format(x, y)
else:
assert x == y, \
"ERROR: x ({}) is not the same as y ({})!".format(x, y)
# String comparison.
elif hasattr(x, "dtype") and x.dtype == np.object:
try:
np.testing.assert_array_equal(x, y)
if false is True:
assert False, \
"ERROR: x ({}) is the same as y ({})!".format(x, y)
except AssertionError as e:
if false is False:
raise e
# Everything else (assume numeric or tf/torch.Tensor).
else:
if tf1 is not None:
# y should never be a Tensor (y=expected value).
if isinstance(y, tf1.Tensor):
# In eager mode, numpyize tensors.
if tf.executing_eagerly():
y = y.numpy()
else:
raise ValueError(
"`y` (expected value) must not be a Tensor. "
"Use numpy.ndarray instead")
if isinstance(x, tf1.Tensor):
# In eager mode, numpyize tensors.
if tf1.executing_eagerly():
x = x.numpy()
# Otherwise, use a quick tf-session.
else:
with tf1.Session() as sess:
x = sess.run(x)
return check(
x,
y,
decimals=decimals,
atol=atol,
rtol=rtol,
false=false)
if torch is not None:
if isinstance(x, torch.Tensor):
x = x.detach().numpy()
if isinstance(y, torch.Tensor):
y = y.detach().numpy()
# Using decimals.
if atol is None and rtol is None:
# Assert equality of both values.
try:
np.testing.assert_almost_equal(x, y, decimal=decimals)
# Both values are not equal.
except AssertionError as e:
# Raise error in normal case.
if false is False:
raise e
# Both values are equal.
else:
# If false is set -> raise error (not expected to be equal).
if false is True:
assert False, \
"ERROR: x ({}) is the same as y ({})!".format(x, y)
# Using atol/rtol.
else:
# Provide defaults for either one of atol/rtol.
if atol is None:
atol = 0
if rtol is None:
rtol = 1e-7
try:
np.testing.assert_allclose(x, y, atol=atol, rtol=rtol)
except AssertionError as e:
if false is False:
raise e
else:
if false is True:
assert False, \
"ERROR: x ({}) is the same as y ({})!".format(x, y)
def check_learning_achieved(tune_results, min_reward):
"""Throws an error if `min_reward` is not reached within tune_results.
Checks the last iteration found in tune_results for its
"episode_reward_mean" value and compares it to `min_reward`.
Args:
tune_results: The tune.run returned results object.
min_reward (float): The min reward that must be reached.
Raises:
ValueError: If `min_reward` not reached.
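    Example:
        Hypothetical usage after a tune run (env and stop criteria are
        illustrative):
            results = tune.run("PPO", config={"env": "CartPole-v0"},
                               stop={"episode_reward_mean": 150.0})
            check_learning_achieved(results, min_reward=150.0)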
"""
if tune_results.trials[0].last_result["episode_reward_mean"] < min_reward:
raise ValueError("`stop-reward` of {} not reached!".format(min_reward))
print("ok")
def check_compute_single_action(trainer,
include_state=False,
include_prev_action_reward=False):
"""Tests different combinations of arguments for trainer.compute_action.
Args:
trainer (Trainer): The Trainer object to test.
include_state (bool): Whether to include the initial state of the
Policy's Model in the `compute_action` call.
include_prev_action_reward (bool): Whether to include the prev-action
and -reward in the `compute_action` call.
Raises:
ValueError: If anything unexpected happens.
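    Example:
        Illustrative call on an already-built Trainer instance (assumed to
        exist as `trainer`):
            check_compute_single_action(trainer, include_prev_action_reward=True)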
"""
try:
pol = trainer.get_policy()
except AttributeError:
pol = trainer.policy
action_space = pol.action_space
for what in [pol, trainer]:
if what is trainer:
method_to_test = trainer.compute_action
# Get the obs-space from Workers.env (not Policy) due to possible
# pre-processor up front.
worker_set = getattr(trainer, "workers",
getattr(trainer, "_workers", None))
assert worker_set
if isinstance(worker_set, list):
obs_space = trainer.get_policy().observation_space
try:
obs_space = obs_space.original_space
except AttributeError:
pass
else:
obs_space = worker_set.local_worker().env.observation_space
else:
method_to_test = pol.compute_single_action
obs_space = pol.observation_space
for explore in [True, False]:
for full_fetch in ([False, True] if what is trainer else [False]):
call_kwargs = {}
if what is trainer:
call_kwargs["full_fetch"] = full_fetch
else:
call_kwargs["clip_actions"] = True
obs = obs_space.sample()
if isinstance(obs_space, gym.spaces.Box):
obs = np.clip(obs, -1.0, 1.0)
state_in = None
if include_state:
state_in = pol.model.get_initial_state()
action_in = action_space.sample() \
if include_prev_action_reward else None
reward_in = 1.0 if include_prev_action_reward else None
action = method_to_test(
obs,
state_in,
prev_action=action_in,
prev_reward=reward_in,
explore=explore,
**call_kwargs)
state_out = None
if state_in or full_fetch or what is pol:
action, state_out, _ = action
if state_out:
for si, so in zip(state_in, state_out):
check(list(si.shape), so.shape)
if not action_space.contains(action):
raise ValueError(
"Returned action ({}) of trainer/policy {} not in "
"Env's action_space "
"({})!".format(action, what, action_space))
| 37.833819
| 79
| 0.525314
|
6ff06df311c141158fb9b551d3146e6a15a03513
| 1,016
|
py
|
Python
|
model.py
|
DakshMiglani/Cartpole-1
|
d19f5fa8ba3aebab811ae3782af1a3a96b36c376
|
[
"MIT"
] | null | null | null |
model.py
|
DakshMiglani/Cartpole-1
|
d19f5fa8ba3aebab811ae3782af1a3a96b36c376
|
[
"MIT"
] | null | null | null |
model.py
|
DakshMiglani/Cartpole-1
|
d19f5fa8ba3aebab811ae3782af1a3a96b36c376
|
[
"MIT"
] | null | null | null |
import tflearn
def create_model(input_size, output_nodes, learning_rate, dir):
nn = tflearn.input_data(shape=[None, input_size, 1], name='input')
nn = tflearn.fully_connected(nn, 128, activation='relu')
nn = tflearn.dropout(nn, 0.8)
nn = tflearn.fully_connected(nn, 256, activation='relu')
nn = tflearn.dropout(nn, 0.8)
nn = tflearn.fully_connected(nn, 512, activation='relu')
nn = tflearn.dropout(nn, 0.8)
nn = tflearn.fully_connected(nn, 256, activation='relu')
nn = tflearn.dropout(nn, 0.8)
nn = tflearn.fully_connected(nn, 128, activation='relu')
nn = tflearn.dropout(nn, 0.8)
nn = tflearn.fully_connected(nn, 256, activation='relu')
nn = tflearn.dropout(nn, 0.8)
    # Pick an output activation and a matching loss: softmax + categorical
    # cross-entropy for multi-class output, sigmoid + binary cross-entropy for a
    # single output node (categorical cross-entropy is degenerate there).
    if output_nodes > 1:
        activation = 'softmax'
        loss = 'categorical_crossentropy'
    else:
        activation = 'sigmoid'
        loss = 'binary_crossentropy'
    nn = tflearn.fully_connected(nn, output_nodes, activation=activation)
    nn = tflearn.regression(nn, optimizer='adam', loss=loss,
                            learning_rate=learning_rate, name="targets")
return tflearn.DNN(nn, tensorboard_dir=dir)
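# Hypothetical usage sketch (sizes, data and log directory are illustrative):
#   model = create_model(input_size=4, output_nodes=2, learning_rate=1e-3, dir='log/')
#   model.fit({'input': X_train}, {'targets': y_train}, n_epoch=5, show_metric=True)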
| 33.866667
| 80
| 0.726378
|
dec7223b1c0f51d091d8176e835e9c6d53281072
| 512
|
py
|
Python
|
packages/python/plotly/plotly/validators/pie/marker/__init__.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/pie/marker/__init__.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/pie/marker/__init__.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._line import LineValidator
from ._colorssrc import ColorssrcValidator
from ._colors import ColorsValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._line.LineValidator",
"._colorssrc.ColorssrcValidator",
"._colors.ColorsValidator",
],
)
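# Illustrative note: on Python >= 3.7 the validators above are resolved lazily through
# the __getattr__ hook returned by relative_import, so
#   from plotly.validators.pie.marker import ColorsValidator
# only imports the ._colors submodule when the name is first accessed.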
| 25.6
| 55
| 0.658203
|
8ea747f854f9b89f7ca9b8006a63be041fb30bd4
| 288
|
py
|
Python
|
Others/code_festival/CODE_FESTIVAL_2017_qual_B/ProblemA.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 2
|
2020-06-12T09:54:23.000Z
|
2021-05-04T01:34:07.000Z
|
Others/code_festival/CODE_FESTIVAL_2017_qual_B/ProblemA.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 961
|
2020-06-23T07:26:22.000Z
|
2022-03-31T21:34:52.000Z
|
Others/code_festival/CODE_FESTIVAL_2017_qual_B/ProblemA.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | null | null | null |
'''input
0
0
0
0
0
0
0
0
0
240
600
1800
3600
4800
7200
10000
0
10000
10400
10300
10100
9800
9500
8500
7000
5000
10400
10400
'''
# -*- coding: utf-8 -*-
# CODE THANKS FESTIVAL 2017 A
# Problem A
if __name__ == '__main__':
ts = [int(input()) for _ in range(8)]
print(max(ts))
| 6.697674
| 41
| 0.635417
|
85a31b694ac2fc23be9fe319f347cd9cb924b110
| 1,939
|
py
|
Python
|
PreprocessText.py
|
prtshshrm/SpamClassifier
|
af11d445862cecbb95677acbcf1fc2c9dee8dc81
|
[
"Unlicense"
] | null | null | null |
PreprocessText.py
|
prtshshrm/SpamClassifier
|
af11d445862cecbb95677acbcf1fc2c9dee8dc81
|
[
"Unlicense"
] | null | null | null |
PreprocessText.py
|
prtshshrm/SpamClassifier
|
af11d445862cecbb95677acbcf1fc2c9dee8dc81
|
[
"Unlicense"
] | null | null | null |
#Loads data and preprocesses it
import nltk
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from nltk.corpus import stopwords
def loadData(file_name):
df=pd.read_table(file_name, header=None, encoding='utf-8')
return df
def encodeMessages(df):
classes=df[0]
encoder=LabelEncoder()
binary_labels=encoder.fit_transform(classes)
return binary_labels
def preprocessMessages(df):
text_messages=df[1]
    # Replace email addresses with 'emailaddress'
    process_messages = text_messages.str.replace(r'^.+@[^\.].*\.[a-z]{2,}$', 'emailaddress', regex=True)
    # Replace URLs with 'webaddress'
    process_messages = process_messages.str.replace(r'^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$', 'webaddress', regex=True)
    # Replace money symbols with 'moneysymb' (£ can be typed with ALT key + 156)
    process_messages = process_messages.str.replace(r'£|\$', 'moneysymb', regex=True)
    # Replace 10-digit phone numbers (formats include parentheses, spaces, no spaces, dashes) with 'phonenumbr'
    process_messages = process_messages.str.replace(r'^\(?[\d]{3}\)?[\s-]?[\d]{3}[\s-]?[\d]{4}$', 'phonenumbr', regex=True)
    # Replace numbers with 'numbr'
    process_messages = process_messages.str.replace(r'\d+(\.\d+)?', 'numbr', regex=True)
    # Remove punctuation
    process_messages = process_messages.str.replace(r'[^\w\d\s]', ' ', regex=True)
    # Replace whitespace between terms with a single space
    process_messages = process_messages.str.replace(r'\s+', ' ', regex=True)
    # Remove leading and trailing whitespace
    process_messages = process_messages.str.replace(r'^\s+|\s+?$', '', regex=True)
#lowercase
process_messages = process_messages.str.lower()
#remove stop words
stop_words=set(stopwords.words('english'))
process_messages=process_messages.apply(lambda x:' '.join(term for term in x.split() if term not in stop_words))
#use stemming
ps=nltk.PorterStemmer()
process_messages=process_messages.apply(lambda x:' '.join(ps.stem(term) for term in x.split()))
return process_messages
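# Hypothetical end-to-end usage (assumes the SMS spam TSV file exists and the NLTK
# 'stopwords' corpus has been downloaded; the file name is illustrative):
#   df = loadData('SMSSpamCollection')
#   labels = encodeMessages(df)           # ham/spam -> 0/1
#   messages = preprocessMessages(df)     # cleaned, stemmed text ready for vectorising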
| 33.431034
| 115
| 0.725116
|
e6c00de097ad6d930aa204f5f121b3d74f08d05f
| 15,772
|
py
|
Python
|
lib/sqlalchemy/testing/assertsql.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/testing/assertsql.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/testing/assertsql.py
|
petit87/sqlalchemy
|
67d674bd63ca36ac32b23f96e2b19e9dac6b0863
|
[
"MIT"
] | null | null | null |
# testing/assertsql.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from __future__ import annotations
import collections
import contextlib
import re
from .. import event
from ..engine import url
from ..engine.default import DefaultDialect
from ..schema import _DDLCompiles
class AssertRule:
is_consumed = False
errormessage = None
consume_statement = True
def process_statement(self, execute_observed):
pass
def no_more_statements(self):
assert False, (
"All statements are complete, but pending "
"assertion rules remain"
)
class SQLMatchRule(AssertRule):
pass
class CursorSQL(SQLMatchRule):
def __init__(self, statement, params=None, consume_statement=True):
self.statement = statement
self.params = params
self.consume_statement = consume_statement
def process_statement(self, execute_observed):
stmt = execute_observed.statements[0]
if self.statement != stmt.statement or (
self.params is not None and self.params != stmt.parameters
):
self.errormessage = (
"Testing for exact SQL %s parameters %s received %s %s"
% (
self.statement,
self.params,
stmt.statement,
stmt.parameters,
)
)
else:
execute_observed.statements.pop(0)
self.is_consumed = True
if not execute_observed.statements:
self.consume_statement = True
class CompiledSQL(SQLMatchRule):
def __init__(self, statement, params=None, dialect="default"):
self.statement = statement
self.params = params
self.dialect = dialect
def _compare_sql(self, execute_observed, received_statement):
stmt = re.sub(r"[\n\t]", "", self.statement)
return received_statement == stmt
def _compile_dialect(self, execute_observed):
if self.dialect == "default":
dialect = DefaultDialect()
# this is currently what tests are expecting
# dialect.supports_default_values = True
dialect.supports_default_metavalue = True
return dialect
else:
# ugh
if self.dialect == "postgresql":
params = {"implicit_returning": True}
else:
params = {}
return url.URL.create(self.dialect).get_dialect()(**params)
def _received_statement(self, execute_observed):
"""reconstruct the statement and params in terms
of a target dialect, which for CompiledSQL is just DefaultDialect."""
context = execute_observed.context
compare_dialect = self._compile_dialect(execute_observed)
# received_statement runs a full compile(). we should not need to
# consider extracted_parameters; if we do this indicates some state
# is being sent from a previous cached query, which some misbehaviors
# in the ORM can cause, see #6881
cache_key = None # execute_observed.context.compiled.cache_key
extracted_parameters = (
None # execute_observed.context.extracted_parameters
)
if "schema_translate_map" in context.execution_options:
map_ = context.execution_options["schema_translate_map"]
else:
map_ = None
if isinstance(execute_observed.clauseelement, _DDLCompiles):
compiled = execute_observed.clauseelement.compile(
dialect=compare_dialect,
schema_translate_map=map_,
)
else:
compiled = execute_observed.clauseelement.compile(
cache_key=cache_key,
dialect=compare_dialect,
column_keys=context.compiled.column_keys,
for_executemany=context.compiled.for_executemany,
schema_translate_map=map_,
)
_received_statement = re.sub(r"[\n\t]", "", str(compiled))
parameters = execute_observed.parameters
if not parameters:
_received_parameters = [
compiled.construct_params(
extracted_parameters=extracted_parameters
)
]
else:
_received_parameters = [
compiled.construct_params(
m, extracted_parameters=extracted_parameters
)
for m in parameters
]
return _received_statement, _received_parameters
def process_statement(self, execute_observed):
context = execute_observed.context
_received_statement, _received_parameters = self._received_statement(
execute_observed
)
params = self._all_params(context)
equivalent = self._compare_sql(execute_observed, _received_statement)
if equivalent:
if params is not None:
all_params = list(params)
all_received = list(_received_parameters)
while all_params and all_received:
param = dict(all_params.pop(0))
for idx, received in enumerate(list(all_received)):
# do a positive compare only
for param_key in param:
# a key in param did not match current
# 'received'
if (
param_key not in received
or received[param_key] != param[param_key]
):
break
else:
# all keys in param matched 'received';
# onto next param
del all_received[idx]
break
else:
# param did not match any entry
# in all_received
equivalent = False
break
if all_params or all_received:
equivalent = False
if equivalent:
self.is_consumed = True
self.errormessage = None
else:
self.errormessage = self._failure_message(
execute_observed, params
) % {
"received_statement": _received_statement,
"received_parameters": _received_parameters,
}
def _all_params(self, context):
if self.params:
if callable(self.params):
params = self.params(context)
else:
params = self.params
if not isinstance(params, list):
params = [params]
return params
else:
return None
def _failure_message(self, execute_observed, expected_params):
return (
"Testing for compiled statement\n%r partial params %s, "
"received\n%%(received_statement)r with params "
"%%(received_parameters)r"
% (
self.statement.replace("%", "%%"),
repr(expected_params).replace("%", "%%"),
)
)
class RegexSQL(CompiledSQL):
def __init__(self, regex, params=None, dialect="default"):
SQLMatchRule.__init__(self)
self.regex = re.compile(regex)
self.orig_regex = regex
self.params = params
self.dialect = dialect
def _failure_message(self, execute_observed, expected_params):
return (
"Testing for compiled statement ~%r partial params %s, "
"received %%(received_statement)r with params "
"%%(received_parameters)r"
% (
self.orig_regex.replace("%", "%%"),
repr(expected_params).replace("%", "%%"),
)
)
def _compare_sql(self, execute_observed, received_statement):
return bool(self.regex.match(received_statement))
class DialectSQL(CompiledSQL):
def _compile_dialect(self, execute_observed):
return execute_observed.context.dialect
def _compare_no_space(self, real_stmt, received_stmt):
stmt = re.sub(r"[\n\t]", "", real_stmt)
return received_stmt == stmt
def _received_statement(self, execute_observed):
received_stmt, received_params = super(
DialectSQL, self
)._received_statement(execute_observed)
# TODO: why do we need this part?
for real_stmt in execute_observed.statements:
if self._compare_no_space(real_stmt.statement, received_stmt):
break
else:
raise AssertionError(
"Can't locate compiled statement %r in list of "
"statements actually invoked" % received_stmt
)
return received_stmt, execute_observed.context.compiled_parameters
def _dialect_adjusted_statement(self, paramstyle):
stmt = re.sub(r"[\n\t]", "", self.statement)
# temporarily escape out PG double colons
stmt = stmt.replace("::", "!!")
if paramstyle == "pyformat":
stmt = re.sub(r":([\w_]+)", r"%(\1)s", stmt)
else:
# positional params
repl = None
if paramstyle == "qmark":
repl = "?"
elif paramstyle == "format":
repl = r"%s"
elif paramstyle == "numeric":
repl = None
stmt = re.sub(r":([\w_]+)", repl, stmt)
# put them back
stmt = stmt.replace("!!", "::")
return stmt
def _compare_sql(self, execute_observed, received_statement):
paramstyle = execute_observed.context.dialect.paramstyle
stmt = self._dialect_adjusted_statement(paramstyle)
return received_statement == stmt
def _failure_message(self, execute_observed, expected_params):
paramstyle = execute_observed.context.dialect.paramstyle
return (
"Testing for compiled statement\n%r partial params %s, "
"received\n%%(received_statement)r with params "
"%%(received_parameters)r"
% (
self._dialect_adjusted_statement(paramstyle).replace(
"%", "%%"
),
repr(expected_params).replace("%", "%%"),
)
)
class CountStatements(AssertRule):
def __init__(self, count):
self.count = count
self._statement_count = 0
def process_statement(self, execute_observed):
self._statement_count += 1
def no_more_statements(self):
if self.count != self._statement_count:
assert False, "desired statement count %d does not match %d" % (
self.count,
self._statement_count,
)
class AllOf(AssertRule):
def __init__(self, *rules):
self.rules = set(rules)
def process_statement(self, execute_observed):
for rule in list(self.rules):
rule.errormessage = None
rule.process_statement(execute_observed)
if rule.is_consumed:
self.rules.discard(rule)
if not self.rules:
self.is_consumed = True
break
elif not rule.errormessage:
# rule is not done yet
self.errormessage = None
break
else:
self.errormessage = list(self.rules)[0].errormessage
class EachOf(AssertRule):
def __init__(self, *rules):
self.rules = list(rules)
def process_statement(self, execute_observed):
while self.rules:
rule = self.rules[0]
rule.process_statement(execute_observed)
if rule.is_consumed:
self.rules.pop(0)
elif rule.errormessage:
self.errormessage = rule.errormessage
if rule.consume_statement:
break
if not self.rules:
self.is_consumed = True
def no_more_statements(self):
if self.rules and not self.rules[0].is_consumed:
self.rules[0].no_more_statements()
elif self.rules:
super(EachOf, self).no_more_statements()
class Conditional(EachOf):
def __init__(self, condition, rules, else_rules):
if condition:
super(Conditional, self).__init__(*rules)
else:
super(Conditional, self).__init__(*else_rules)
class Or(AllOf):
def process_statement(self, execute_observed):
for rule in self.rules:
rule.process_statement(execute_observed)
if rule.is_consumed:
self.is_consumed = True
break
else:
self.errormessage = list(self.rules)[0].errormessage
class SQLExecuteObserved:
def __init__(self, context, clauseelement, multiparams, params):
self.context = context
self.clauseelement = clauseelement
if multiparams:
self.parameters = multiparams
elif params:
self.parameters = [params]
else:
self.parameters = []
self.statements = []
def __repr__(self):
return str(self.statements)
class SQLCursorExecuteObserved(
collections.namedtuple(
"SQLCursorExecuteObserved",
["statement", "parameters", "context", "executemany"],
)
):
pass
class SQLAsserter:
def __init__(self):
self.accumulated = []
def _close(self):
self._final = self.accumulated
del self.accumulated
def assert_(self, *rules):
rule = EachOf(*rules)
observed = list(self._final)
while observed:
statement = observed.pop(0)
rule.process_statement(statement)
if rule.is_consumed:
break
elif rule.errormessage:
assert False, rule.errormessage
if observed:
assert False, "Additional SQL statements remain:\n%s" % observed
elif not rule.is_consumed:
rule.no_more_statements()
@contextlib.contextmanager
def assert_engine(engine):
asserter = SQLAsserter()
orig = []
@event.listens_for(engine, "before_execute")
def connection_execute(
conn, clauseelement, multiparams, params, execution_options
):
# grab the original statement + params before any cursor
# execution
orig[:] = clauseelement, multiparams, params
@event.listens_for(engine, "after_cursor_execute")
def cursor_execute(
conn, cursor, statement, parameters, context, executemany
):
if not context:
return
# then grab real cursor statements and associate them all
# around a single context
if (
asserter.accumulated
and asserter.accumulated[-1].context is context
):
obs = asserter.accumulated[-1]
else:
obs = SQLExecuteObserved(context, orig[0], orig[1], orig[2])
asserter.accumulated.append(obs)
obs.statements.append(
SQLCursorExecuteObserved(
statement, parameters, context, executemany
)
)
try:
yield asserter
finally:
event.remove(engine, "after_cursor_execute", cursor_execute)
event.remove(engine, "before_execute", connection_execute)
asserter._close()
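# A hypothetical usage sketch (the engine, table, SQL text and parameters below are
# illustrative, not taken from this file):
#   with assert_engine(engine) as asserter:
#       conn.execute(users.insert(), {"name": "spongebob"})
#   asserter.assert_(
#       CompiledSQL("INSERT INTO users (name) VALUES (:name)", [{"name": "spongebob"}])
#   )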
| 32.386037
| 77
| 0.577352
|
45873d2f408f319afc8ad2fbf865f25b5601f790
| 35,682
|
py
|
Python
|
skipole/ski/widgets/inputforms.py
|
bernie-skipole/skipole
|
b45d3291c593e7c03c053ab4f192f1ecc5c3e9b9
|
[
"MIT"
] | null | null | null |
skipole/ski/widgets/inputforms.py
|
bernie-skipole/skipole
|
b45d3291c593e7c03c053ab4f192f1ecc5c3e9b9
|
[
"MIT"
] | null | null | null |
skipole/ski/widgets/inputforms.py
|
bernie-skipole/skipole
|
b45d3291c593e7c03c053ab4f192f1ecc5c3e9b9
|
[
"MIT"
] | null | null | null |
"""Contains form widgets, these have 'container' functionality - they can contain further html and widgets, typically
further input fields. The module also has an Hidden Field and Submit Button widgets, which can be inserted into
a form. """
from .. import skiboot, tag
from . import Widget, ClosedWidget, FieldArg, FieldArgList, FieldArgTable, FieldArgDict
class HiddenField(ClosedWidget):
"""An input field of type hidden, for use as an insert into form widgets"""
# This class does not display any error messages
display_errors = False
arg_descriptions = {'hidden_field':FieldArg("text", '', valdt=True, jsonset=True)}
def __init__(self, name=None, brief='', **field_args):
"hidden_field: A hidden input field"
ClosedWidget.__init__(self, name=name, tag_name="input", brief=brief, **field_args)
def _build(self, page, ident_list, environ, call_data, lang):
"Sets the attributes"
value = self.get_field_value('hidden_field')
if not value:
self.show = False
return
self.update_attribs({"name":self.get_formname('hidden_field'),
"value":value,
"type":"hidden"})
@classmethod
def description(cls):
"""Returns a text string to illustrate the widget"""
return """
<input type="hidden" /> <!-- with widget id and class widget_class -->
<!-- with value of the "hidden_field" value, and name being the widgfield -->"""
class HiddenSessionStorage(ClosedWidget):
"""An input field of type hidden, for use as an insert into form widgets"""
# This class does not display any error messages
display_errors = False
arg_descriptions = {'session_key':FieldArg("text", '', valdt=True, jsonset=True)}
def __init__(self, name=None, brief='', **field_args):
"hidden_field: A hidden input field with value from session storage"
ClosedWidget.__init__(self, name=name, tag_name="input", brief=brief, **field_args)
self._key = ''
def _build(self, page, ident_list, environ, call_data, lang):
"Sets the attributes"
self._key = self.get_field_value('session_key')
if not self._key:
self.show = False
return
self.update_attribs({"name":self.get_formname('session_key'),
"value":"",
"type":"hidden"})
def _build_js(self, page, ident_list, environ, call_data, lang):
"""Sets key value into the value attribute by calling the widget updatefunc"""
if not self._key:
return
jscript = """
SKIPOLE.widgets["{ident}"].updatefunc();
""".format(ident=self.get_id())
return self._make_fieldvalues(session_key=self._key) + jscript
@classmethod
def description(cls):
"""Returns a text string to illustrate the widget"""
return """
<input type="hidden" /> <!-- with widget id and class widget_class -->
<!-- with value taken from the session storage with key "session_key", and name being the session_key widgfield -->"""
class HiddenLocalStorage(ClosedWidget):
"""An input field of type hidden, for use as an insert into form widgets"""
# This class does not display any error messages
display_errors = False
arg_descriptions = {'local_key':FieldArg("text", '', valdt=True, jsonset=True)}
def __init__(self, name=None, brief='', **field_args):
"hidden_field: A hidden input field with value from local storage"
ClosedWidget.__init__(self, name=name, tag_name="input", brief=brief, **field_args)
self._key = ''
def _build(self, page, ident_list, environ, call_data, lang):
"Sets the attributes"
self._key = self.get_field_value('local_key')
if not self._key:
self.show = False
return
self.update_attribs({"name":self.get_formname('local_key'),
"value":"",
"type":"hidden"})
def _build_js(self, page, ident_list, environ, call_data, lang):
"""Sets key value into the value attribute by calling the widget updatefunc"""
if not self._key:
return
jscript = """
SKIPOLE.widgets["{ident}"].updatefunc();
""".format(ident=self.get_id())
return self._make_fieldvalues(local_key=self._key) + jscript
@classmethod
def description(cls):
"""Returns a text string to illustrate the widget"""
return """
<input type="hidden" /> <!-- with widget id and class widget_class -->
<!-- with value taken from the local storage with key "local_key", and name being the local_key widgfield -->"""
class SubmitButton1(ClosedWidget):
"""An input field of type submit, for use as an insert into form widgets"""
# This class does not display any error messages
display_errors = False
arg_descriptions = {'button_text':FieldArg("text", 'Submit', valdt=True, jsonset=True)}
def __init__(self, name=None, brief='', **field_args):
"Create input type submit button widget"
ClosedWidget.__init__(self, name=name, tag_name="input", brief=brief, **field_args)
def _build(self, page, ident_list, environ, call_data, lang):
"Sets the attributes"
button_text = self.get_field_value('button_text')
if not button_text:
button_text = "Submit"
self.update_attribs({"name":self.get_formname('button_text'),
"value":button_text,
"type":"submit"})
@classmethod
def description(cls):
"""Returns a text string to illustrate the widget"""
return """
<input type="button_text" /> <!-- with widget id and class widget_class -->
<!-- with value of the "button_text" value, and name being the 'button_text widgfield -->"""
class SubmitButton2(ClosedWidget):
"""An input field of type submit, for use as an insert into form widgets"""
# This class does not display any error messages
display_errors = False
arg_descriptions = {'button_text':FieldArg("text", 'Submit', jsonset=True)}
def __init__(self, name=None, brief='', **field_args):
"Create input type submit button widget"
ClosedWidget.__init__(self, name=name, tag_name="input", brief=brief, **field_args)
def _build(self, page, ident_list, environ, call_data, lang):
"Sets the attributes"
button_text = self.get_field_value('button_text')
if not button_text:
button_text = "Submit"
self.update_attribs({"value":button_text, "type":"submit"})
@classmethod
def description(cls):
"""Returns a text string to illustrate the widget"""
return """
<input type="button_text" /> <!-- with widget id and class widget_class -->
<!-- with value of the "button_text" value, but no name, so does not submit a widgfield -->"""
class Form1(Widget):
"""A form with a container and four hidden fields. Used with further input fields set within it.
On error - the error message is displayed before any of the contents
Does not include a submit button, therefore requires one to be inserted with the contents"""
_container = ((1,0),)
error_location = (0,0,0)
arg_descriptions = {'action':FieldArg("url", ''),
'enctype':FieldArg("text", ''),
'hidden_field1':FieldArg("text", '', valdt=True, jsonset=True),
'hidden_field2':FieldArg("text", '', valdt=True, jsonset=True),
'hidden_field3':FieldArg("text", '', valdt=True, jsonset=True),
'hidden_field4':FieldArg("text", '', valdt=True, jsonset=True),
'container_class':FieldArg("cssclass", ''),
'error_class':FieldArg("cssclass", '')
}
def __init__(self, name=None, brief='', **field_args):
"""
action: The page ident, label, url this form links to
enctype: Sets the enctype attribute if given
hidden_field1: A hidden field value, leave blank if unused
hidden_field2: A second hidden field value, leave blank if unused
hidden_field3: A third hidden field value, leave blank if unused
hidden_field4: A fourth hidden field value, leave blank if unused
container_class: the class attribute of the div holding the container
error_class: The class applied to the paragraph containing the error message on error."""
Widget.__init__(self, name=name, tag_name="div", brief=brief, **field_args)
self.update_attribs({"role":"form", "method":"post"})
# error div at 0
self[0] = tag.Part(tag_name="div", attribs={"style":"display:none;"})
self[0][0] = tag.Part(tag_name="p")
self[0][0][0] = ''
# The form
self[1] = tag.Part(tag_name='form', attribs={"role":"form", "method":"post"})
# The location 1,0 is available as a container
self[1][0] = tag.Part(tag_name="div")
self[1][0][0] = ''
def _build(self, page, ident_list, environ, call_data, lang):
"build the form"
if self.get_field_value('error_class'):
self[0].update_attribs({"class":self.get_field_value('error_class')})
if self.error_status:
self[0].del_one_attrib("style")
if not self.get_field_value("action"):
# setting self._error replaces the entire tag
self._error = "Warning: No form action"
return
actionurl = skiboot.get_url(self.get_field_value("action"), proj_ident=page.proj_ident)
if not actionurl:
# setting self._error replaces the entire tag
self._error = "Warning: broken link"
return
# update the action of the form
self[1].update_attribs({"action": actionurl})
if self.get_field_value('enctype'):
self[1].update_attribs({"enctype": self.get_field_value('enctype')})
# the div holding the container
if self.get_field_value('container_class'):
self[1][0].attribs = {"class": self.get_field_value('container_class')}
# add ident and four hidden fields
self.add_hiddens(self[1], page)
def _build_js(self, page, ident_list, environ, call_data, lang):
"""Sets a submit event handler"""
# this ensures any input text widgets added to the container, get local validation
# when the form is submitted
jscript = """ $('#{ident} form').on("submit", function(e) {{
SKIPOLE.widgets['{ident}'].eventfunc(e);
}});
""".format(ident=self.get_id())
return jscript
@classmethod
def description(cls):
"""Returns a text string to illustrate the widget"""
return """
<div> <!-- with widget id and class widget_class -->
<div> <!-- normally hidden div, with class error_class -->
<p> <!-- Any error text appears here --> </p>
</div>
<form method=\"post\"> <!-- action attribute set to action field -->
<div> <!-- this div has the class attribute set to container_class -->
<!-- container 0 for further html -->
</div>
<!-- hidden input fields -->
</form>
</div>"""
class SubmitForm1(Widget):
"""A form taking contents with submit button, left or right labels and four hidden fields.
Used with further input fields set within it. On error - the error message is displayed
below the form tag, before any of the contents"""
_container = ((1,0),)
error_location = (0,0,0)
arg_descriptions = {'left_label':FieldArg("text", 'Please Submit:'),
'left_class':FieldArg("cssclass", ''),
'left_style':FieldArg("cssstyle", ''),
'right_label':FieldArg("text", ''),
'right_class':FieldArg("cssclass", ''),
'right_style':FieldArg("cssstyle", ''),
'action_json':FieldArg("url", ''),
'action':FieldArg("url", ''),
'enctype':FieldArg("text", ''),
'hidden_field1':FieldArg("text", '', valdt=True, jsonset=True),
'hidden_field2':FieldArg("text", '', valdt=True, jsonset=True),
'hidden_field3':FieldArg("text", '', valdt=True, jsonset=True),
'hidden_field4':FieldArg("text", '', valdt=True, jsonset=True),
'button_text':FieldArg("text",'Submit'),
'button_wait_text':FieldArg("text", ''),
'button_class':FieldArg("cssclass", ''),
'div_class':FieldArg("cssclass", ''),
'container_class':FieldArg("cssclass", ''),
'error_class':FieldArg("cssclass", ''),
}
def __init__(self, name=None, brief='', **field_args):
"""
left_label: The text displayed to the left of the button
left_class: The css class of the label to the left of the button
right_label: The text displayed to the right of the button
right_class: The css class of the label to the right of the button
action_json: if a value set, and client has jscript enabled, this is the page ident, label, url this button links to, expects a json page back
action: The page ident, label, url this button links to
enctype: Sets the enctype attribute if given
hidden_field1: A hidden field value, leave blank if unused, name used as the get field name
hidden_field2: A second hidden field value, leave blank if unused, name used as the get field name
hidden_field3: A third hidden field value, leave blank if unused, name used as the get field name
hidden_field4: A fourth hidden field value, leave blank if unused, name used as the get field name
button_text: The text on the button
button_wait_text: A 'please wait' message shown on the button
button_class: The css class of the button
div_class: the class attribute of the div tag which contains the label and button
container_class: the class attribute of the div holding the container
error_class: The class applied to the paragraph containing the error message on error.
"""
Widget.__init__(self, name=name, tag_name="div", brief=brief, **field_args)
# error div at 0
self[0] = tag.Part(tag_name="div", attribs={"style":"display:none;"})
self[0][0] = tag.Part(tag_name="p")
self[0][0][0] = ''
# The form
self[1] = tag.Part(tag_name='form', attribs={"role":"form", "method":"post"})
# The location 1,0 is available as a container
self[1][0] = tag.Part(tag_name='div')
self[1][0][0] = ''
# tag containing label and button
self[1][1] = tag.Part(tag_name='div')
# the left label
self[1][1][0] = tag.Part(tag_name="label", hide_if_empty=True)
# the submit button
self[1][1][1] = tag.ClosedPart(tag_name="input")
# the right label
self[1][1][2] = tag.Part(tag_name="label", hide_if_empty=True)
self._jsonurl = ''
def _build(self, page, ident_list, environ, call_data, lang):
"build the form"
self._jsonurl = skiboot.get_url(self.get_field_value("action_json"), proj_ident=page.proj_ident)
if self.get_field_value('error_class'):
self[0].update_attribs({"class":self.get_field_value('error_class')})
if self.error_status:
self[0].del_one_attrib("style")
if not self.get_field_value("action"):
# setting self._error replaces the entire tag
self._error = "Warning: No form action"
return
actionurl = skiboot.get_url(self.get_field_value("action"), proj_ident=page.proj_ident)
if not actionurl:
# setting self._error replaces the entire tag
self._error = "Warning: broken link"
return
# update the action of the form
self[1].update_attribs({"action": actionurl})
if self.get_field_value('enctype'):
self[1].update_attribs({"enctype": self.get_field_value('enctype')})
# the div holding the container
if self.get_field_value('container_class'):
self[1][0].attribs = {"class": self.get_field_value('container_class')}
# the div holding label and button
if self.get_field_value('div_class'):
self[1][1].attribs = {"class": self.get_field_value('div_class')}
if self.get_field_value('left_label'):
self[1][1][0][0] = self.get_field_value('left_label')
if self.get_field_value('left_class'):
self[1][1][0].attribs = {"class": self.get_field_value('left_class')}
if self.get_field_value('left_style'):
self[1][1][0].attribs = {"style": self.get_field_value('left_style')}
# submit button
if self.get_field_value('button_class'):
self[1][1][1].attribs = {"value":self.get_field_value('button_text'), "type":"submit", "class": self.get_field_value('button_class')}
else:
self[1][1][1].attribs = {"value":self.get_field_value('button_text'), "type":"submit"}
# set an id in the submit button for the 'label for' tag
self[1][1][1].insert_id()
if self.get_field_value('right_label'):
self[1][1][2][0] = self.get_field_value('right_label')
if self.get_field_value('right_class'):
self[1][1][2].attribs = {"class": self.get_field_value('right_class')}
if self.get_field_value('right_style'):
self[1][1][2].attribs = {"style": self.get_field_value('right_style')}
# set the label 'for' attribute
self[1][1][0].update_attribs({'for':self[1][1][1].get_id()})
self[1][1][2].update_attribs({'for':self[1][1][1].get_id()})
# add ident and four hidden fields
self.add_hiddens(self[1], page)
def _build_js(self, page, ident_list, environ, call_data, lang):
"""Sets a submit event handler"""
jscript = """$('#{ident} form').on("submit", function(e) {{
SKIPOLE.widgets['{ident}'].eventfunc(e);
}});
""".format(ident=self.get_id())
if self._jsonurl:
return jscript + self._make_fieldvalues('button_wait_text', buttonident = self[1][1][1].get_id(), url=self._jsonurl)
else:
return jscript + self._make_fieldvalues('button_wait_text', buttonident = self[1][1][1].get_id())
@classmethod
def description(cls):
"""Returns a text string to illustrate the widget"""
return """
<div> <!-- with widget id and class widget_class -->
<div> <!-- normally hidden div, with class error_class -->
<p> <!-- Any error text appears here --> </p>
</div>
<form method="post"> <!-- action attribute set to action field -->
<div> <!-- this div has the class attribute set to container_class -->
<!-- container 0 for further html -->
</div>
<div> <!-- this div has the class attribute set to div_class -->
<label> <!-- with class set to left_class and content to left_label -->
</label>
<input type=\"submit\" /> <!-- button value set to button_text -->
<label> <!-- with class set to right_class and content to right_label -->
</label>
</div>
<!-- hidden input fields -->
</form>
</div>"""
class SubmitForm2(Widget):
"""A form taking contents with submit button, left or right labels and four hidden fields.
Used with further input fields set within it. On error - the error message is displayed
below the form tag, before any of the contents
Can send session or local storage values."""
_container = ((1,0),)
error_location = (0,0,0)
arg_descriptions = {'left_label':FieldArg("text", 'Please Submit:'),
'left_class':FieldArg("cssclass", ''),
'left_style':FieldArg("cssstyle", ''),
'right_label':FieldArg("text", ''),
'right_class':FieldArg("cssclass", ''),
'right_style':FieldArg("cssstyle", ''),
'action_json':FieldArg("url", ''),
'action':FieldArg("url", ''),
'enctype':FieldArg("text", ''),
'hidden_field1':FieldArg("text", '', valdt=True, jsonset=True),
'hidden_field2':FieldArg("text", '', valdt=True, jsonset=True),
'hidden_field3':FieldArg("text", '', valdt=True, jsonset=True),
'hidden_field4':FieldArg("text", '', valdt=True, jsonset=True),
'session_storage':FieldArg("text", "", valdt=True, jsonset=True),
'local_storage':FieldArg("text","", valdt=True, jsonset=True),
'button_text':FieldArg("text",'Submit'),
'button_wait_text':FieldArg("text", ''),
'button_class':FieldArg("cssclass", ''),
'div_class':FieldArg("cssclass", ''),
'container_class':FieldArg("cssclass", ''),
'error_class':FieldArg("cssclass", ''),
}
def __init__(self, name=None, brief='', **field_args):
"""
left_label: The text displayed to the left of the button
left_class: The css class of the label to the left of the button
right_label: The text displayed to the right of the button
right_class: The css class of the label to the right of the button
action_json: if a value set, and client has jscript enabled, this is the page ident, label, url this button links to, expects a json page back
action: The page ident, label, url this button links to
enctype: Sets the enctype attribute if given
hidden_field1: A hidden field value, leave blank if unused, name used as the get field name
hidden_field2: A second hidden field value, leave blank if unused, name used as the get field name
hidden_field3: A third hidden field value, leave blank if unused, name used as the get field name
hidden_field4: A fourth hidden field value, leave blank if unused, name used as the get field name
session_storage: A session storage key, this widgfield returns the stored value if anything
local_storage: A local storage key, this widgfield returns the stored value if anything
button_text: The text on the button
button_wait_text: A 'please wait' message shown on the button
button_class: The css class of the button
div_class: the class attribute of the div tag which contains the label and button
container_class: the class attribute of the div holding the container
error_class: The class applied to the paragraph containing the error message on error.
"""
Widget.__init__(self, name=name, tag_name="div", brief=brief, **field_args)
# error div at 0
self[0] = tag.Part(tag_name="div", attribs={"style":"display:none;"})
self[0][0] = tag.Part(tag_name="p")
self[0][0][0] = ''
# The form
self[1] = tag.Part(tag_name='form', attribs={"role":"form", "method":"post"})
# The location 1,0 is available as a container
self[1][0] = tag.Part(tag_name='div')
self[1][0][0] = ''
# tag containing label and button
self[1][1] = tag.Part(tag_name='div')
# the left label
self[1][1][0] = tag.Part(tag_name="label", hide_if_empty=True)
# the submit button
self[1][1][1] = tag.ClosedPart(tag_name="input")
# the right label
self[1][1][2] = tag.Part(tag_name="label", hide_if_empty=True)
self._jsonurl = ''
def _build(self, page, ident_list, environ, call_data, lang):
"build the form"
self._jsonurl = skiboot.get_url(self.get_field_value("action_json"), proj_ident=page.proj_ident)
if self.get_field_value('error_class'):
self[0].update_attribs({"class":self.get_field_value('error_class')})
if self.error_status:
self[0].del_one_attrib("style")
if not self.get_field_value("action"):
# setting self._error replaces the entire tag
self._error = "Warning: No form action"
return
actionurl = skiboot.get_url(self.get_field_value("action"), proj_ident=page.proj_ident)
if not actionurl:
# setting self._error replaces the entire tag
self._error = "Warning: broken link"
return
# update the action of the form
self[1].update_attribs({"action": actionurl})
if self.get_field_value('enctype'):
self[1].update_attribs({"enctype": self.get_field_value('enctype')})
# the div holding the container
if self.get_field_value('container_class'):
self[1][0].attribs = {"class": self.get_field_value('container_class')}
# the div holding label and button
if self.get_field_value('div_class'):
self[1][1].attribs = {"class": self.get_field_value('div_class')}
if self.get_field_value('left_label'):
self[1][1][0][0] = self.get_field_value('left_label')
if self.get_field_value('left_class'):
self[1][1][0].attribs = {"class": self.get_field_value('left_class')}
if self.get_field_value('left_style'):
self[1][1][0].attribs = {"style": self.get_field_value('left_style')}
# submit button
if self.get_field_value('button_class'):
self[1][1][1].attribs = {"value":self.get_field_value('button_text'), "type":"submit", "class": self.get_field_value('button_class')}
else:
self[1][1][1].attribs = {"value":self.get_field_value('button_text'), "type":"submit"}
# set an id in the submit button for the 'label for' tag
self[1][1][1].insert_id()
if self.get_field_value('right_label'):
self[1][1][2][0] = self.get_field_value('right_label')
if self.get_field_value('right_class'):
self[1][1][2].attribs = {"class": self.get_field_value('right_class')}
if self.get_field_value('right_style'):
self[1][1][2].attribs = {"style": self.get_field_value('right_style')}
# set the label 'for' attribute
self[1][1][0].update_attribs({'for':self[1][1][1].get_id()})
self[1][1][2].update_attribs({'for':self[1][1][1].get_id()})
# add ident and four hidden fields
self.add_hiddens(self[1], page)
def _build_js(self, page, ident_list, environ, call_data, lang):
"""Sets a submit event handler"""
jscript = """$('#{ident} form').on("submit", function(e) {{
SKIPOLE.widgets['{ident}'].eventfunc(e);
}});
""".format(ident=self.get_id())
if self._jsonurl:
return jscript + self._make_fieldvalues('button_wait_text',
'session_storage',
'local_storage',
buttonident = self[1][1][1].get_id(),
url=self._jsonurl)
else:
return jscript + self._make_fieldvalues('button_wait_text',
'session_storage',
'local_storage',
buttonident = self[1][1][1].get_id())
@classmethod
def description(cls):
"""Returns a text string to illustrate the widget"""
return """
<div> <!-- with widget id and class widget_class -->
<div> <!-- normally hidden div, with class error_class -->
<p> <!-- Any error text appears here --> </p>
</div>
<form method="post"> <!-- action attribute set to action field -->
<div> <!-- this div has the class attribute set to container_class -->
<!-- container 0 for further html -->
</div>
<div> <!-- this div has the class attribute set to div_class -->
<label> <!-- with class set to left_class and content to left_label -->
</label>
<input type=\"submit\" /> <!-- button value set to button_text -->
<label> <!-- with class set to right_class and content to right_label -->
</label>
</div>
<!-- hidden input fields -->
</form>
</div>"""
class SubmitFromScript(Widget):
"""Defines a form with four hidden fields, values set by javascript"""
# This class does not display any error messages
display_errors = False
arg_descriptions = {
'action_json':FieldArg("url", ''),
'action':FieldArg("url", ''),
'hidden_field1':FieldArg("text", '', valdt=True),
'hidden_field2':FieldArg("text", '', valdt=True),
'hidden_field3':FieldArg("text", '', valdt=True),
'hidden_field4':FieldArg("text", '', valdt=True),
'target':FieldArg("text",''),
'button_text':FieldArg("text",'Submit'),
'button_class':FieldArg("cssclass", ''),
'buttondiv_class':FieldArg("cssclass", ''),
'buttondiv_style':FieldArg("cssstyle", ''),
'hide':FieldArg("boolean", False, jsonset=True)
}
def __init__(self, name=None, brief='', **field_args):
"""
action_json: if a value set, and client has jscript enabled, this is the page ident, label, url this button links to, expects a json page back
action: The page ident, label, url this button links to, overridden if action_json is set.
hidden_field1: Body of a javascript function returning a value, leave blank if unused
hidden_field2: Body of a javascript function returning a value, leave blank if unused
hidden_field3: Body of a javascript function returning a value, leave blank if unused
hidden_field4: Body of a javascript function returning a value, leave blank if unused
target: if given, the target attribute will be set
button_text: The text on the button
button_class: The class given to the button tag
buttondiv_class: the class attribute of the div which contains the submit button
buttondiv_style: the style attribute of the div which contains the submit button
hide: If True, widget is hidden
"""
Widget.__init__(self, name=name, tag_name="div", brief=brief, **field_args)
# The form
self[0] = tag.Part(tag_name='form', attribs={"role":"form", "method":"post"})
# div containing the submit button
self[0][0] = tag.Part(tag_name='div')
# the submit button
self[0][0][0] = tag.Part(tag_name="button", attribs ={"type":"submit"})
self[0][0][0][0] = "Submit"
self._jsonurl = ''
def _build(self, page, ident_list, environ, call_data, lang):
"build the form"
if self.get_field_value("target"):
self[0].update_attribs({"target":self.get_field_value("target")})
# Hides widget if no error and hide is True
self.widget_hide(self.get_field_value("hide"))
self._jsonurl = skiboot.get_url(self.get_field_value("action_json"), proj_ident=page.proj_ident)
if not self.get_field_value("action"):
# setting self._error replaces the entire tag
self._error = "Warning: No form action"
return
actionurl = skiboot.get_url(self.get_field_value("action"), proj_ident=page.proj_ident)
if not actionurl:
# setting self._error replaces the entire tag
self._error = "Warning: broken link"
return
# update the action of the form
self[0].update_attribs({"action": actionurl})
# the div holding the submit button
if self.get_field_value('buttondiv_class'):
self[0][0].attribs = {"class": self.get_field_value('buttondiv_class')}
if self.get_field_value('buttondiv_style'):
self[0][0].update_attribs({"style": self.get_field_value('buttondiv_style')})
# submit button
if self.get_field_value('button_class'):
self[0][0][0].update_attribs({"class": self.get_field_value('button_class')})
if self.get_field_value('button_text'):
self[0][0][0][0] = self.get_field_value('button_text')
# add ident and four hidden fields
if page is not None:
self[0].append(tag.ClosedPart(tag_name="input",
attribs ={"name":'ident',
"value":page.ident_data_string,
"type":"hidden"}))
# hidden field on the form
if self.get_field_value('hidden_field1'):
self[0].append(tag.ClosedPart(tag_name="input",
attribs ={"name":self.get_formname('hidden_field1'),
"type":"hidden"}))
# Second hidden field on the form
if self.get_field_value('hidden_field2'):
self[0].append(tag.ClosedPart(tag_name="input",
attribs ={"name":self.get_formname('hidden_field2'),
"type":"hidden"}))
# third hidden field on the form
if self.get_field_value('hidden_field3'):
self[0].append(tag.ClosedPart(tag_name="input",
attribs ={"name":self.get_formname('hidden_field3'),
"type":"hidden"}))
# fourth hidden field on the form
if self.get_field_value('hidden_field4'):
self[0].append(tag.ClosedPart(tag_name="input",
attribs ={"name":self.get_formname('hidden_field4'),
"type":"hidden"}))
def _build_js(self, page, ident_list, environ, call_data, lang):
"""Sets a submit event handler"""
jscript = """ $("#{ident} form").on("submit input", function(e) {{
SKIPOLE.widgets["{ident}"].eventfunc(e);
}});
""".format(ident=self.get_id())
if self._jsonurl:
return jscript + self._make_fieldvalues('hidden_field1', 'hidden_field2', 'hidden_field3', 'hidden_field4', url=self._jsonurl)
return jscript + self._make_fieldvalues('hidden_field1', 'hidden_field2', 'hidden_field3', 'hidden_field4')
@classmethod
def description(cls):
"""Returns a text string to illustrate the widget"""
return """
<div> <!-- with widget id and class widget_class -->
<form role="form" method="post"> <!-- action attribute set to action field -->
<div> <!-- class attribute set to buttondiv_class -->
<button type="submit"> <!-- with class set to button_class -->
<!-- button_text -->
</button>
</div>
<!-- hidden input fields each submitting a value as returned by the corresponding javascript functions -->
</form>
</div>"""
| 46.826772
| 151
| 0.596267
|
ae57387ca255dd09d90695652f0c3c7efe18c799
| 25
|
py
|
Python
|
clearml/version.py
|
H4dr1en/trains
|
642c1130ad1f76db10ed9b8e1a4ff0fd7e45b3cc
|
[
"Apache-2.0"
] | null | null | null |
clearml/version.py
|
H4dr1en/trains
|
642c1130ad1f76db10ed9b8e1a4ff0fd7e45b3cc
|
[
"Apache-2.0"
] | null | null | null |
clearml/version.py
|
H4dr1en/trains
|
642c1130ad1f76db10ed9b8e1a4ff0fd7e45b3cc
|
[
"Apache-2.0"
] | null | null | null |
__version__ = '1.1.5rc3'
| 12.5
| 24
| 0.68
|
330fb60aa85b749ba65c897b6150c15962669c24
| 59,435
|
py
|
Python
|
lynx_code/network.py
|
enkrypter/Lynx-wallet
|
166b7e5810f017a6e12bf96e54b0d44767b2a901
|
[
"MIT"
] | null | null | null |
lynx_code/network.py
|
enkrypter/Lynx-wallet
|
166b7e5810f017a6e12bf96e54b0d44767b2a901
|
[
"MIT"
] | null | null | null |
lynx_code/network.py
|
enkrypter/Lynx-wallet
|
166b7e5810f017a6e12bf96e54b0d44767b2a901
|
[
"MIT"
] | null | null | null |
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import queue
import os
import random
import re
from collections import defaultdict
import threading
import socket
import json
import sys
import ipaddress
import asyncio
from typing import NamedTuple, Optional, Sequence, List, Dict, Tuple
import traceback
import dns
import dns.resolver
import aiorpcx
from aiorpcx import TaskGroup
from aiohttp import ClientResponse
from . import util
from .util import (log_exceptions, ignore_exceptions,
bfh, SilentTaskGroup, make_aiohttp_session, send_exception_to_crash_reporter,
is_hash256_str, is_non_negative_integer)
from .bitcoin import COIN
from . import constants
from . import blockchain
from . import bitcoin
from .blockchain import Blockchain, ZC_HEADER_SIZE
from .interface import (Interface, serialize_server, deserialize_server,
RequestTimedOut, NetworkTimeout, BUCKET_NAME_OF_ONION_SERVERS)
from .version import ELECTRUM_VERSION, PROTOCOL_VERSION
from .simple_config import SimpleConfig
from .i18n import _
from .logging import get_logger, Logger
_logger = get_logger(__name__)
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
NUM_TARGET_CONNECTED_SERVERS = 10
NUM_RECENT_SERVERS = 20
def parse_servers(result: Sequence[Tuple[str, str, List[str]]]) -> Dict[str, dict]:
""" parse servers list into dict format"""
servers = {}
for item in result:
host = item[1]
out = {}
version = None
pruning_level = '-'
if len(item) > 2:
for v in item[2]:
if re.match(r"[st]\d*", v):
protocol, port = v[0], v[1:]
if port == '': port = constants.net.DEFAULT_PORTS[protocol]
out[protocol] = port
elif re.match("v(.?)+", v):
version = v[1:]
elif re.match(r"p\d*", v):
pruning_level = v[1:]
if pruning_level == '': pruning_level = '0'
if out:
out['pruning'] = pruning_level
out['version'] = version
servers[host] = out
return servers
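# Illustrative example of the input/output shape handled here (host and ports made up):
#   parse_servers([("10.0.0.1", "electrum.example.org",
#                   ["v1.4", "s50002", "t50001", "p10000"])])
#   -> {"electrum.example.org": {"s": "50002", "t": "50001",
#                                "pruning": "10000", "version": "1.4"}}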
def filter_version(servers):
def is_recent(version):
try:
return util.versiontuple(version) >= util.versiontuple(PROTOCOL_VERSION)
except Exception as e:
return False
return {k: v for k, v in servers.items() if is_recent(v.get('version'))}
def filter_noonion(servers):
return {k: v for k, v in servers.items() if not k.endswith('.onion')}
def filter_protocol(hostmap, protocol='s'):
'''Filters the hostmap for those implementing protocol.
The result is a list in serialized form.'''
eligible = []
for host, portmap in hostmap.items():
port = portmap.get(protocol)
if port:
eligible.append(serialize_server(host, port, protocol))
return eligible
def pick_random_server(hostmap=None, protocol='s', exclude_set=None):
if hostmap is None:
hostmap = constants.net.DEFAULT_SERVERS
if exclude_set is None:
exclude_set = set()
eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
return random.choice(eligible) if eligible else None
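# e.g. pick_random_server(protocol='s') picks one SSL-capable entry from
# constants.net.DEFAULT_SERVERS, in the "host:port:protocol" string form assumed to be
# produced by serialize_server() (see interface.py).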
class NetworkParameters(NamedTuple):
host: str
port: str
protocol: str
proxy: Optional[dict]
auto_connect: bool
oneserver: bool = False
proxy_modes = ['socks4', 'socks5']
def serialize_proxy(p):
if not isinstance(p, dict):
return None
return ':'.join([p.get('mode'), p.get('host'), p.get('port'),
p.get('user', ''), p.get('password', '')])
def deserialize_proxy(s: str) -> Optional[dict]:
if not isinstance(s, str):
return None
if s.lower() == 'none':
return None
proxy = { "mode":"socks5", "host":"localhost" }
# FIXME raw IPv6 address fails here
args = s.split(':')
n = 0
if proxy_modes.count(args[n]) == 1:
proxy["mode"] = args[n]
n += 1
if len(args) > n:
proxy["host"] = args[n]
n += 1
if len(args) > n:
proxy["port"] = args[n]
n += 1
else:
proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
if len(args) > n:
proxy["user"] = args[n]
n += 1
if len(args) > n:
proxy["password"] = args[n]
return proxy
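# Illustrative examples of the accepted proxy string format:
#   deserialize_proxy("socks5:localhost:9050")  -> {"mode": "socks5", "host": "localhost", "port": "9050"}
#   deserialize_proxy("socks4:127.0.0.1")       -> {"mode": "socks4", "host": "127.0.0.1", "port": "1080"}
#   deserialize_proxy("none")                   -> None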
class BestEffortRequestFailed(Exception): pass
class TxBroadcastError(Exception):
def get_message_for_gui(self):
raise NotImplementedError()
class TxBroadcastHashMismatch(TxBroadcastError):
def get_message_for_gui(self):
return "{}\n{}\n\n{}" \
.format(_("The server returned an unexpected transaction ID when broadcasting the transaction."),
_("Consider trying to connect to a different server, or updating Lynx."),
str(self))
class TxBroadcastServerReturnedError(TxBroadcastError):
def get_message_for_gui(self):
return "{}\n{}\n\n{}" \
.format(_("The server returned an error when broadcasting the transaction."),
_("Consider trying to connect to a different server, or updating Lynx."),
str(self))
class TxBroadcastUnknownError(TxBroadcastError):
def get_message_for_gui(self):
return "{}\n{}" \
.format(_("Unknown error when broadcasting the transaction."),
_("Consider trying to connect to a different server, or updating Lynx."))
class UntrustedServerReturnedError(Exception):
def __init__(self, *, original_exception):
self.original_exception = original_exception
def __str__(self):
return _("The server returned an error.")
def __repr__(self):
return f"<UntrustedServerReturnedError original_exception: {repr(self.original_exception)}>"
INSTANCE = None
class Network(Logger):
"""The Network class manages a set of connections to remote electrum
servers, each connected socket is handled by an Interface() object.
"""
LOGGING_SHORTCUT = 'n'
def __init__(self, config: SimpleConfig=None):
global INSTANCE
INSTANCE = self
Logger.__init__(self)
self.asyncio_loop = asyncio.get_event_loop()
assert self.asyncio_loop.is_running(), "event loop not running"
self._loop_thread = None # type: threading.Thread # set by caller; only used for sanity checks
if config is None:
config = {} # Do not use mutables as default values!
self.config = SimpleConfig(config) if isinstance(config, dict) else config # type: SimpleConfig
# Autodetect and enable Tor proxy on Network init
        self.tor_docs_uri = ('https://github.com/enkrypter/Lynx-wallet/'
                             'blob/%s/docs/tor.md' % ELECTRUM_VERSION)
self.tor_docs_title = 'Tor Setup Docs'
self.tor_docs_uri_qt = ('<br><br><a href="%s">%s</a>' %
(self.tor_docs_uri, self.tor_docs_title))
        self.tor_warn_msg = ('Tor proxy is disabled; to enable it, read'
                             ' the docs.')
self.tor_auto_on = self.config.get('tor_auto_on', True)
self.tor_detected = self.detect_tor_proxy(self.config.get('proxy'))
if self.tor_auto_on and self.tor_detected:
self.config.set_key('proxy', self.tor_detected, False)
if self.config.get('proxy') and self.tor_detected:
self.tor_on = True
else:
self.tor_on = False
blockchain.read_blockchains(self.config)
self.logger.info(f"blockchains {list(map(lambda b: b.forkpoint, blockchain.blockchains.values()))}")
self._blockchain_preferred_block = self.config.get('blockchain_preferred_block', None) # type: Optional[Dict]
self._blockchain = blockchain.get_best_chain()
# Server for addresses and transactions
self.default_server = self.config.get('server', None)
# Sanitize default server
if self.default_server:
try:
deserialize_server(self.default_server)
except:
self.logger.warning('failed to parse server-string; falling back to random.')
self.default_server = None
if not self.default_server:
self.default_server = pick_random_server()
self.main_taskgroup = None # type: TaskGroup
# locks
self.restart_lock = asyncio.Lock()
self.bhi_lock = asyncio.Lock()
self.callback_lock = threading.Lock()
self.recent_servers_lock = threading.RLock() # <- re-entrant
self.interfaces_lock = threading.Lock() # for mutating/iterating self.interfaces
self.server_peers = {} # returned by interface (servers that the main interface knows about)
self.recent_servers = self._read_recent_servers() # note: needs self.recent_servers_lock
self.banner = ''
self.donation_address = ''
self.relay_fee = None # type: Optional[int]
# callbacks set by the GUI
self.callbacks = defaultdict(list) # note: needs self.callback_lock
dir_path = os.path.join(self.config.path, 'certs')
util.make_dir(dir_path)
# retry times
self.server_retry_time = time.time()
self.nodes_retry_time = time.time()
# the main server we are currently communicating with
self.interface = None # type: Interface
# set of servers we have an ongoing connection with
self.interfaces = {} # type: Dict[str, Interface]
self.auto_connect = self.config.get('auto_connect', True)
self.connecting = set()
self.server_queue = None
self.proxy = None
# Dump network messages (all interfaces). Set at runtime from the console.
self.debug = False
self._set_status('disconnected')
def run_from_another_thread(self, coro):
assert self._loop_thread != threading.current_thread(), 'must not be called from network thread'
fut = asyncio.run_coroutine_threadsafe(coro, self.asyncio_loop)
return fut.result()
@staticmethod
def get_instance() -> Optional["Network"]:
return INSTANCE
def with_recent_servers_lock(func):
def func_wrapper(self, *args, **kwargs):
with self.recent_servers_lock:
return func(self, *args, **kwargs)
return func_wrapper
def register_callback(self, callback, events):
with self.callback_lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.callback_lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def trigger_callback(self, event, *args):
with self.callback_lock:
callbacks = self.callbacks[event][:]
for callback in callbacks:
# FIXME: if callback throws, we will lose the traceback
if asyncio.iscoroutinefunction(callback):
asyncio.run_coroutine_threadsafe(callback(event, *args), self.asyncio_loop)
else:
self.asyncio_loop.call_soon_threadsafe(callback, event, *args)
def _read_recent_servers(self):
if not self.config.path:
return []
path = os.path.join(self.config.path, "recent_servers")
try:
with open(path, "r", encoding='utf-8') as f:
data = f.read()
return json.loads(data)
except:
return []
@with_recent_servers_lock
def _save_recent_servers(self):
if not self.config.path:
return
path = os.path.join(self.config.path, "recent_servers")
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(path, "w", encoding='utf-8') as f:
f.write(s)
except:
pass
def get_server_height(self):
interface = self.interface
return interface.tip if interface else 0
async def _server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.logger.info('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.logger.info(f'{self.default_server} is lagging ({sh} vs {lh})')
return result
def _set_status(self, status):
self.connection_status = status
self.notify('status')
def is_connected(self):
interface = self.interface
return interface is not None and interface.ready.done()
def is_connecting(self):
return self.connection_status == 'connecting'
async def _request_server_info(self, interface):
await interface.ready
session = interface.session
async def get_banner():
self.banner = await session.send_request('server.banner')
self.notify('banner')
async def get_donation_address():
addr = await session.send_request('server.donation_address')
if not bitcoin.is_address(addr):
if addr: # ignore empty string
self.logger.info(f"invalid donation address from server: {repr(addr)}")
addr = ''
self.donation_address = addr
async def get_server_peers():
server_peers = await session.send_request('server.peers.subscribe')
random.shuffle(server_peers)
max_accepted_peers = len(constants.net.DEFAULT_SERVERS) + NUM_RECENT_SERVERS
server_peers = server_peers[:max_accepted_peers]
self.server_peers = parse_servers(server_peers)
self.notify('servers')
async def get_relay_fee():
relayfee = await session.send_request('blockchain.relayfee')
if relayfee is None:
self.relay_fee = None
else:
relayfee = int(relayfee * COIN)
self.relay_fee = max(0, relayfee)
async with TaskGroup() as group:
await group.spawn(get_banner)
await group.spawn(get_donation_address)
await group.spawn(get_server_peers)
await group.spawn(get_relay_fee)
await group.spawn(self._request_fee_estimates(interface))
async def _request_fee_estimates(self, interface):
session = interface.session
from .simple_config import FEE_ETA_TARGETS
self.config.requested_fee_estimates()
async with TaskGroup() as group:
histogram_task = await group.spawn(session.send_request('mempool.get_fee_histogram'))
fee_tasks = []
for i in FEE_ETA_TARGETS:
fee_tasks.append((i, await group.spawn(session.send_request('blockchain.estimatefee', [i]))))
self.config.mempool_fees = histogram = histogram_task.result()
self.logger.info(f'fee_histogram {histogram}')
self.notify('fee_histogram')
fee_estimates_eta = {}
for nblock_target, task in fee_tasks:
fee = int(task.result() * COIN)
fee_estimates_eta[nblock_target] = fee
if fee < 0: continue
self.config.update_fee_estimates(nblock_target, fee)
self.logger.info(f'fee_estimates {fee_estimates_eta}')
self.notify('fee')
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'fee':
value = self.config.fee_estimates
elif key == 'fee_histogram':
value = self.config.mempool_fees
elif key == 'servers':
value = self.get_servers()
else:
raise Exception('unexpected trigger key {}'.format(key))
return value
def notify(self, key):
if key in ['status', 'updated']:
self.trigger_callback(key)
else:
self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self) -> NetworkParameters:
host, port, protocol = deserialize_server(self.default_server)
return NetworkParameters(host=host,
port=port,
protocol=protocol,
proxy=self.proxy,
auto_connect=self.auto_connect,
oneserver=self.oneserver)
def get_donation_address(self):
if self.is_connected():
return self.donation_address
def get_interfaces(self) -> List[str]:
"""The list of servers for the connected interfaces."""
with self.interfaces_lock:
return list(self.interfaces)
@with_recent_servers_lock
def get_servers(self):
# start with hardcoded servers
out = dict(constants.net.DEFAULT_SERVERS) # copy
# add recent servers
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host not in out:
out[host] = {protocol: port}
# add servers received from main interface
server_peers = self.server_peers
if server_peers:
out.update(filter_version(server_peers.copy()))
# potentially filter out some
if self.config.get('noonion'):
out = filter_noonion(out)
return out
def _start_interface(self, server: str):
if server not in self.interfaces and server not in self.connecting:
if server == self.default_server:
self.logger.info(f"connecting to {server} as new interface")
self._set_status('connecting')
self.connecting.add(server)
self.server_queue.put(server)
def _start_random_interface(self):
with self.interfaces_lock:
exclude_set = self.disconnected_servers | set(self.interfaces) | self.connecting
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server:
self._start_interface(server)
return server
def _set_proxy(self, proxy: Optional[dict]):
self.proxy = proxy
# Store these somewhere so we can un-monkey-patch
if not hasattr(socket, "_getaddrinfo"):
socket._getaddrinfo = socket.getaddrinfo
if proxy:
self.logger.info(f'setting proxy {proxy}')
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
else:
if sys.platform == 'win32':
# On Windows, socket.getaddrinfo takes a mutex, and might hold it for up to 10 seconds
# when dns-resolving. To speed it up drastically, we resolve dns ourselves, outside that lock.
# see #4421
socket.getaddrinfo = self._fast_getaddrinfo
else:
socket.getaddrinfo = socket._getaddrinfo
self.trigger_callback('proxy_set', self.proxy)
@staticmethod
def _fast_getaddrinfo(host, *args, **kwargs):
def needs_dns_resolving(host):
try:
ipaddress.ip_address(host)
return False # already valid IP
except ValueError:
pass # not an IP
if str(host) in ('localhost', 'localhost.',):
return False
return True
def resolve_with_dnspython(host):
addrs = []
# try IPv6
try:
answers = dns.resolver.query(host, dns.rdatatype.AAAA)
addrs += [str(answer) for answer in answers]
except dns.exception.DNSException as e:
pass
except BaseException as e:
_logger.info(f'dnspython failed to resolve dns (AAAA) with error: {e}')
# try IPv4
try:
answers = dns.resolver.query(host, dns.rdatatype.A)
addrs += [str(answer) for answer in answers]
except dns.exception.DNSException as e:
# dns failed for some reason, e.g. dns.resolver.NXDOMAIN this is normal.
# Simply report back failure; except if we already have some results.
if not addrs:
raise socket.gaierror(11001, 'getaddrinfo failed') from e
except BaseException as e:
# Possibly internal error in dnspython :( see #4483
_logger.info(f'dnspython failed to resolve dns (A) with error: {e}')
if addrs:
return addrs
# Fall back to original socket.getaddrinfo to resolve dns.
return [host]
addrs = [host]
if needs_dns_resolving(host):
addrs = resolve_with_dnspython(host)
list_of_list_of_socketinfos = [socket._getaddrinfo(addr, *args, **kwargs) for addr in addrs]
list_of_socketinfos = [item for lst in list_of_list_of_socketinfos for item in lst]
return list_of_socketinfos
@log_exceptions
async def set_parameters(self, net_params: NetworkParameters):
proxy = net_params.proxy
proxy_str = serialize_proxy(proxy)
host, port, protocol = net_params.host, net_params.port, net_params.protocol
server_str = serialize_server(host, port, protocol)
# sanitize parameters
try:
deserialize_server(serialize_server(host, port, protocol))
if proxy:
proxy_modes.index(proxy['mode']) + 1
int(proxy['port'])
except:
return
self.config.set_key('auto_connect', net_params.auto_connect, False)
self.config.set_key('oneserver', net_params.oneserver, False)
self.config.set_key('proxy', proxy_str, False)
self.config.set_key('server', server_str, True)
# abort if changes were not allowed by config
if self.config.get('server') != server_str \
or self.config.get('proxy') != proxy_str \
or self.config.get('oneserver') != net_params.oneserver:
return
async with self.restart_lock:
self.auto_connect = net_params.auto_connect
if self.proxy != proxy or self.protocol != protocol or self.oneserver != net_params.oneserver:
# Restart the network defaulting to the given server
await self._stop()
self.default_server = server_str
await self._start()
elif self.default_server != server_str:
await self.switch_to_interface(server_str)
else:
await self.switch_lagging_interface()
def _set_oneserver(self, oneserver: bool):
self.num_server = NUM_TARGET_CONNECTED_SERVERS if not oneserver else 0
self.oneserver = bool(oneserver)
async def _switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
await self.switch_to_interface(random.choice(servers))
async def switch_lagging_interface(self):
'''If auto_connect and lagging, switch interface'''
if self.auto_connect and await self._server_is_lagging():
# switch to one that has the correct header (not height)
best_header = self.blockchain().read_header(self.get_local_height())
with self.interfaces_lock: interfaces = list(self.interfaces.values())
filtered = list(filter(lambda iface: iface.tip_header == best_header, interfaces))
if filtered:
chosen_iface = random.choice(filtered)
await self.switch_to_interface(chosen_iface.server)
async def switch_unwanted_fork_interface(self):
"""If auto_connect and main interface is not on preferred fork,
try to switch to preferred fork.
"""
if not self.auto_connect or not self.interface:
return
with self.interfaces_lock: interfaces = list(self.interfaces.values())
# try to switch to preferred fork
if self._blockchain_preferred_block:
pref_height = self._blockchain_preferred_block['height']
pref_hash = self._blockchain_preferred_block['hash']
if self.interface.blockchain.check_hash(pref_height, pref_hash):
return # already on preferred fork
filtered = list(filter(lambda iface: iface.blockchain.check_hash(pref_height, pref_hash),
interfaces))
if filtered:
self.logger.info("switching to preferred fork")
chosen_iface = random.choice(filtered)
await self.switch_to_interface(chosen_iface.server)
return
else:
self.logger.info("tried to switch to preferred fork but no interfaces are on it")
# try to switch to best chain
if self.blockchain().parent is None:
return # already on best chain
filtered = list(filter(lambda iface: iface.blockchain.parent is None,
interfaces))
if filtered:
self.logger.info("switching to best chain")
chosen_iface = random.choice(filtered)
await self.switch_to_interface(chosen_iface.server)
else:
# FIXME switch to best available?
self.logger.info("tried to switch to best chain but no interfaces are on it")
async def switch_to_interface(self, server: str):
"""Switch to server as our main interface. If no connection exists,
queue interface to be started. The actual switch will
happen when the interface becomes ready.
"""
self.default_server = server
old_interface = self.interface
old_server = old_interface.server if old_interface else None
# Stop any current interface in order to terminate subscriptions,
# and to cancel tasks in interface.group.
# However, for headers sub, give preference to this interface
# over unknown ones, i.e. start it again right away.
if old_server and old_server != server:
await self._close_interface(old_interface)
if len(self.interfaces) <= self.num_server:
self._start_interface(old_server)
if server not in self.interfaces:
self.interface = None
self._start_interface(server)
return
i = self.interfaces[server]
if old_interface != i:
self.logger.info(f"switching to {server}")
blockchain_updated = i.blockchain != self.blockchain()
self.interface = i
await i.group.spawn(self._request_server_info(i))
self.trigger_callback('default_server_changed')
self._set_status('connected')
self.trigger_callback('network_updated')
if blockchain_updated: self.trigger_callback('blockchain_updated')
async def _close_interface(self, interface):
if interface:
with self.interfaces_lock:
if self.interfaces.get(interface.server) == interface:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
await interface.close()
@with_recent_servers_lock
def _add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[:NUM_RECENT_SERVERS]
self._save_recent_servers()
async def connection_down(self, interface: Interface):
'''A connection to server either went down, or was never made.
We distinguish by whether it is in self.interfaces.'''
if not interface: return
server = interface.server
self.disconnected_servers.add(server)
if server == self.default_server:
self._set_status('disconnected')
await self._close_interface(interface)
self.trigger_callback('network_updated')
def get_network_timeout_seconds(self, request_type=NetworkTimeout.Generic) -> int:
if self.oneserver and not self.auto_connect:
return request_type.MOST_RELAXED
if self.proxy:
return request_type.RELAXED
return request_type.NORMAL
@ignore_exceptions # do not kill main_taskgroup
@log_exceptions
async def _run_new_interface(self, server):
interface = Interface(self, server, self.proxy)
# note: using longer timeouts here as DNS can sometimes be slow!
timeout = self.get_network_timeout_seconds(NetworkTimeout.Generic)
try:
await asyncio.wait_for(interface.ready, timeout)
except BaseException as e:
self.logger.info(f"couldn't launch iface {server} -- {repr(e)}")
await interface.close()
return
else:
with self.interfaces_lock:
assert server not in self.interfaces
self.interfaces[server] = interface
finally:
try: self.connecting.remove(server)
except KeyError: pass
if server == self.default_server:
await self.switch_to_interface(server)
self._add_recent_server(server)
self.trigger_callback('network_updated')
def check_interface_against_healthy_spread_of_connected_servers(self, iface_to_check) -> bool:
# main interface is exempt. this makes switching servers easier
if iface_to_check.is_main_server():
return True
if not iface_to_check.bucket_based_on_ipaddress():
return True
# bucket connected interfaces
with self.interfaces_lock:
interfaces = list(self.interfaces.values())
if iface_to_check in interfaces:
interfaces.remove(iface_to_check)
buckets = defaultdict(list)
for iface in interfaces:
buckets[iface.bucket_based_on_ipaddress()].append(iface)
# check proposed server against buckets
onion_servers = buckets[BUCKET_NAME_OF_ONION_SERVERS]
if iface_to_check.is_tor():
# keep number of onion servers below half of all connected servers
if len(onion_servers) > NUM_TARGET_CONNECTED_SERVERS // 2:
return False
else:
bucket = iface_to_check.bucket_based_on_ipaddress()
if len(buckets[bucket]) > 1:
return False
return True
async def _init_headers_file(self):
b = blockchain.get_best_chain()
filename = b.path()
len_checkpoints = len(constants.net.CHECKPOINTS)
length = ZC_HEADER_SIZE * len_checkpoints * 2016
if not os.path.exists(filename) or os.path.getsize(filename) < length:
with open(filename, 'wb') as f:
for i in range(len_checkpoints):
for height, header_data in b.checkpoints[i][2]:
f.seek(height*ZC_HEADER_SIZE)
bin_header = util.bfh(header_data).ljust(ZC_HEADER_SIZE, util.bfh("00"))
f.write(bin_header)
util.ensure_sparse_file(filename)
with b.lock:
b.update_size()
def best_effort_reliable(func):
async def make_reliable_wrapper(self, *args, **kwargs):
for i in range(10):
iface = self.interface
# retry until there is a main interface
if not iface:
await asyncio.sleep(0.1)
continue # try again
# wait for it to be usable
iface_ready = iface.ready
iface_disconnected = iface.got_disconnected
await asyncio.wait([iface_ready, iface_disconnected], return_when=asyncio.FIRST_COMPLETED)
if not iface_ready.done() or iface_ready.cancelled():
await asyncio.sleep(0.1)
continue # try again
# try actual request
success_fut = asyncio.ensure_future(func(self, *args, **kwargs))
await asyncio.wait([success_fut, iface_disconnected], return_when=asyncio.FIRST_COMPLETED)
if success_fut.done() and not success_fut.cancelled():
if success_fut.exception():
try:
raise success_fut.exception()
except RequestTimedOut:
await iface.close()
await iface_disconnected
continue # try again
return success_fut.result()
# otherwise; try again
raise BestEffortRequestFailed('no interface to do request on... gave up.')
return make_reliable_wrapper
def catch_server_exceptions(func):
async def wrapper(self, *args, **kwargs):
try:
return await func(self, *args, **kwargs)
except aiorpcx.jsonrpc.CodeMessageError as e:
raise UntrustedServerReturnedError(original_exception=e) from e
return wrapper
@best_effort_reliable
@catch_server_exceptions
async def get_merkle_for_transaction(self, tx_hash: str, tx_height: int) -> dict:
if not is_hash256_str(tx_hash):
raise Exception(f"{repr(tx_hash)} is not a txid")
if not is_non_negative_integer(tx_height):
raise Exception(f"{repr(tx_height)} is not a block height")
return await self.interface.session.send_request('blockchain.transaction.get_merkle', [tx_hash, tx_height])
@best_effort_reliable
async def broadcast_transaction(self, tx, *, timeout=None) -> None:
if timeout is None:
timeout = self.get_network_timeout_seconds(NetworkTimeout.Urgent)
try:
out = await self.interface.session.send_request('blockchain.transaction.broadcast', [str(tx)], timeout=timeout)
# note: both 'out' and exception messages are untrusted input from the server
except (RequestTimedOut, asyncio.CancelledError, asyncio.TimeoutError):
raise # pass-through
except aiorpcx.jsonrpc.CodeMessageError as e:
self.logger.info(f"broadcast_transaction error [DO NOT TRUST THIS MESSAGE]: {repr(e)}")
raise TxBroadcastServerReturnedError(self.sanitize_tx_broadcast_response(e.message)) from e
except BaseException as e: # intentional BaseException for sanity!
self.logger.info(f"broadcast_transaction error2 [DO NOT TRUST THIS MESSAGE]: {repr(e)}")
send_exception_to_crash_reporter(e)
raise TxBroadcastUnknownError() from e
if out != tx.txid():
self.logger.info(f"unexpected txid for broadcast_transaction [DO NOT TRUST THIS MESSAGE]: {out} != {tx.txid()}")
raise TxBroadcastHashMismatch(_("Server returned unexpected transaction ID."))
@staticmethod
def sanitize_tx_broadcast_response(server_msg) -> str:
# Unfortunately, bitcoind and hence the Electrum protocol doesn't return a useful error code.
# So, we use substring matching to grok the error message.
# server_msg is untrusted input so it should not be shown to the user. see #4968
server_msg = str(server_msg)
server_msg = server_msg.replace("\n", r"\n")
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/policy/policy.cpp
# grep "reason ="
policy_error_messages = {
r"version": _("Transaction uses non-standard version."),
r"tx-size": _("The transaction was rejected because it is too large (in bytes)."),
r"scriptsig-size": None,
r"scriptsig-not-pushonly": None,
r"scriptpubkey": None,
r"bare-multisig": None,
r"dust": _("Transaction could not be broadcast due to dust outputs."),
r"multi-op-return": _("The transaction was rejected because it contains multiple OP_RETURN outputs."),
}
for substring in policy_error_messages:
if substring in server_msg:
msg = policy_error_messages[substring]
return msg if msg else substring
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/script/script_error.cpp
script_error_messages = {
r"Script evaluated without error but finished with a false/empty top stack element",
r"Script failed an OP_VERIFY operation",
r"Script failed an OP_EQUALVERIFY operation",
r"Script failed an OP_CHECKMULTISIGVERIFY operation",
r"Script failed an OP_CHECKSIGVERIFY operation",
r"Script failed an OP_NUMEQUALVERIFY operation",
r"Script is too big",
r"Push value size limit exceeded",
r"Operation limit exceeded",
r"Stack size limit exceeded",
r"Signature count negative or greater than pubkey count",
r"Pubkey count negative or limit exceeded",
r"Opcode missing or not understood",
r"Attempted to use a disabled opcode",
r"Operation not valid with the current stack size",
r"Operation not valid with the current altstack size",
r"OP_RETURN was encountered",
r"Invalid OP_IF construction",
r"Negative locktime",
r"Locktime requirement not satisfied",
r"Signature hash type missing or not understood",
r"Non-canonical DER signature",
r"Data push larger than necessary",
r"Only non-push operators allowed in signatures",
r"Non-canonical signature: S value is unnecessarily high",
r"Dummy CHECKMULTISIG argument must be zero",
r"OP_IF/NOTIF argument must be minimal",
r"Signature must be zero for failed CHECK(MULTI)SIG operation",
r"NOPx reserved for soft-fork upgrades",
r"Public key is neither compressed or uncompressed",
r"Extra items left on stack after execution",
r"Signature is found in scriptCode",
}
for substring in script_error_messages:
if substring in server_msg:
return substring
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/validation.cpp
# grep "REJECT_"
# should come after script_error.cpp (due to e.g. non-mandatory-script-verify-flag)
validation_error_messages = {
r"coinbase",
r"tx-size-small",
r"non-final",
r"txn-already-in-mempool",
r"txn-mempool-conflict",
r"txn-already-known",
r"non-BIP68-final",
r"bad-txns-nonstandard-inputs",
r"bad-txns-too-many-sigops",
r"mempool min fee not met",
r"min relay fee not met",
r"absurdly-high-fee",
r"too-long-mempool-chain",
r"bad-txns-spends-conflicting-tx",
r"insufficient fee",
r"too many potential replacements",
r"replacement-adds-unconfirmed",
r"mempool full",
r"non-mandatory-script-verify-flag",
r"mandatory-script-verify-flag-failed",
}
for substring in validation_error_messages:
if substring in server_msg:
return substring
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/rpc/rawtransaction.cpp
# grep "RPC_TRANSACTION"
# grep "RPC_DESERIALIZATION_ERROR"
# https://github.com/bitcoin/bitcoin/blob/d7d7d315060620446bd363ca50f95f79d3260db7/src/util/error.cpp
rawtransaction_error_messages = {
r"Missing inputs",
r"transaction already in block chain",
r"Transaction already in block chain",
r"TX decode failed",
r"Peer-to-peer functionality missing or disabled",
r"Transaction rejected by AcceptToMemoryPool",
r"AcceptToMemoryPool failed",
}
for substring in rawtransaction_error_messages:
if substring in server_msg:
return substring
# https://github.com/bitcoin/bitcoin/blob/cd42553b1178a48a16017eff0b70669c84c3895c/src/consensus/tx_verify.cpp
# grep "REJECT_"
tx_verify_error_messages = {
r"bad-txns-vin-empty",
r"bad-txns-vout-empty",
r"bad-txns-oversize",
r"bad-txns-vout-negative",
r"bad-txns-vout-toolarge",
r"bad-txns-txouttotal-toolarge",
r"bad-txns-inputs-duplicate",
r"bad-cb-length",
r"bad-txns-prevout-null",
r"bad-txns-inputs-missingorspent",
r"bad-txns-premature-spend-of-coinbase",
r"bad-txns-inputvalues-outofrange",
r"bad-txns-in-belowout",
r"bad-txns-fee-outofrange",
}
for substring in tx_verify_error_messages:
if substring in server_msg:
return substring
# AUDAXd v0.13.1 specific errors
audaxd_specific_error_messages = {
r"bad-qc-not-allowed",
r"bad-qc-missing",
r"bad-qc-block",
r"bad-qc-invalid-null",
r"bad-qc-dup",
r"bad-qc-height",
r"bad-qc-invalid",
r"bad-tx-payload",
r"bad-qc-dup",
r"bad-qc-premature",
r"bad-qc-version",
r"bad-qc-quorum-hash",
r"bad-qc-type",
r"bad-tx-type",
r"bad-tx-type-check",
r"bad-tx-type-proc",
r"bad-cbtx-type",
r"bad-cbtx-invalid",
r"bad-cbtx-payload",
r"bad-cbtx-version",
r"bad-cbtx-height",
r"bad-cbtx-mnmerkleroot",
r"bad-txns-payload-oversize",
r"bad-txns-type",
r"bad-txns-cb-type",
r"qc-not-allowed",
r"bad-txlockrequest",
r"tx-txlock-conflict",
r"tx-txlockreq-mempool-conflict",
r"txlockreq-tx-mempool-conflict",
r"mempool min fee not met",
r"insufficient priority",
r"rate limited free transaction",
r"bad-txns-fee-negative",
r"bad-txns-BIP30",
r"bad-sb-start",
r"bad-blk-sigops",
r"bad-txns-nonfinal",
r"bad-cb-amount",
r"bad-cb-payee",
r"high-hash",
r"devnet-genesis",
r"bad-txnmrklroot",
r"bad-txns-duplicate",
r"bad-blk-length",
r"bad-cb-missing",
r"bad-cb-multiple",
r"conflict-tx-lock",
r"forked chain older than last checkpoint",
r"incorrect proof of work (DGW pre-fd-diffbitsork)",
r"bad-diffbits",
r"time-too-old",
r"time-too-new",
r"bad-cb-height",
r"bad-cb-type",
r"bad-prevblk",
r"Inputs unavailable",
r"Transaction check failed",
r"bad-version",
}
for substring in audaxd_specific_error_messages:
if substring in server_msg:
return substring
# otherwise:
return _("Unknown error")
@best_effort_reliable
@catch_server_exceptions
async def request_chunk(self, height: int, tip=None, *, can_return_early=False):
if not is_non_negative_integer(height):
raise Exception(f"{repr(height)} is not a block height")
return await self.interface.request_chunk(height, tip=tip, can_return_early=can_return_early)
@best_effort_reliable
@catch_server_exceptions
async def get_transaction(self, tx_hash: str, *, timeout=None) -> str:
if not is_hash256_str(tx_hash):
raise Exception(f"{repr(tx_hash)} is not a txid")
return await self.interface.session.send_request('blockchain.transaction.get', [tx_hash],
timeout=timeout)
@best_effort_reliable
@catch_server_exceptions
async def get_history_for_scripthash(self, sh: str) -> List[dict]:
if not is_hash256_str(sh):
raise Exception(f"{repr(sh)} is not a scripthash")
return await self.interface.session.send_request('blockchain.scripthash.get_history', [sh])
@best_effort_reliable
@catch_server_exceptions
async def listunspent_for_scripthash(self, sh: str) -> List[dict]:
if not is_hash256_str(sh):
raise Exception(f"{repr(sh)} is not a scripthash")
return await self.interface.session.send_request('blockchain.scripthash.listunspent', [sh])
@best_effort_reliable
@catch_server_exceptions
async def get_balance_for_scripthash(self, sh: str) -> dict:
if not is_hash256_str(sh):
raise Exception(f"{repr(sh)} is not a scripthash")
return await self.interface.session.send_request('blockchain.scripthash.get_balance', [sh])
def blockchain(self) -> Blockchain:
interface = self.interface
if interface and interface.blockchain is not None:
self._blockchain = interface.blockchain
return self._blockchain
def get_blockchains(self):
out = {} # blockchain_id -> list(interfaces)
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
with self.interfaces_lock: interfaces_values = list(self.interfaces.values())
for chain_id, bc in blockchain_items:
r = list(filter(lambda i: i.blockchain==bc, interfaces_values))
if r:
out[chain_id] = r
return out
def _set_preferred_chain(self, chain: Blockchain):
height = chain.get_max_forkpoint()
header_hash = chain.get_hash(height)
self._blockchain_preferred_block = {
'height': height,
'hash': header_hash,
}
self.config.set_key('blockchain_preferred_block', self._blockchain_preferred_block)
async def follow_chain_given_id(self, chain_id: str) -> None:
bc = blockchain.blockchains.get(chain_id)
if not bc:
raise Exception('blockchain {} not found'.format(chain_id))
self._set_preferred_chain(bc)
# select server on this chain
with self.interfaces_lock: interfaces = list(self.interfaces.values())
interfaces_on_selected_chain = list(filter(lambda iface: iface.blockchain == bc, interfaces))
if len(interfaces_on_selected_chain) == 0: return
chosen_iface = random.choice(interfaces_on_selected_chain)
# switch to server (and save to config)
net_params = self.get_parameters()
host, port, protocol = deserialize_server(chosen_iface.server)
net_params = net_params._replace(host=host, port=port, protocol=protocol)
await self.set_parameters(net_params)
async def follow_chain_given_server(self, server_str: str) -> None:
# note that server_str should correspond to a connected interface
iface = self.interfaces.get(server_str)
if iface is None:
return
self._set_preferred_chain(iface.blockchain)
# switch to server (and save to config)
net_params = self.get_parameters()
host, port, protocol = deserialize_server(server_str)
net_params = net_params._replace(host=host, port=port, protocol=protocol)
await self.set_parameters(net_params)
def get_local_height(self):
return self.blockchain().height()
def export_checkpoints(self, path):
"""Run manually to generate blockchain checkpoints.
Kept for console use only.
"""
cp = self.blockchain().get_checkpoints()
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(cp, indent=4))
async def _start(self):
assert not self.main_taskgroup
self.main_taskgroup = main_taskgroup = SilentTaskGroup()
assert not self.interface and not self.interfaces
assert not self.connecting and not self.server_queue
self.logger.info('starting network')
self.disconnected_servers = set([])
self.protocol = deserialize_server(self.default_server)[2]
self.server_queue = queue.Queue()
self._set_proxy(deserialize_proxy(self.config.get('proxy')))
self._set_oneserver(self.config.get('oneserver', False))
self._start_interface(self.default_server)
async def main():
try:
await self._init_headers_file()
# note: if a task finishes with CancelledError, that
# will NOT raise, and the group will keep the other tasks running
async with main_taskgroup as group:
await group.spawn(self._maintain_sessions())
[await group.spawn(job) for job in self._jobs]
except Exception as e:
self.logger.exception('')
raise e
asyncio.run_coroutine_threadsafe(main(), self.asyncio_loop)
self.trigger_callback('network_updated')
def start(self, jobs: List=None):
self._jobs = jobs or []
asyncio.run_coroutine_threadsafe(self._start(), self.asyncio_loop)
@log_exceptions
async def _stop(self, full_shutdown=False):
self.logger.info("stopping network")
try:
await asyncio.wait_for(self.main_taskgroup.cancel_remaining(), timeout=2)
except (asyncio.TimeoutError, asyncio.CancelledError) as e:
self.logger.info(f"exc during main_taskgroup cancellation: {repr(e)}")
self.main_taskgroup = None # type: TaskGroup
self.interface = None # type: Interface
self.interfaces = {} # type: Dict[str, Interface]
self.connecting.clear()
self.server_queue = None
if not full_shutdown:
self.trigger_callback('network_updated')
def stop(self):
assert self._loop_thread != threading.current_thread(), 'must not be called from network thread'
fut = asyncio.run_coroutine_threadsafe(self._stop(full_shutdown=True), self.asyncio_loop)
try:
fut.result(timeout=2)
except (asyncio.TimeoutError, asyncio.CancelledError): pass
async def _ensure_there_is_a_main_interface(self):
if self.is_connected():
return
now = time.time()
# if auto_connect is set, try a different server
if self.auto_connect and not self.is_connecting():
await self._switch_to_random_interface()
# if auto_connect is not set, or still no main interface, retry current
if not self.is_connected() and not self.is_connecting():
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
await self.switch_to_interface(self.default_server)
async def _maintain_sessions(self):
async def launch_already_queued_up_new_interfaces():
while self.server_queue.qsize() > 0:
server = self.server_queue.get()
await self.main_taskgroup.spawn(self._run_new_interface(server))
async def maybe_queue_new_interfaces_to_be_launched_later():
now = time.time()
for i in range(self.num_server - len(self.interfaces) - len(self.connecting)):
# FIXME this should try to honour "healthy spread of connected servers"
self._start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
self.logger.info('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
async def maintain_healthy_spread_of_connected_servers():
with self.interfaces_lock: interfaces = list(self.interfaces.values())
random.shuffle(interfaces)
for iface in interfaces:
if not self.check_interface_against_healthy_spread_of_connected_servers(iface):
self.logger.info(f"disconnecting from {iface.server}. too many connected "
f"servers already in bucket {iface.bucket_based_on_ipaddress()}")
await self._close_interface(iface)
async def maintain_main_interface():
await self._ensure_there_is_a_main_interface()
if self.is_connected():
if self.config.is_fee_estimates_update_required():
await self.interface.group.spawn(self._request_fee_estimates, self.interface)
while True:
try:
await launch_already_queued_up_new_interfaces()
await maybe_queue_new_interfaces_to_be_launched_later()
await maintain_healthy_spread_of_connected_servers()
await maintain_main_interface()
except asyncio.CancelledError:
# suppress spurious cancellations
group = self.main_taskgroup
if not group or group._closed:
raise
await asyncio.sleep(0.1)
@classmethod
async def _send_http_on_proxy(cls, method: str, url: str, params: str = None,
body: bytes = None, json: dict = None, headers=None,
on_finish=None, timeout=None):
async def default_on_finish(resp: ClientResponse):
resp.raise_for_status()
return await resp.text()
if headers is None:
headers = {}
if on_finish is None:
on_finish = default_on_finish
network = cls.get_instance()
proxy = network.proxy if network else None
async with make_aiohttp_session(proxy, timeout=timeout) as session:
if method == 'get':
async with session.get(url, params=params, headers=headers) as resp:
return await on_finish(resp)
elif method == 'post':
assert body is not None or json is not None, 'body or json must be supplied if method is post'
if body is not None:
async with session.post(url, data=body, headers=headers) as resp:
return await on_finish(resp)
elif json is not None:
async with session.post(url, json=json, headers=headers) as resp:
return await on_finish(resp)
else:
assert False
@classmethod
def send_http_on_proxy(cls, method, url, **kwargs):
network = cls.get_instance()
if network:
assert network._loop_thread is not threading.currentThread()
loop = network.asyncio_loop
else:
loop = asyncio.get_event_loop()
coro = asyncio.run_coroutine_threadsafe(cls._send_http_on_proxy(method, url, **kwargs), loop)
# note: _send_http_on_proxy has its own timeout, so no timeout here:
return coro.result()
@classmethod
def detect_tor_proxy(cls, proxy=None):
detected = None
tor_ip = '127.0.0.1'
tor_ports = [9050, 9150]
proxies = [('socks5', tor_ip, p) for p in tor_ports]
if proxy:
try:
psplit = proxy.split(':')[:3]
proxies.insert(0, (psplit[0], psplit[1], int(psplit[2])))
except:
pass
if hasattr(socket, "_socketobject"):
s = socket._socketobject(socket.AF_INET, socket.SOCK_STREAM)
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.1)
for p in proxies:
try:
s.connect(p[1:])
# Tor responds uniquely to HTTP-like requests
s.send(b"GET\n")
if b"Tor is not an HTTP Proxy" in s.recv(1024):
detected = p
break
except socket.error:
continue
return "%s:%s:%s::" % detected if detected else None
# methods used in scripts
async def get_peers(self):
while not self.is_connected():
await asyncio.sleep(1)
session = self.interface.session
return parse_servers(await session.send_request('server.peers.subscribe'))
async def send_multiple_requests(self, servers: List[str], method: str, params: Sequence):
responses = dict()
async def get_response(server):
interface = Interface(self, server, self.proxy)
timeout = self.get_network_timeout_seconds(NetworkTimeout.Urgent)
try:
await asyncio.wait_for(interface.ready, timeout)
except BaseException as e:
await interface.close()
return
try:
res = await interface.session.send_request(method, params, timeout=10)
except Exception as e:
res = e
responses[interface.server] = res
async with TaskGroup() as group:
for server in servers:
await group.spawn(get_response(server))
return responses
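# A rough sketch of how the broadcast helpers above are typically consumed by a
# GUI thread (hypothetical names; `tx` and `show_error` are placeholders, not
# part of this module):
#
#   try:
#       network.run_from_another_thread(network.broadcast_transaction(tx))
#   except TxBroadcastError as e:
#       show_error(e.get_message_for_gui())      # sanitized, safe to display
#   except UntrustedServerReturnedError as e:
#       show_error(str(e))                       # generic text; details go to logs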
| 42.514306
| 124
| 0.618659
|
4807ff9ea9ae20d63575451c718006e17884d96c
| 12,488
|
py
|
Python
|
src/parascopy/inner/genome.py
|
emikoifish/parascopy
|
4b43f0e16cfa135c953dacd06b08cd25b93ca578
|
[
"MIT"
] | null | null | null |
src/parascopy/inner/genome.py
|
emikoifish/parascopy
|
4b43f0e16cfa135c953dacd06b08cd25b93ca578
|
[
"MIT"
] | null | null | null |
src/parascopy/inner/genome.py
|
emikoifish/parascopy
|
4b43f0e16cfa135c953dacd06b08cd25b93ca578
|
[
"MIT"
] | null | null | null |
import pysam
import re
import sys
from . import common
class OnlyChromNames:
def __init__(self, names):
self._names = names
def chrom_name(self, chrom_id):
return self._names[chrom_id]
class ChromNames:
def __init__(self, names, lengths):
self._names = names
self._ids = { name: i for i, name in enumerate(names) }
self._lengths = lengths
def table_header(self):
res = '# Chromosomes: '
res += ','.join(map('%s:%d'.__mod__, zip(self._names, self._lengths)))
return res + '\n'
@classmethod
def from_table(cls, table):
prefix = '# Chromosomes: '
for line in table.header:
if line.startswith(prefix):
line = line[len(prefix):].strip()
names = []
lengths = []
for entry in line.split(','):
name, length = entry.split(':')
names.append(name)
lengths.append(int(length))
return cls(names, lengths)
raise ValueError('Cannot find line with prefix "{}" in the header'.format(prefix))
def matches_header(self, header):
chromosomes = header.split(':', 1)[1].strip().split(',')
if len(chromosomes) != len(self._names):
return False
for i, entry in enumerate(chromosomes):
name, length = entry.split(':')
if name != self._names[i] or int(length) != self._lengths[i]:
return False
return True
def chrom_id(self, chrom_name):
return self._ids[chrom_name]
def has_chrom(self, chrom_name):
return chrom_name in self._ids
def chrom_name(self, chrom_id):
return self._names[chrom_id]
def chrom_len(self, chrom_id):
return self._lengths[chrom_id]
@property
def has_lengths(self):
return bool(self._lengths)
@property
def chrom_names(self):
return self._names
@property
def chrom_lengths(self):
return self._lengths
@property
def n_chromosomes(self):
return len(self._names)
@classmethod
def from_pysam(cls, obj):
"""
From pysam object with fields `references` and `lengths` (for example `FastaFile` or `AlignmentFile`).
"""
return cls(obj.references, obj.lengths)
def generate_bam_header(self):
return '\n'.join(map('@SQ\tSN:%s\tLN:%d'.__mod__, zip(self._names, self._lengths)))
def chrom_interval(self, chrom_id):
return Interval(chrom_id, 0, self._lengths[chrom_id])
class Genome(ChromNames):
def __init__(self, filename):
self._filename = filename
self._fasta_file = pysam.FastaFile(filename)
super().__init__(self._fasta_file.references, self._fasta_file.lengths)
@property
def filename(self):
return self._filename
def fetch_interval(self, interval):
return self._fasta_file.fetch(self.chrom_name(interval.chrom_id), interval.start, interval.end).upper()
def close(self):
self._fasta_file.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def only_chrom_names(self):
return OnlyChromNames(self._fasta_file.references)
class Interval:
_interval_pattern = re.compile(r'^([^:]+):([0-9,]+)-([0-9,]+)$')
def __init__(self, chrom_id, start, end):
"""
Construct genomic interval.
Parameters:
- chrom_id: int, 0-based chromosome number,
- start: int, 0-based start (inclusive),
- end: int, 0-based end (exclusive).
"""
self._chrom_id = chrom_id
self._start = start
self._end = end
if self._end <= self._start:
raise ValueError('Cannot construct an empty interval: start0 = {:,}, end = {:,}'
.format(self._start, self._end))
@classmethod
def parse(cls, string, genome):
"""
Parse interval from string name:start-end, where start-end is 1-based closed interval.
`genome` should be an object with method `chrom_id(chrom_name)`. This can be `genome.Genome`.
"""
m = re.match(Interval._interval_pattern, string)
if m is None:
raise ValueError('Cannot parse "{}"'.format(string))
chrom_id = genome.chrom_id(m.group(1))
start = int(m.group(2).replace(',', '')) - 1
end = int(m.group(3).replace(',', ''))
return cls(chrom_id, start, end)
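        # Example (a sketch; assumes genome.chrom_id('chr1') == 0):
        #   Interval.parse('chr1:1,000-2,000', genome)
        #   -> Region(chrom_id=0, start=999, end=2,000)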
@classmethod
def parse_with_strand(cls, string, genome):
interval, strand = string.rsplit(':', 1)
assert strand == '+' or strand == '-'
return cls.parse(interval, genome), strand == '+'
@property
def chrom_id(self):
return self._chrom_id
def chrom_name(self, genome):
return genome.chrom_name(self._chrom_id)
@property
def start(self):
return self._start
@property
def start_1(self):
"""
Returns 1-based start.
"""
return self._start + 1
@property
def end(self):
return self._end
def to_str(self, genome):
"""
Returns string "chr:start-end", where start-end is 1-based inclusive interval.
"""
return '{}:{}-{}'.format(genome.chrom_name(self._chrom_id), self.start_1, self._end)
def to_str_comma(self, genome):
"""
Returns string "chr:start-end", where start-end is 1-based inclusive interval.
"""
return '{}:{:,}-{:,}'.format(genome.chrom_name(self._chrom_id), self.start_1, self._end)
def to_str_path(self, genome):
return '{}_{}-{}'.format(genome.chrom_name(self._chrom_id), self.start_1, self._end)
def to_str0(self, genome):
return '{}:{}..{}'.format(genome.chrom_name(self._chrom_id), self._start, self._end)
def to_bed(self, genome):
"""
Returns string "chr\tstart\tend", where start-end is 0-based semi-exclusive interval.
"""
return '{}\t{}\t{}'.format(genome.chrom_name(self._chrom_id), self._start, self._end)
def get_sequence(self, genome, strand=True):
"""
Returns genomic sequence.
`strand` can be True (forward strand) or False (reverse strand).
"""
seq = genome.fetch_interval(self)
if strand:
return seq
else:
return common.rev_comp(seq)
def __len__(self):
return self._end - self._start
def intersects(self, other):
return self._chrom_id == other._chrom_id and self._start < other._end and self._end > other._start
def intersection(self, other):
assert self.intersects(other)
return Interval(self._chrom_id, max(self._start, other._start), min(self._end, other._end))
def intersection_size(self, other):
if self._chrom_id != other._chrom_id:
return 0
return max(0, min(self._end, other._end) - max(self._start, other._start))
def forward_order(self, other, strict_order):
if self._chrom_id != other._chrom_id:
return False
if strict_order:
return self._end <= other._start
return self._start <= other._end
def with_max_start(self, max_start):
"""
Returns new Interval with start = max(self.start, max_start).
"""
if self._start >= max_start:
return self
return Interval(self._chrom_id, max_start, self._end)
def with_min_end(self, min_end):
"""
Returns new Interval with end = min(self.end, min_end).
"""
if self._end <= min_end:
return self
return Interval(self._chrom_id, self._start, min_end)
def out_of_bounds(self, genome):
return self._end > genome.chrom_len(self._chrom_id)
def trim(self, genome):
"""
Trim end if it is bigger than the chromosome length.
"""
chrom_len = genome.chrom_len(self._chrom_id)
if self._start >= chrom_len:
raise ValueError('Interval {} is out of bounds: chromosome length is {}'
.format(self.to_str(genome), chrom_len))
self._end = min(chrom_len, self._end)
def start_distance(self, other):
"""
Returns distance between starts. Returns -1 if intervals lie on different chromosomes.
"""
if self._chrom_id != other._chrom_id:
return -1
return abs(self._start - other._start)
def distance(self, other):
"""
        Returns distance between the closest points of two intervals.
Returns sys.maxsize if intervals lie on different chromosomes.
"""
if self._chrom_id != other._chrom_id:
return sys.maxsize
return max(0, self._start - other._end + 1, other._start - self._end + 1)
def combine(self, other):
"""
Returns combined interval (min of starts, max of ends). Should have the same chromosomes.
"""
assert self._chrom_id == other._chrom_id
return Interval(self._chrom_id, min(self._start, other._start), max(self._end, other._end))
def add_padding(self, padding):
return Interval(self._chrom_id, max(self._start - padding, 0), self._end + padding)
def to_tuple(self):
return (self._chrom_id, self._start, self._end)
def __lt__(self, other):
return self.to_tuple() < other.to_tuple()
def __eq__(self, other):
return self._chrom_id == other._chrom_id and self._start == other._start and self._end == other._end
def __hash__(self):
return hash(self.to_tuple())
def contains(self, other):
return self._chrom_id == other._chrom_id and self._start <= other._start and self._end >= other._end
def contains_point(self, chrom_id, pos):
return self._chrom_id == chrom_id and self._start <= pos < self._end
def __repr__(self):
return 'Region(chrom_id={}, start={:,}, end={:,})'.format(self._chrom_id, self._start, self._end)
@staticmethod
def combine_overlapping(intervals, max_dist=0):
res = []
for interval in sorted(intervals):
if not res or res[-1].distance(interval) > max_dist:
res.append(interval)
else:
res[-1] = res[-1].combine(interval)
return res
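        # Example (a sketch with hypothetical coordinates on chromosome id 0):
        #   Interval.combine_overlapping([Interval(0, 500, 600),
        #                                 Interval(0, 100, 200),
        #                                 Interval(0, 150, 300)])
        #   -> [Region(chrom_id=0, start=100, end=300),
        #       Region(chrom_id=0, start=500, end=600)]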
class NamedInterval(Interval):
def __init__(self, chrom_id, start, end, genome, name=None):
super().__init__(chrom_id, start, end)
self._name_provided = name is not None
if name is None:
self._name = self.to_str_comma(genome)
self._os_name = self.to_str(genome)
else:
self._name = name
self._os_name = re.sub(r'[^0-9a-zA-Z_:-]', '_', name)
@classmethod
def from_region(cls, region, name):
assert name is not None
return cls(region.chrom_id, region.start, region.end, None, name)
@property
def name_provided(self):
return self._name_provided
@property
def name(self):
return self._name
@property
def os_name(self):
"""
        Returns a name that is safe to use in file names.
"""
return self._os_name
def full_name(self, genome):
if self._name_provided:
return '{} ({})'.format(super().to_str_comma(genome), self._name)
return self._name
@classmethod
def parse(cls, string, genome):
interval = Interval.parse(string, genome)
return cls(interval.chrom_id, interval.start, interval.end, genome)
_nucleotides = 'ACGT'
_nucleotide_index = { nt: i for i, nt in enumerate(_nucleotides) }
def kmers(seq, k):
"""
Returns iterator over pairs `(index: int, kmer: int)`.
`kmer` can be transformed into sequence using `kmer_sequence(...)`.
"""
k1 = k - 1
cut_old = (1 << 2 * k1) - 1
kmer = 0
til_yield = k1
for i, nt in enumerate(seq):
nt_index = _nucleotide_index.get(nt)
if nt_index is None:
kmer = 0
til_yield = k1
continue
kmer = ((kmer & cut_old) << 2) + nt_index
if til_yield:
til_yield -= 1
else:
yield (i - k1, kmer)
def kmer_sequence(int_kmer, kmer_len):
    """
    Decodes an integer k-mer (2 bits per nucleotide, as produced by `kmers`) back into a string.
    """
    res = ''
    for _ in range(kmer_len):
        # Take the lowest 2 bits (last nucleotide first), then shift them out.
        res += _nucleotides[int_kmer % 4]
        int_kmer = int_kmer >> 2
    return ''.join(reversed(res))
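# A minimal sketch of the 2-bit k-mer encoding above, using a hypothetical
# input sequence (note the module uses relative imports, so in practice it is
# imported as part of the package rather than run directly):
if __name__ == '__main__':
    demo_seq = 'ACGTNACG'
    for pos, int_kmer in kmers(demo_seq, 3):
        # 'N' resets the rolling k-mer, so k-mers overlapping it are skipped.
        print(pos, int_kmer, kmer_sequence(int_kmer, 3))
    # Expected output (with the encoding A=0, C=1, G=2, T=3):
    #   0 6 ACG
    #   1 27 CGT
    #   5 6 ACG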
| 31.142145
| 111
| 0.599455
|
09d59e8e9a1a7905ab9e6d73c6978056c16497ad
| 83,980
|
py
|
Python
|
lib/werkzeug/datastructures.py
|
Slashbunny/maraschino
|
941a0f82a352e9c178e701d5156711b613f7f6db
|
[
"MIT"
] | 137
|
2015-01-12T19:29:04.000Z
|
2022-02-25T04:51:02.000Z
|
lib/werkzeug/datastructures.py
|
Slashbunny/maraschino
|
941a0f82a352e9c178e701d5156711b613f7f6db
|
[
"MIT"
] | 24
|
2015-01-06T08:36:13.000Z
|
2019-04-08T13:59:05.000Z
|
lib/werkzeug/datastructures.py
|
Slashbunny/maraschino
|
941a0f82a352e9c178e701d5156711b613f7f6db
|
[
"MIT"
] | 57
|
2015-01-01T00:42:44.000Z
|
2022-03-10T20:54:41.000Z
|
# -*- coding: utf-8 -*-
"""
werkzeug.datastructures
~~~~~~~~~~~~~~~~~~~~~~~
This module provides mixins and classes with an immutable interface.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import codecs
import mimetypes
from itertools import repeat
from werkzeug._internal import _proxy_repr, _missing, _empty_stream
_locale_delim_re = re.compile(r'[_-]')
def is_immutable(self):
raise TypeError('%r objects are immutable' % self.__class__.__name__)
def iter_multi_items(mapping):
"""Iterates over the items of a mapping yielding keys and values
without dropping any from more complex structures.
"""
if isinstance(mapping, MultiDict):
for item in mapping.iteritems(multi=True):
yield item
elif isinstance(mapping, dict):
for key, value in mapping.iteritems():
if isinstance(value, (tuple, list)):
for value in value:
yield key, value
else:
yield key, value
else:
for item in mapping:
yield item
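# Example (a sketch; the key order of a plain dict is not guaranteed here):
#   list(iter_multi_items(MultiDict([('a', 'b'), ('a', 'c')])))
#   -> [('a', 'b'), ('a', 'c')]
#   list(iter_multi_items({'x': [1, 2]}))
#   -> [('x', 1), ('x', 2)]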
class ImmutableListMixin(object):
"""Makes a :class:`list` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(tuple(self))
return rv
def __reduce_ex__(self, protocol):
return type(self), (list(self),)
def __delitem__(self, key):
is_immutable(self)
def __delslice__(self, i, j):
is_immutable(self)
def __iadd__(self, other):
is_immutable(self)
__imul__ = __iadd__
def __setitem__(self, key, value):
is_immutable(self)
def __setslice__(self, i, j, value):
is_immutable(self)
def append(self, item):
is_immutable(self)
remove = append
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def reverse(self):
is_immutable(self)
def sort(self, cmp=None, key=None, reverse=None):
is_immutable(self)
class ImmutableList(ImmutableListMixin, list):
"""An immutable :class:`list`.
.. versionadded:: 0.5
:private:
"""
__repr__ = _proxy_repr(list)
class ImmutableDictMixin(object):
"""Makes a :class:`dict` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
@classmethod
def fromkeys(cls, keys, value=None):
instance = super(cls, cls).__new__(cls)
instance.__init__(zip(keys, repeat(value)))
return instance
def __reduce_ex__(self, protocol):
return type(self), (dict(self),)
def _iter_hashitems(self):
return self.iteritems()
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
return rv
def setdefault(self, key, default=None):
is_immutable(self)
def update(self, *args, **kwargs):
is_immutable(self)
def pop(self, key, default=None):
is_immutable(self)
def popitem(self):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
def __delitem__(self, key):
is_immutable(self)
def clear(self):
is_immutable(self)
class ImmutableMultiDictMixin(ImmutableDictMixin):
"""Makes a :class:`MultiDict` immutable.
.. versionadded:: 0.5
:private:
"""
def __reduce_ex__(self, protocol):
return type(self), (self.items(multi=True),)
def _iter_hashitems(self):
return self.iteritems(multi=True)
def add(self, key, value):
is_immutable(self)
def popitemlist(self):
is_immutable(self)
def poplist(self, key):
is_immutable(self)
def setlist(self, key, new_list):
is_immutable(self)
def setlistdefault(self, key, default_list=None):
is_immutable(self)
class UpdateDictMixin(object):
"""Makes dicts call `self.on_update` on modifications.
.. versionadded:: 0.5
:private:
"""
on_update = None
def calls_update(name):
def oncall(self, *args, **kw):
rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
if self.on_update is not None:
self.on_update(self)
return rv
oncall.__name__ = name
return oncall
__setitem__ = calls_update('__setitem__')
__delitem__ = calls_update('__delitem__')
clear = calls_update('clear')
pop = calls_update('pop')
popitem = calls_update('popitem')
setdefault = calls_update('setdefault')
update = calls_update('update')
del calls_update
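    # A rough sketch of the intended use (werkzeug's own CallbackDict is built
    # on this mixin; `WatchedDict` below is only illustrative):
    #   class WatchedDict(UpdateDictMixin, dict):
    #       pass
    #   d = WatchedDict()
    #   d.on_update = lambda d: print('dict changed')
    #   d['key'] = 'value'   # on_update(d) is called after the assignment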
class TypeConversionDict(dict):
"""Works like a regular dict but the :meth:`get` method can perform
type conversions. :class:`MultiDict` and :class:`CombinedMultiDict`
are subclasses of this class and provide the same feature.
.. versionadded:: 0.5
"""
def get(self, key, default=None, type=None):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = TypeConversionDict(foo='42', bar='blub')
>>> d.get('foo', type=int)
42
>>> d.get('bar', -1, type=int)
-1
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
"""
try:
rv = self[key]
if type is not None:
rv = type(rv)
except (KeyError, ValueError):
rv = default
return rv
class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
"""Works like a :class:`TypeConversionDict` but does not support
modifications.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return TypeConversionDict(self)
def __copy__(self):
return self
class MultiDict(TypeConversionDict):
"""A :class:`MultiDict` is a dictionary subclass customized to deal with
multiple values for the same key which is for example used by the parsing
functions in the wrappers. This is necessary because some HTML form
elements pass multiple values for the same key.
:class:`MultiDict` implements all standard dictionary methods.
Internally, it saves all values for a key as a list, but the standard dict
access methods will only return the first value for a key. If you want to
gain access to the other values, too, you have to use the `list` methods as
explained below.
Basic Usage:
>>> d = MultiDict([('a', 'b'), ('a', 'c')])
>>> d
MultiDict([('a', 'b'), ('a', 'c')])
>>> d['a']
'b'
>>> d.getlist('a')
['b', 'c']
>>> 'a' in d
True
It behaves like a normal dict thus all dict functions will only return the
first value when multiple values for one key are found.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
A :class:`MultiDict` can be constructed from an iterable of
``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
onwards some keyword parameters.
:param mapping: the initial value for the :class:`MultiDict`. Either a
regular dict, an iterable of ``(key, value)`` tuples
or `None`.
"""
def __init__(self, mapping=None):
if isinstance(mapping, MultiDict):
dict.__init__(self, ((k, l[:]) for k, l in mapping.iterlists()))
elif isinstance(mapping, dict):
tmp = {}
for key, value in mapping.iteritems():
if isinstance(value, (tuple, list)):
value = list(value)
else:
value = [value]
tmp[key] = value
dict.__init__(self, tmp)
else:
tmp = {}
for key, value in mapping or ():
tmp.setdefault(key, []).append(value)
dict.__init__(self, tmp)
def __getstate__(self):
return dict(self.lists())
def __setstate__(self, value):
dict.clear(self)
dict.update(self, value)
def __iter__(self):
return self.iterkeys()
def __getitem__(self, key):
"""Return the first data value for this key;
raises KeyError if not found.
:param key: The key to be looked up.
:raise KeyError: if the key does not exist.
"""
if key in self:
return dict.__getitem__(self, key)[0]
raise BadRequestKeyError(key)
def __setitem__(self, key, value):
"""Like :meth:`add` but removes an existing key first.
:param key: the key for the value.
:param value: the value to set.
"""
dict.__setitem__(self, key, [value])
def add(self, key, value):
"""Adds a new value for the key.
.. versionadded:: 0.6
:param key: the key for the value.
:param value: the value to add.
"""
dict.setdefault(self, key, []).append(value)
def getlist(self, key, type=None):
"""Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just as `get`
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
"""
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return list(rv)
result = []
for item in rv:
try:
result.append(type(item))
except ValueError:
pass
return result
def setlist(self, key, new_list):
"""Remove the old values for a key and add new ones. Note that the list
you pass the values in will be shallow-copied before it is inserted in
the dictionary.
>>> d = MultiDict()
>>> d.setlist('foo', ['1', '2'])
>>> d['foo']
'1'
>>> d.getlist('foo')
['1', '2']
:param key: The key for which the values are set.
:param new_list: An iterable with the new values for the key. Old values
are removed first.
"""
dict.__setitem__(self, key, list(new_list))
def setdefault(self, key, default=None):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key not in self:
self[key] = default
else:
default = self[key]
return default
def setlistdefault(self, key, default_list=None):
"""Like `setdefault` but sets multiple values. The list returned
is not a copy, but the list that is actually used internally. This
means that you can put new values into the dict by appending items
to the list:
>>> d = MultiDict({"foo": 1})
>>> d.setlistdefault("foo").extend([2, 3])
>>> d.getlist("foo")
[1, 2, 3]
:param key: The key to be looked up.
:param default: An iterable of default values. It is either copied
(in case it was a list) or converted into a list
before returned.
:return: a :class:`list`
"""
if key not in self:
default_list = list(default_list or ())
dict.__setitem__(self, key, default_list)
else:
default_list = dict.__getitem__(self, key)
return default_list
def items(self, multi=False):
"""Return a list of ``(key, value)`` pairs.
:param multi: If set to `True` the list returned will have a
pair for each value of each key. Otherwise it
will only contain pairs for the first value of
each key.
:return: a :class:`list`
"""
return list(self.iteritems(multi))
def lists(self):
"""Return a list of ``(key, values)`` pairs, where values is the list of
all values associated with the key.
:return: a :class:`list`
"""
return list(self.iterlists())
def values(self):
"""Returns a list of the first value on every key's value list.
:return: a :class:`list`.
"""
return [self[key] for key in self.iterkeys()]
def listvalues(self):
"""Return a list of all values associated with a key. Zipping
:meth:`keys` and this is the same as calling :meth:`lists`:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> zip(d.keys(), d.listvalues()) == d.lists()
True
:return: a :class:`list`
"""
return list(self.iterlistvalues())
def iteritems(self, multi=False):
"""Like :meth:`items` but returns an iterator."""
for key, values in dict.iteritems(self):
if multi:
for value in values:
yield key, value
else:
yield key, values[0]
def iterlists(self):
"""Like :meth:`items` but returns an iterator."""
for key, values in dict.iteritems(self):
yield key, list(values)
def itervalues(self):
"""Like :meth:`values` but returns an iterator."""
for values in dict.itervalues(self):
yield values[0]
def iterlistvalues(self):
"""Like :meth:`listvalues` but returns an iterator."""
return dict.itervalues(self)
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self)
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first value for each key.
:return: a :class:`dict`
"""
if flat:
return dict(self.iteritems())
return dict(self.lists())
def update(self, other_dict):
"""update() extends rather than replaces existing key lists."""
for key, value in iter_multi_items(other_dict):
MultiDict.add(self, key, value)
def pop(self, key, default=_missing):
"""Pop the first item for a list on the dict. Afterwards the
key is removed from the dict, so additional values are discarded:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> d.pop("foo")
1
>>> "foo" in d
False
:param key: the key to pop.
:param default: if provided the value to return if the key was
not in the dictionary.
"""
try:
return dict.pop(self, key)[0]
except KeyError, e:
if default is not _missing:
return default
raise BadRequestKeyError(str(e))
def popitem(self):
"""Pop an item from the dict."""
try:
item = dict.popitem(self)
return (item[0], item[1][0])
except KeyError, e:
raise BadRequestKeyError(str(e))
def poplist(self, key):
"""Pop the list for a key from the dict. If the key is not in the dict
an empty list is returned.
.. versionchanged:: 0.5
           If the key no longer exists a list is returned instead of
raising an error.
"""
return dict.pop(self, key, [])
def popitemlist(self):
"""Pop a ``(key, list)`` tuple from the dict."""
try:
return dict.popitem(self)
except KeyError, e:
raise BadRequestKeyError(str(e))
def __copy__(self):
return self.copy()
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.items(multi=True))
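# A minimal usage sketch of the list-aware access documented in the MultiDict
# docstring above.  The helper name and the values are illustrative only; the
# code assumes the MultiDict class defined in this module.
def _multidict_usage_sketch():
    d = MultiDict([('tag', 'python'), ('tag', 'web')])
    assert d['tag'] == 'python'                 # plain dict access: first value
    assert d.getlist('tag') == ['python', 'web']
    d.add('tag', 'http')                        # add() keeps the earlier values
    assert d.to_dict(flat=False) == {'tag': ['python', 'web', 'http']}
    return d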
class _omd_bucket(object):
"""Wraps values in the :class:`OrderedMultiDict`. This makes it
possible to keep an order over multiple different keys. It requires
a lot of extra memory and slows down access a lot, but makes it
possible to access elements in O(1) and iterate in O(n).
"""
__slots__ = ('prev', 'key', 'value', 'next')
def __init__(self, omd, key, value):
self.prev = omd._last_bucket
self.key = key
self.value = value
self.next = None
if omd._first_bucket is None:
omd._first_bucket = self
if omd._last_bucket is not None:
omd._last_bucket.next = self
omd._last_bucket = self
def unlink(self, omd):
if self.prev:
self.prev.next = self.next
if self.next:
self.next.prev = self.prev
if omd._first_bucket is self:
omd._first_bucket = self.next
if omd._last_bucket is self:
omd._last_bucket = self.prev
class OrderedMultiDict(MultiDict):
"""Works like a regular :class:`MultiDict` but preserves the
order of the fields. To convert the ordered multi dict into a
list you can use the :meth:`items` method and pass it ``multi=True``.
In general an :class:`OrderedMultiDict` is an order of magnitude
slower than a :class:`MultiDict`.
.. admonition:: note
Due to a limitation in Python you cannot convert an ordered
multi dict into a regular dict by using ``dict(multidict)``.
Instead you have to use the :meth:`to_dict` method, otherwise
the internal bucket objects are exposed.
"""
def __init__(self, mapping=None):
dict.__init__(self)
self._first_bucket = self._last_bucket = None
if mapping is not None:
OrderedMultiDict.update(self, mapping)
def __eq__(self, other):
if not isinstance(other, MultiDict):
return NotImplemented
if isinstance(other, OrderedMultiDict):
iter1 = self.iteritems(multi=True)
iter2 = other.iteritems(multi=True)
try:
for k1, v1 in iter1:
k2, v2 = iter2.next()
if k1 != k2 or v1 != v2:
return False
except StopIteration:
return False
try:
iter2.next()
except StopIteration:
return True
return False
if len(self) != len(other):
return False
for key, values in self.iterlists():
if other.getlist(key) != values:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __reduce_ex__(self, protocol):
return type(self), (self.items(multi=True),)
def __getstate__(self):
return self.items(multi=True)
def __setstate__(self, values):
dict.clear(self)
for key, value in values:
self.add(key, value)
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)[0].value
raise BadRequestKeyError(key)
def __setitem__(self, key, value):
self.poplist(key)
self.add(key, value)
def __delitem__(self, key):
self.pop(key)
def iterkeys(self):
return (key for key, value in self.iteritems())
def itervalues(self):
return (value for key, value in self.iteritems())
def iteritems(self, multi=False):
ptr = self._first_bucket
if multi:
while ptr is not None:
yield ptr.key, ptr.value
ptr = ptr.next
else:
returned_keys = set()
while ptr is not None:
if ptr.key not in returned_keys:
returned_keys.add(ptr.key)
yield ptr.key, ptr.value
ptr = ptr.next
def iterlists(self):
returned_keys = set()
ptr = self._first_bucket
while ptr is not None:
if ptr.key not in returned_keys:
yield ptr.key, self.getlist(ptr.key)
returned_keys.add(ptr.key)
ptr = ptr.next
def iterlistvalues(self):
for key, values in self.iterlists():
yield values
def add(self, key, value):
dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
def getlist(self, key, type=None):
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return [x.value for x in rv]
result = []
for item in rv:
try:
result.append(type(item.value))
except ValueError:
pass
return result
def setlist(self, key, new_list):
self.poplist(key)
for value in new_list:
self.add(key, value)
def setlistdefault(self, key, default_list=None):
raise TypeError('setlistdefault is unsupported for '
'ordered multi dicts')
def update(self, mapping):
for key, value in iter_multi_items(mapping):
OrderedMultiDict.add(self, key, value)
def poplist(self, key):
buckets = dict.pop(self, key, ())
for bucket in buckets:
bucket.unlink(self)
return [x.value for x in buckets]
def pop(self, key, default=_missing):
try:
buckets = dict.pop(self, key)
except KeyError, e:
if default is not _missing:
return default
raise BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return buckets[0].value
def popitem(self):
try:
key, buckets = dict.popitem(self)
except KeyError, e:
raise BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, buckets[0].value
def popitemlist(self):
try:
key, buckets = dict.popitem(self)
except KeyError, e:
raise BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, [x.value for x in buckets]
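# A short sketch of the ordering guarantees described in the OrderedMultiDict
# docstring above (helper name and values are illustrative only).
def _ordered_multidict_usage_sketch():
    d = OrderedMultiDict()
    d.add('a', 1)
    d.add('b', 2)
    d.add('a', 3)
    # with multi=True the insertion order is preserved across keys
    assert d.items(multi=True) == [('a', 1), ('b', 2), ('a', 3)]
    assert d.getlist('a') == [1, 3]
    # a plain dict has to be obtained via to_dict(), see the admonition above
    assert d.to_dict(flat=False) == {'a': [1, 3], 'b': [2]}
    return d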
def _options_header_vkw(value, kw):
return dump_options_header(value, dict((k.replace('_', '-'), v)
for k, v in kw.items()))
class Headers(object):
"""An object that stores some headers. It has a dict-like interface
but is ordered and can store the same keys multiple times.
This data structure is useful if you want a nicer way to handle WSGI
headers which are stored as tuples in a list.
From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
and will render a page for a ``400 BAD REQUEST`` if caught in a
catch-all for HTTP exceptions.
Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
class, with the exception of `__getitem__`. :mod:`wsgiref` will return
`None` for ``headers['missing']``, whereas :class:`Headers` will raise
a :class:`KeyError`.
To create a new :class:`Headers` object pass it a list or dict of headers
which are used as default values. This does not reuse the list passed
to the constructor for internal usage. To create a :class:`Headers`
object that uses as internal storage the list or list-like object you
can use the :meth:`linked` class method.
:param defaults: The list of default values for the :class:`Headers`.
"""
def __init__(self, defaults=None, _list=None):
if _list is None:
_list = []
self._list = _list
if defaults is not None:
if isinstance(defaults, (list, Headers)):
self._list.extend(defaults)
else:
self.extend(defaults)
@classmethod
def linked(cls, headerlist):
"""Create a new :class:`Headers` object that uses the list of headers
passed as internal storage:
>>> headerlist = [('Content-Length', '40')]
>>> headers = Headers.linked(headerlist)
>>> headers['Content-Type'] = 'text/html'
>>> headerlist
[('Content-Length', '40'), ('Content-Type', 'text/html')]
:param headerlist: The list of headers the class is linked to.
:return: new linked :class:`Headers` object.
"""
return cls(_list=headerlist)
def __getitem__(self, key, _get_mode=False):
if not _get_mode:
if isinstance(key, (int, long)):
return self._list[key]
elif isinstance(key, slice):
return self.__class__(self._list[key])
ikey = key.lower()
for k, v in self._list:
if k.lower() == ikey:
return v
# micro optimization: if we are in get mode we will catch that
# exception one stack level down so we can raise a standard
# key error instead of our special one.
if _get_mode:
raise KeyError()
raise BadRequestKeyError(key)
def __eq__(self, other):
return other.__class__ is self.__class__ and \
set(other._list) == set(self._list)
def __ne__(self, other):
return not self.__eq__(other)
def get(self, key, default=None, type=None):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = Headers([('Content-Length', '42')])
>>> d.get('Content-Length', type=int)
42
If a headers object is bound you must not add unicode strings
because no encoding takes place.
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
"""
try:
rv = self.__getitem__(key, _get_mode=True)
except KeyError:
return default
if type is None:
return rv
try:
return type(rv)
except ValueError:
return default
def getlist(self, key, type=None):
"""Return the list of items for a given key. If that key is not in the
:class:`Headers`, the return value will be an empty list. Just as
:meth:`get` :meth:`getlist` accepts a `type` parameter. All items will
be converted with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
"""
ikey = key.lower()
result = []
for k, v in self:
if k.lower() == ikey:
if type is not None:
try:
v = type(v)
except ValueError:
continue
result.append(v)
return result
def get_all(self, name):
"""Return a list of all the values for the named field.
This method is compatible with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.get_all` method.
"""
return self.getlist(name)
def iteritems(self, lower=False):
for key, value in self:
if lower:
key = key.lower()
yield key, value
def iterkeys(self, lower=False):
for key, _ in self.iteritems(lower):
yield key
def itervalues(self):
for _, value in self.iteritems():
yield value
def keys(self, lower=False):
return list(self.iterkeys(lower))
def values(self):
return list(self.itervalues())
def items(self, lower=False):
return list(self.iteritems(lower))
def extend(self, iterable):
"""Extend the headers with a dict or an iterable yielding keys and
values.
"""
if isinstance(iterable, dict):
for key, value in iterable.iteritems():
if isinstance(value, (tuple, list)):
for v in value:
self.add(key, v)
else:
self.add(key, value)
else:
for key, value in iterable:
self.add(key, value)
def __delitem__(self, key, _index_operation=True):
if _index_operation and isinstance(key, (int, long, slice)):
del self._list[key]
return
key = key.lower()
new = []
for k, v in self._list:
if k.lower() != key:
new.append((k, v))
self._list[:] = new
def remove(self, key):
"""Remove a key.
:param key: The key to be removed.
"""
return self.__delitem__(key, _index_operation=False)
def pop(self, key=None, default=_missing):
"""Removes and returns a key or index.
:param key: The key to be popped. If this is an integer the item at
that position is removed, if it's a string the value for
that key is. If the key is omitted or `None` the last
item is removed.
:return: an item.
"""
if key is None:
return self._list.pop()
if isinstance(key, (int, long)):
return self._list.pop(key)
try:
rv = self[key]
self.remove(key)
except KeyError:
if default is not _missing:
return default
raise
return rv
def popitem(self):
"""Removes a key or index and returns a (key, value) item."""
return self.pop()
def __contains__(self, key):
"""Check if a key is present."""
try:
self.__getitem__(key, _get_mode=True)
except KeyError:
return False
return True
has_key = __contains__
def __iter__(self):
"""Yield ``(key, value)`` tuples."""
return iter(self._list)
def __len__(self):
return len(self._list)
def add(self, _key, _value, **kw):
"""Add a new header tuple to the list.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes::
>>> d = Headers()
>>> d.add('Content-Type', 'text/plain')
>>> d.add('Content-Disposition', 'attachment', filename='foo.png')
The keyword argument dumping uses :func:`dump_options_header`
behind the scenes.
.. versionadded:: 0.4.1
keyword arguments were added for :mod:`wsgiref` compatibility.
"""
if kw:
_value = _options_header_vkw(_value, kw)
self._validate_value(_value)
self._list.append((_key, _value))
def _validate_value(self, value):
if isinstance(value, basestring) and ('\n' in value or '\r' in value):
raise ValueError('Detected newline in header value. This is '
'a potential security problem')
def add_header(self, _key, _value, **_kw):
"""Add a new header tuple to the list.
An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.add_header` method.
"""
self.add(_key, _value, **_kw)
def clear(self):
"""Clears all headers."""
del self._list[:]
def set(self, _key, _value, **kw):
"""Remove all header tuples for `key` and add a new one. The newly
added key either appears at the end of the list if there was no
entry or replaces the first one.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes. See :meth:`add` for
more information.
.. versionchanged:: 0.6.1
:meth:`set` now accepts the same arguments as :meth:`add`.
:param key: The key to be inserted.
:param value: The value to be inserted.
"""
if kw:
_value = _options_header_vkw(_value, kw)
self._validate_value(_value)
if not self._list:
self._list.append((_key, _value))
return
listiter = iter(self._list)
ikey = _key.lower()
for idx, (old_key, old_value) in enumerate(listiter):
if old_key.lower() == ikey:
                # replace first occurrence
self._list[idx] = (_key, _value)
break
else:
self._list.append((_key, _value))
return
self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey]
def setdefault(self, key, value):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key in self:
return self[key]
self.set(key, value)
return value
def __setitem__(self, key, value):
"""Like :meth:`set` but also supports index/slice based setting."""
if isinstance(key, (slice, int, long)):
self._validate_value(value)
self._list[key] = value
else:
self.set(key, value)
def to_list(self, charset='iso-8859-1'):
"""Convert the headers into a list and converts the unicode header
items to the specified charset.
:return: list
"""
return [(k, isinstance(v, unicode) and v.encode(charset) or str(v))
for k, v in self]
def copy(self):
return self.__class__(self._list)
def __copy__(self):
return self.copy()
def __str__(self, charset='iso-8859-1'):
"""Returns formatted headers suitable for HTTP transmission."""
strs = []
for key, value in self.to_list(charset):
strs.append('%s: %s' % (key, value))
strs.append('\r\n')
return '\r\n'.join(strs)
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
list(self)
)
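# An illustrative sketch of the Headers API documented above.  The helper name
# and header values are made up; only the Headers class from this module is
# assumed.
def _headers_usage_sketch():
    h = Headers()
    h.add('Content-Type', 'text/plain')
    h.add('X-Tag', 'a')
    h.add('X-Tag', 'b')
    assert h.get('content-type') == 'text/plain'    # lookups ignore case
    assert h.getlist('x-tag') == ['a', 'b']
    h.set('X-Tag', 'c')                             # set() replaces all values
    assert h.getlist('X-Tag') == ['c']
    return h.to_list()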
class ImmutableHeadersMixin(object):
"""Makes a :class:`Headers` immutable. We do not mark them as
hashable though since the only usecase for this datastructure
in Werkzeug is a view on a mutable structure.
.. versionadded:: 0.5
:private:
"""
def __delitem__(self, key):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
set = __setitem__
def add(self, item):
is_immutable(self)
remove = add_header = add
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def popitem(self):
is_immutable(self)
def setdefault(self, key, default):
is_immutable(self)
class EnvironHeaders(ImmutableHeadersMixin, Headers):
"""Read only version of the headers from a WSGI environment. This
provides the same interface as `Headers` and is constructed from
a WSGI environment.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
HTTP exceptions.
"""
def __init__(self, environ):
self.environ = environ
@classmethod
def linked(cls, environ):
raise TypeError('%r object is always linked to environment, '
'no separate initializer' % cls.__name__)
def __eq__(self, other):
return self.environ is other.environ
def __getitem__(self, key, _get_mode=False):
# _get_mode is a no-op for this class as there is no index but
# used because get() calls it.
key = key.upper().replace('-', '_')
if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
return self.environ[key]
return self.environ['HTTP_' + key]
def __len__(self):
# the iter is necessary because otherwise list calls our
# len which would call list again and so forth.
return len(list(iter(self)))
def __iter__(self):
for key, value in self.environ.iteritems():
if key.startswith('HTTP_') and key not in \
('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
yield key[5:].replace('_', '-').title(), value
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield key.replace('_', '-').title(), value
def copy(self):
raise TypeError('cannot create %r copies' % self.__class__.__name__)
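# A small sketch of reading request headers from a WSGI environ as described
# above.  The environ dict is a hand-written example, not a real request.
def _environ_headers_usage_sketch():
    environ = {
        'CONTENT_TYPE': 'text/html',
        'CONTENT_LENGTH': '0',
        'HTTP_USER_AGENT': 'example-agent',
        'wsgi.version': (1, 0),
    }
    headers = EnvironHeaders(environ)
    assert headers['User-Agent'] == 'example-agent'
    assert headers.get('Content-Type') == 'text/html'
    return sorted(headers.keys())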
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
"""A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
instances as sequence and it will combine the return values of all wrapped
dicts:
>>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
>>> post = MultiDict([('foo', 'bar')])
>>> get = MultiDict([('blub', 'blah')])
>>> combined = CombinedMultiDict([get, post])
>>> combined['foo']
'bar'
>>> combined['blub']
'blah'
    This works for all read operations; methods that would usually modify the
    data raise a `TypeError` instead, because modification is not possible.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
"""
def __reduce_ex__(self, protocol):
return type(self), (self.dicts,)
def __init__(self, dicts=None):
self.dicts = dicts or []
@classmethod
def fromkeys(cls):
raise TypeError('cannot create %r instances by fromkeys' %
cls.__name__)
def __getitem__(self, key):
for d in self.dicts:
if key in d:
return d[key]
raise BadRequestKeyError(key)
def get(self, key, default=None, type=None):
for d in self.dicts:
if key in d:
if type is not None:
try:
return type(d[key])
except ValueError:
continue
return d[key]
return default
def getlist(self, key, type=None):
rv = []
for d in self.dicts:
rv.extend(d.getlist(key, type))
return rv
def keys(self):
rv = set()
for d in self.dicts:
rv.update(d.keys())
return list(rv)
def iteritems(self, multi=False):
found = set()
for d in self.dicts:
for key, value in d.iteritems(multi):
if multi:
yield key, value
elif key not in found:
found.add(key)
yield key, value
def itervalues(self):
for key, value in self.iteritems():
yield value
def values(self):
return list(self.itervalues())
def items(self, multi=False):
return list(self.iteritems(multi))
def iterlists(self):
rv = {}
for d in self.dicts:
for key, values in d.iterlists():
rv.setdefault(key, []).extend(values)
return rv.iteritems()
def lists(self):
return list(self.iterlists())
def iterlistvalues(self):
        return (x[1] for x in self.lists())
def listvalues(self):
return list(self.iterlistvalues())
def iterkeys(self):
return iter(self.keys())
__iter__ = iterkeys
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self.dicts[:])
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first item for each key.
:return: a :class:`dict`
"""
rv = {}
for d in reversed(self.dicts):
rv.update(d.to_dict(flat))
return rv
def __len__(self):
return len(self.keys())
def __contains__(self, key):
for d in self.dicts:
if key in d:
return True
return False
has_key = __contains__
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.dicts)
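# A sketch of the lookup precedence when several dicts are combined, following
# the docstring above (names and values are illustrative only).
def _combined_multidict_usage_sketch():
    get_args = MultiDict([('page', '1')])
    post_args = MultiDict([('page', '2'), ('name', 'demo')])
    combined = CombinedMultiDict([get_args, post_args])
    assert combined['page'] == '1'              # earlier dicts win single lookups
    assert combined.getlist('page') == ['1', '2']
    assert combined.to_dict() == {'page': '1', 'name': 'demo'}
    return combined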
class FileMultiDict(MultiDict):
"""A special :class:`MultiDict` that has convenience methods to add
files to it. This is used for :class:`EnvironBuilder` and generally
useful for unittesting.
.. versionadded:: 0.5
"""
def add_file(self, name, file, filename=None, content_type=None):
"""Adds a new file to the dict. `file` can be a file name or
a :class:`file`-like or a :class:`FileStorage` object.
:param name: the name of the field.
:param file: a filename or :class:`file`-like object
:param filename: an optional filename
:param content_type: an optional content type
"""
if isinstance(file, FileStorage):
value = file
else:
if isinstance(file, basestring):
if filename is None:
filename = file
file = open(file, 'rb')
if filename and content_type is None:
content_type = mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
value = FileStorage(file, filename, name, content_type)
self.add(name, value)
class ImmutableDict(ImmutableDictMixin, dict):
"""An immutable :class:`dict`.
.. versionadded:: 0.5
"""
__repr__ = _proxy_repr(dict)
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return dict(self)
def __copy__(self):
return self
class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
"""An immutable :class:`MultiDict`.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return MultiDict(self)
def __copy__(self):
return self
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
"""An immutable :class:`OrderedMultiDict`.
.. versionadded:: 0.6
"""
def _iter_hashitems(self):
return enumerate(self.iteritems(multi=True))
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return OrderedMultiDict(self)
def __copy__(self):
return self
class Accept(ImmutableList):
"""An :class:`Accept` object is just a list subclass for lists of
``(value, quality)`` tuples. It is automatically sorted by quality.
    All :class:`Accept` objects work like a list but provide extra
functionality for working with the data. Containment checks are
normalized to the rules of that header:
>>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
>>> a.best
'ISO-8859-1'
>>> 'iso-8859-1' in a
True
>>> 'UTF8' in a
True
>>> 'utf7' in a
False
To get the quality for an item you can use normal item lookup:
>>> print a['utf-8']
0.7
>>> a['utf7']
0
.. versionchanged:: 0.5
:class:`Accept` objects are forced immutable now.
"""
def __init__(self, values=()):
if values is None:
list.__init__(self)
self.provided = False
elif isinstance(values, Accept):
self.provided = values.provided
list.__init__(self, values)
else:
self.provided = True
values = [(a, b) for b, a in values]
values.sort()
values.reverse()
list.__init__(self, [(a, b) for b, a in values])
def _value_matches(self, value, item):
"""Check if a value matches a given accept item."""
return item == '*' or item.lower() == value.lower()
def __getitem__(self, key):
"""Besides index lookup (getting item n) you can also pass it a string
to get the quality for the item. If the item is not in the list, the
returned quality is ``0``.
"""
if isinstance(key, basestring):
return self.quality(key)
return list.__getitem__(self, key)
def quality(self, key):
"""Returns the quality of the key.
.. versionadded:: 0.6
In previous versions you had to use the item-lookup syntax
(eg: ``obj[key]`` instead of ``obj.quality(key)``)
"""
for item, quality in self:
if self._value_matches(key, item):
return quality
return 0
def __contains__(self, value):
for item, quality in self:
if self._value_matches(value, item):
return True
return False
def __repr__(self):
return '%s([%s])' % (
self.__class__.__name__,
', '.join('(%r, %s)' % (x, y) for x, y in self)
)
def index(self, key):
"""Get the position of an entry or raise :exc:`ValueError`.
:param key: The key to be looked up.
.. versionchanged:: 0.5
This used to raise :exc:`IndexError`, which was inconsistent
with the list API.
"""
if isinstance(key, basestring):
for idx, (item, quality) in enumerate(self):
if self._value_matches(key, item):
return idx
raise ValueError(key)
return list.index(self, key)
def find(self, key):
"""Get the position of an entry or return -1.
:param key: The key to be looked up.
"""
try:
return self.index(key)
except ValueError:
return -1
def values(self):
"""Return a list of the values, not the qualities."""
return list(self.itervalues())
def itervalues(self):
"""Iterate over all values."""
for item in self:
yield item[0]
def to_header(self):
"""Convert the header set into an HTTP header string."""
result = []
for value, quality in self:
if quality != 1:
value = '%s;q=%s' % (value, quality)
result.append(value)
return ','.join(result)
def __str__(self):
return self.to_header()
def best_match(self, matches, default=None):
"""Returns the best match from a list of possible matches based
on the quality of the client. If two items have the same quality,
        the one that comes first is returned.
:param matches: a list of matches to check for
:param default: the value that is returned if none match
"""
best_quality = -1
result = default
for server_item in matches:
for client_item, quality in self:
if quality <= best_quality:
break
if self._value_matches(server_item, client_item):
best_quality = quality
result = server_item
return result
@property
def best(self):
"""The best match as value."""
if self:
return self[0][0]
class MIMEAccept(Accept):
"""Like :class:`Accept` but with special methods and behavior for
mimetypes.
"""
def _value_matches(self, value, item):
def _normalize(x):
x = x.lower()
return x == '*' and ('*', '*') or x.split('/', 1)
# this is from the application which is trusted. to avoid developer
# frustration we actually check these for valid values
if '/' not in value:
raise ValueError('invalid mimetype %r' % value)
value_type, value_subtype = _normalize(value)
if value_type == '*' and value_subtype != '*':
raise ValueError('invalid mimetype %r' % value)
if '/' not in item:
return False
item_type, item_subtype = _normalize(item)
if item_type == '*' and item_subtype != '*':
return False
return (
(item_type == item_subtype == '*' or
value_type == value_subtype == '*') or
(item_type == value_type and (item_subtype == '*' or
value_subtype == '*' or
item_subtype == value_subtype))
)
@property
def accept_html(self):
"""True if this object accepts HTML."""
return (
'text/html' in self or
'application/xhtml+xml' in self or
self.accept_xhtml
)
@property
def accept_xhtml(self):
"""True if this object accepts XHTML."""
return (
'application/xhtml+xml' in self or
'application/xml' in self
)
@property
def accept_json(self):
"""True if this object accepts JSON."""
return 'application/json' in self
class LanguageAccept(Accept):
"""Like :class:`Accept` but with normalization for languages."""
def _value_matches(self, value, item):
def _normalize(language):
return _locale_delim_re.split(language.lower())
return item == '*' or _normalize(value) == _normalize(item)
class CharsetAccept(Accept):
"""Like :class:`Accept` but with normalization for charsets."""
def _value_matches(self, value, item):
def _normalize(name):
try:
return codecs.lookup(name).name
except LookupError:
return name.lower()
return item == '*' or _normalize(value) == _normalize(item)
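# A sketch of content negotiation with the Accept subclasses defined above.
# The header values below are illustrative only.
def _accept_usage_sketch():
    accept = MIMEAccept([('text/html', 1), ('application/json', 0.8),
                         ('*/*', 0.1)])
    assert accept.best == 'text/html'
    assert accept['application/json'] == 0.8
    assert accept.best_match(['application/json', 'text/plain']) == 'application/json'
    langs = LanguageAccept([('en-us', 1), ('de', 0.5)])
    assert 'en-US' in langs                     # language matching is normalized
    return accept.to_header()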
def cache_property(key, empty, type):
"""Return a new property object for a cache header. Useful if you
want to add support for a cache extension in a subclass."""
return property(lambda x: x._get_cache_value(key, empty, type),
lambda x, v: x._set_cache_value(key, v, type),
lambda x: x._del_cache_value(key),
'accessor for %r' % key)
class _CacheControl(UpdateDictMixin, dict):
"""Subclass of a dict that stores values for a Cache-Control header. It
has accessors for all the cache-control directives specified in RFC 2616.
The class does not differentiate between request and response directives.
    Because the cache-control directives in the HTTP header use dashes, the
python descriptors use underscores for that.
To get a header of the :class:`CacheControl` object again you can convert
the object into a string or call the :meth:`to_header` method. If you plan
to subclass it and add your own items have a look at the sourcecode for
that class.
.. versionchanged:: 0.4
Setting `no_cache` or `private` to boolean `True` will set the implicit
none-value which is ``*``:
>>> cc = ResponseCacheControl()
>>> cc.no_cache = True
>>> cc
<ResponseCacheControl 'no-cache'>
>>> cc.no_cache
'*'
>>> cc.no_cache = None
>>> cc
<ResponseCacheControl ''>
In versions before 0.5 the behavior documented here affected the now
no longer existing `CacheControl` class.
"""
no_cache = cache_property('no-cache', '*', None)
no_store = cache_property('no-store', None, bool)
max_age = cache_property('max-age', -1, int)
no_transform = cache_property('no-transform', None, None)
def __init__(self, values=(), on_update=None):
dict.__init__(self, values or ())
self.on_update = on_update
self.provided = values is not None
def _get_cache_value(self, key, empty, type):
"""Used internally by the accessor properties."""
if type is bool:
return key in self
if key in self:
value = self[key]
if value is None:
return empty
elif type is not None:
try:
value = type(value)
except ValueError:
pass
return value
def _set_cache_value(self, key, value, type):
"""Used internally by the accessor properties."""
if type is bool:
if value:
self[key] = None
else:
self.pop(key, None)
else:
if value is None:
self.pop(key)
elif value is True:
self[key] = None
else:
self[key] = value
def _del_cache_value(self, key):
"""Used internally by the accessor properties."""
if key in self:
del self[key]
def to_header(self):
"""Convert the stored values into a cache control header."""
return dump_header(self)
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
"""A cache control for requests. This is immutable and gives access
to all the request-relevant cache control headers.
To get a header of the :class:`RequestCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
max_stale = cache_property('max-stale', '*', int)
min_fresh = cache_property('min-fresh', '*', int)
no_transform = cache_property('no-transform', None, None)
only_if_cached = cache_property('only-if-cached', None, bool)
class ResponseCacheControl(_CacheControl):
"""A cache control for responses. Unlike :class:`RequestCacheControl`
this is mutable and gives access to response-relevant cache control
headers.
To get a header of the :class:`ResponseCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
public = cache_property('public', None, bool)
private = cache_property('private', '*', None)
must_revalidate = cache_property('must-revalidate', None, bool)
proxy_revalidate = cache_property('proxy-revalidate', None, bool)
s_maxage = cache_property('s-maxage', None, None)
# attach cache_property to the _CacheControl as staticmethod
# so that others can reuse it.
_CacheControl.cache_property = staticmethod(cache_property)
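# A brief sketch of building a Cache-Control header through the accessors
# generated by cache_property above (helper name is illustrative only).
def _cache_control_usage_sketch():
    cc = ResponseCacheControl()
    cc.public = True
    cc.max_age = 3600
    assert 'public' in cc and cc.max_age == 3600
    cc.no_cache = True                          # implicit none-value, reads back as '*'
    assert cc.no_cache == '*'
    return cc.to_header()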
class CallbackDict(UpdateDictMixin, dict):
"""A dict that calls a function passed every time something is changed.
The function is passed the dict instance.
"""
def __init__(self, initial=None, on_update=None):
dict.__init__(self, initial or ())
self.on_update = on_update
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
dict.__repr__(self)
)
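# A tiny sketch of the change-callback behaviour described above (names are
# illustrative only).
def _callback_dict_usage_sketch():
    changes = []
    d = CallbackDict({'theme': 'light'}, on_update=changes.append)
    d['theme'] = 'dark'                         # every modification fires the callback
    d['lang'] = 'en'
    assert len(changes) == 2
    return d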
class HeaderSet(object):
"""Similar to the :class:`ETags` class this implements a set-like structure.
Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
content-language headers.
If not constructed using the :func:`parse_set_header` function the
instantiation works like this:
>>> hs = HeaderSet(['foo', 'bar', 'baz'])
>>> hs
HeaderSet(['foo', 'bar', 'baz'])
"""
def __init__(self, headers=None, on_update=None):
self._headers = list(headers or ())
self._set = set([x.lower() for x in self._headers])
self.on_update = on_update
def add(self, header):
"""Add a new header to the set."""
self.update((header,))
def remove(self, header):
"""Remove a header from the set. This raises an :exc:`KeyError` if the
header is not in the set.
.. versionchanged:: 0.5
            In older versions an :exc:`IndexError` was raised instead of a
:exc:`KeyError` if the object was missing.
:param header: the header to be removed.
"""
key = header.lower()
if key not in self._set:
raise KeyError(header)
self._set.remove(key)
        for idx, stored in enumerate(self._headers):
            if stored.lower() == key:
del self._headers[idx]
break
if self.on_update is not None:
self.on_update(self)
def update(self, iterable):
"""Add all the headers from the iterable to the set.
:param iterable: updates the set with the items from the iterable.
"""
inserted_any = False
for header in iterable:
key = header.lower()
if key not in self._set:
self._headers.append(header)
self._set.add(key)
inserted_any = True
if inserted_any and self.on_update is not None:
self.on_update(self)
def discard(self, header):
"""Like :meth:`remove` but ignores errors.
:param header: the header to be discarded.
"""
try:
return self.remove(header)
except KeyError:
pass
def find(self, header):
"""Return the index of the header in the set or return -1 if not found.
:param header: the header to be looked up.
"""
header = header.lower()
for idx, item in enumerate(self._headers):
if item.lower() == header:
return idx
return -1
def index(self, header):
"""Return the index of the header in the set or raise an
:exc:`IndexError`.
:param header: the header to be looked up.
"""
rv = self.find(header)
if rv < 0:
raise IndexError(header)
return rv
def clear(self):
"""Clear the set."""
self._set.clear()
del self._headers[:]
if self.on_update is not None:
self.on_update(self)
def as_set(self, preserve_casing=False):
"""Return the set as real python set type. When calling this, all
the items are converted to lowercase and the ordering is lost.
:param preserve_casing: if set to `True` the items in the set returned
will have the original case like in the
:class:`HeaderSet`, otherwise they will
be lowercase.
"""
if preserve_casing:
return set(self._headers)
return set(self._set)
def to_header(self):
"""Convert the header set into an HTTP header string."""
return ', '.join(map(quote_header_value, self._headers))
def __getitem__(self, idx):
return self._headers[idx]
def __delitem__(self, idx):
rv = self._headers.pop(idx)
self._set.remove(rv.lower())
if self.on_update is not None:
self.on_update(self)
def __setitem__(self, idx, value):
old = self._headers[idx]
self._set.remove(old.lower())
self._headers[idx] = value
self._set.add(value.lower())
if self.on_update is not None:
self.on_update(self)
def __contains__(self, header):
return header.lower() in self._set
def __len__(self):
return len(self._set)
def __iter__(self):
return iter(self._headers)
def __nonzero__(self):
return bool(self._set)
def __str__(self):
return self.to_header()
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
self._headers
)
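# A sketch of the case-insensitive set behaviour documented above (header
# names are illustrative only).
def _header_set_usage_sketch():
    vary = HeaderSet(['Accept', 'Accept-Encoding'])
    vary.add('Cookie')
    assert 'accept-encoding' in vary            # containment ignores case
    assert vary.find('cookie') == 2
    vary.discard('Missing-Header')              # discard() never raises
    return vary.to_header()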
class ETags(object):
"""A set that can be used to check if one etag is present in a collection
of etags.
"""
def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
self._strong = frozenset(not star_tag and strong_etags or ())
self._weak = frozenset(weak_etags or ())
self.star_tag = star_tag
def as_set(self, include_weak=False):
"""Convert the `ETags` object into a python set. Per default all the
weak etags are not part of this set."""
rv = set(self._strong)
if include_weak:
rv.update(self._weak)
return rv
def is_weak(self, etag):
"""Check if an etag is weak."""
return etag in self._weak
def contains_weak(self, etag):
"""Check if an etag is part of the set including weak and strong tags."""
return self.is_weak(etag) or self.contains(etag)
def contains(self, etag):
"""Check if an etag is part of the set ignoring weak tags.
It is also possible to use the ``in`` operator.
"""
if self.star_tag:
return True
return etag in self._strong
def contains_raw(self, etag):
"""When passed a quoted tag it will check if this tag is part of the
set. If the tag is weak it is checked against weak and strong tags,
otherwise strong only."""
etag, weak = unquote_etag(etag)
if weak:
return self.contains_weak(etag)
return self.contains(etag)
def to_header(self):
"""Convert the etags set into a HTTP header string."""
if self.star_tag:
return '*'
return ', '.join(
['"%s"' % x for x in self._strong] +
['w/"%s"' % x for x in self._weak]
)
def __call__(self, etag=None, data=None, include_weak=False):
if [etag, data].count(None) != 1:
            raise TypeError('exactly one of etag or data is required')
if etag is None:
etag = generate_etag(data)
if include_weak:
if etag in self._weak:
return True
return etag in self._strong
def __nonzero__(self):
return bool(self.star_tag or self._strong)
def __str__(self):
return self.to_header()
def __iter__(self):
return iter(self._strong)
def __contains__(self, etag):
return self.contains(etag)
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
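# A sketch of strong/weak etag checks as documented above; the etag values are
# made up for illustration.
def _etags_usage_sketch():
    etags = ETags(strong_etags=['v1'], weak_etags=['v2'])
    assert 'v1' in etags                        # strong comparison
    assert not etags.contains('v2')             # weak tags are ignored here
    assert etags.contains_weak('v2')
    return etags.to_header()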
class IfRange(object):
"""Very simple object that represents the `If-Range` header in parsed
    form.  It will have either an etag or a date, or neither, but never
    both.
.. versionadded:: 0.7
"""
def __init__(self, etag=None, date=None):
#: The etag parsed and unquoted. Ranges always operate on strong
#: etags so the weakness information is not necessary.
self.etag = etag
#: The date in parsed format or `None`.
self.date = date
def to_header(self):
"""Converts the object back into an HTTP header."""
if self.date is not None:
return http_date(self.date)
if self.etag is not None:
return quote_etag(self.etag)
return ''
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
class Range(object):
"""Represents a range header. All the methods are only supporting bytes
as unit. It does store multiple ranges but :meth:`range_for_length` will
only work if only one range is provided.
.. versionadded:: 0.7
"""
def __init__(self, units, ranges):
#: The units of this range. Usually "bytes".
self.units = units
#: A list of ``(begin, end)`` tuples for the range header provided.
#: The ranges are non-inclusive.
self.ranges = ranges
def range_for_length(self, length):
"""If the range is for bytes, the length is not None and there is
exactly one range and it is satisfiable it returns a ``(start, stop)``
tuple, otherwise `None`.
"""
if self.units != 'bytes' or length is None or len(self.ranges) != 1:
return None
start, end = self.ranges[0]
if end is None:
end = length
if start < 0:
start += length
if is_byte_range_valid(start, end, length):
return start, min(end, length)
def make_content_range(self, length):
"""Creates a :class:`~werkzeug.datastructures.ContentRange` object
from the current range and given content length.
"""
rng = self.range_for_length(length)
if rng is not None:
return ContentRange(self.units, rng[0], rng[1], length)
def to_header(self):
"""Converts the object back into an HTTP header."""
ranges = []
for begin, end in self.ranges:
if end is None:
ranges.append(begin >= 0 and '%s-' % begin or str(begin))
else:
ranges.append('%s-%s' % (begin, end - 1))
return '%s=%s' % (self.units, ','.join(ranges))
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
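# A sketch of turning a parsed Range header into concrete byte offsets, as
# described above (the numbers are illustrative only).
def _range_usage_sketch():
    rng = Range('bytes', [(0, 500)])
    assert rng.range_for_length(1000) == (0, 500)
    assert rng.to_header() == 'bytes=0-499'
    content_range = rng.make_content_range(1000)
    assert content_range.to_header() == 'bytes 0-499/1000'
    return content_range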
class ContentRange(object):
"""Represents the content range header.
.. versionadded:: 0.7
"""
def __init__(self, units, start, stop, length=None, on_update=None):
assert is_byte_range_valid(start, stop, length), \
'Bad range provided'
self.on_update = on_update
self.set(start, stop, length, units)
def _callback_property(name):
def fget(self):
return getattr(self, name)
def fset(self, value):
setattr(self, name, value)
if self.on_update is not None:
self.on_update(self)
return property(fget, fset)
#: The units to use, usually "bytes"
units = _callback_property('_units')
#: The start point of the range or `None`.
start = _callback_property('_start')
#: The stop point of the range (non-inclusive) or `None`. Can only be
#: `None` if also start is `None`.
stop = _callback_property('_stop')
#: The length of the range or `None`.
length = _callback_property('_length')
def set(self, start, stop, length=None, units='bytes'):
"""Simple method to update the ranges."""
assert is_byte_range_valid(start, stop, length), \
'Bad range provided'
self._units = units
self._start = start
self._stop = stop
self._length = length
if self.on_update is not None:
self.on_update(self)
def unset(self):
"""Sets the units to `None` which indicates that the header should
no longer be used.
"""
self.set(None, None, units=None)
def to_header(self):
if self.units is None:
return ''
if self.length is None:
length = '*'
else:
length = self.length
if self.start is None:
return '%s */%s' % (self.units, length)
return '%s %s-%s/%s' % (
self.units,
self.start,
self.stop - 1,
length
)
def __nonzero__(self):
return self.units is not None
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
class Authorization(ImmutableDictMixin, dict):
"""Represents an `Authorization` header sent by the client. You should
not create this kind of object yourself but use it when it's returned by
the `parse_authorization_header` function.
This object is a dict subclass and can be altered by setting dict items
but it should be considered immutable as it's returned by the client and
not meant for modifications.
.. versionchanged:: 0.5
This object became immutable.
"""
def __init__(self, auth_type, data=None):
dict.__init__(self, data or {})
self.type = auth_type
username = property(lambda x: x.get('username'), doc='''
The username transmitted. This is set for both basic and digest
auth all the time.''')
password = property(lambda x: x.get('password'), doc='''
When the authentication type is basic this is the password
transmitted by the client, else `None`.''')
realm = property(lambda x: x.get('realm'), doc='''
This is the server realm sent back for HTTP digest auth.''')
nonce = property(lambda x: x.get('nonce'), doc='''
The nonce the server sent for digest auth, sent back by the client.
A nonce should be unique for every 401 response for HTTP digest
auth.''')
uri = property(lambda x: x.get('uri'), doc='''
The URI from Request-URI of the Request-Line; duplicated because
proxies are allowed to change the Request-Line in transit. HTTP
digest auth only.''')
nc = property(lambda x: x.get('nc'), doc='''
The nonce count value transmitted by clients if a qop-header is
also transmitted. HTTP digest auth only.''')
cnonce = property(lambda x: x.get('cnonce'), doc='''
If the server sent a qop-header in the ``WWW-Authenticate``
header, the client has to provide this value for HTTP digest auth.
See the RFC for more details.''')
response = property(lambda x: x.get('response'), doc='''
A string of 32 hex digits computed as defined in RFC 2617, which
proves that the user knows a password. Digest auth only.''')
opaque = property(lambda x: x.get('opaque'), doc='''
The opaque header from the server returned unchanged by the client.
It is recommended that this string be base64 or hexadecimal data.
Digest auth only.''')
@property
def qop(self):
"""Indicates what "quality of protection" the client has applied to
the message for HTTP digest auth."""
def on_update(header_set):
if not header_set and 'qop' in self:
del self['qop']
elif header_set:
self['qop'] = header_set.to_header()
return parse_set_header(self.get('qop'), on_update)
class WWWAuthenticate(UpdateDictMixin, dict):
"""Provides simple access to `WWW-Authenticate` headers."""
#: list of keys that require quoting in the generated header
_require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm'])
def __init__(self, auth_type=None, values=None, on_update=None):
dict.__init__(self, values or ())
if auth_type:
self['__auth_type__'] = auth_type
self.on_update = on_update
def set_basic(self, realm='authentication required'):
"""Clear the auth info and enable basic auth."""
dict.clear(self)
dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
if self.on_update:
self.on_update(self)
def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
algorithm=None, stale=False):
"""Clear the auth info and enable digest auth."""
d = {
'__auth_type__': 'digest',
'realm': realm,
'nonce': nonce,
'qop': dump_header(qop)
}
if stale:
d['stale'] = 'TRUE'
if opaque is not None:
d['opaque'] = opaque
if algorithm is not None:
d['algorithm'] = algorithm
dict.clear(self)
dict.update(self, d)
if self.on_update:
self.on_update(self)
def to_header(self):
"""Convert the stored values into a WWW-Authenticate header."""
d = dict(self)
auth_type = d.pop('__auth_type__', None) or 'basic'
return '%s %s' % (auth_type.title(), ', '.join([
'%s=%s' % (key, quote_header_value(value,
allow_token=key not in self._require_quoting))
for key, value in d.iteritems()
]))
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
def auth_property(name, doc=None):
"""A static helper function for subclasses to add extra authentication
system properties onto a class::
class FooAuthenticate(WWWAuthenticate):
special_realm = auth_property('special_realm')
For more information have a look at the sourcecode to see how the
regular properties (:attr:`realm` etc.) are implemented.
"""
def _set_value(self, value):
if value is None:
self.pop(name, None)
else:
self[name] = str(value)
return property(lambda x: x.get(name), _set_value, doc=doc)
def _set_property(name, doc=None):
def fget(self):
def on_update(header_set):
if not header_set and name in self:
del self[name]
elif header_set:
self[name] = header_set.to_header()
return parse_set_header(self.get(name), on_update)
return property(fget, doc=doc)
type = auth_property('__auth_type__', doc='''
The type of the auth mechanism. HTTP currently specifies
`Basic` and `Digest`.''')
realm = auth_property('realm', doc='''
A string to be displayed to users so they know which username and
password to use. This string should contain at least the name of
the host performing the authentication and might additionally
indicate the collection of users who might have access.''')
domain = _set_property('domain', doc='''
A list of URIs that define the protection space. If a URI is an
absolute path, it is relative to the canonical root URL of the
server being accessed.''')
nonce = auth_property('nonce', doc='''
A server-specified data string which should be uniquely generated
each time a 401 response is made. It is recommended that this
string be base64 or hexadecimal data.''')
opaque = auth_property('opaque', doc='''
A string of data, specified by the server, which should be returned
by the client unchanged in the Authorization header of subsequent
requests with URIs in the same protection space. It is recommended
that this string be base64 or hexadecimal data.''')
algorithm = auth_property('algorithm', doc='''
A string indicating a pair of algorithms used to produce the digest
and a checksum. If this is not present it is assumed to be "MD5".
If the algorithm is not understood, the challenge should be ignored
(and a different one used, if there is more than one).''')
qop = _set_property('qop', doc='''
A set of quality-of-privacy directives such as auth and auth-int.''')
def _get_stale(self):
val = self.get('stale')
if val is not None:
return val.lower() == 'true'
def _set_stale(self, value):
if value is None:
self.pop('stale', None)
else:
self['stale'] = value and 'TRUE' or 'FALSE'
stale = property(_get_stale, _set_stale, doc='''
A flag, indicating that the previous request from the client was
rejected because the nonce value was stale.''')
del _get_stale, _set_stale
# make auth_property a staticmethod so that subclasses of
# `WWWAuthenticate` can use it for new properties.
auth_property = staticmethod(auth_property)
del _set_property
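# A sketch of emitting challenge headers with the helpers above; the realm and
# nonce values are placeholders.
def _www_authenticate_usage_sketch():
    basic = WWWAuthenticate()
    basic.set_basic(realm='login required')
    assert basic.to_header() == 'Basic realm="login required"'
    digest = WWWAuthenticate()
    digest.set_digest('example realm', nonce='abc123')
    assert digest.type == 'digest'
    return digest.to_header()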
class FileStorage(object):
"""The :class:`FileStorage` class is a thin wrapper over incoming files.
It is used by the request object to represent uploaded files. All the
attributes of the wrapper stream are proxied by the file storage so
it's possible to do ``storage.read()`` instead of the long form
``storage.stream.read()``.
"""
def __init__(self, stream=None, filename=None, name=None,
content_type=None, content_length=None,
headers=None):
self.name = name
self.stream = stream or _empty_stream
# if no filename is provided we can attempt to get the filename
# from the stream object passed. There we have to be careful to
# skip things like <fdopen>, <stderr> etc. Python marks these
        # special filenames with angle brackets.
if filename is None:
filename = getattr(stream, 'name', None)
if filename and filename[0] == '<' and filename[-1] == '>':
filename = None
self.filename = filename
if headers is None:
headers = Headers()
self.headers = headers
if content_type is not None:
headers['Content-Type'] = content_type
if content_length is not None:
headers['Content-Length'] = str(content_length)
def _parse_content_type(self):
if not hasattr(self, '_parsed_content_type'):
self._parsed_content_type = \
parse_options_header(self.content_type)
@property
def content_type(self):
"""The file's content type. Usually not available"""
return self.headers.get('content-type')
@property
def content_length(self):
"""The file's content length. Usually not available"""
return int(self.headers.get('content-length') or 0)
@property
def mimetype(self):
"""Like :attr:`content_type` but without parameters (eg, without
charset, type etc.). For example if the content
type is ``text/html; charset=utf-8`` the mimetype would be
``'text/html'``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[0]
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[1]
def save(self, dst, buffer_size=16384):
"""Save the file to a destination path or file object. If the
destination is a file object you have to close it yourself after the
call. The buffer size is the number of bytes held in memory during
the copy process. It defaults to 16KB.
For secure file saving also have a look at :func:`secure_filename`.
:param dst: a filename or open file object the uploaded file
is saved to.
:param buffer_size: the size of the buffer. This works the same as
the `length` parameter of
:func:`shutil.copyfileobj`.
"""
from shutil import copyfileobj
close_dst = False
if isinstance(dst, basestring):
dst = file(dst, 'wb')
close_dst = True
try:
copyfileobj(self.stream, dst, buffer_size)
finally:
if close_dst:
dst.close()
def close(self):
"""Close the underlying file if possible."""
try:
self.stream.close()
except Exception:
pass
def __nonzero__(self):
return bool(self.filename)
def __getattr__(self, name):
return getattr(self.stream, name)
def __iter__(self):
return iter(self.readline, '')
def __repr__(self):
return '<%s: %r (%r)>' % (
self.__class__.__name__,
self.filename,
self.content_type
)
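# A minimal usage sketch (illustrative only, not part of the original module):
# the proxying above lets callers treat an uploaded file like the wrapped
# stream itself, e.g.
#
#     fs = FileStorage(stream=open('/tmp/upload.bin', 'rb'), filename='upload.bin',
#                      content_type='application/octet-stream')
#     head = fs.read(16)          # proxied to fs.stream.read via __getattr__
#     fs.save('/tmp/copy.bin')    # copies the stream in 16KB chunks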
# circular dependencies
from werkzeug.http import dump_options_header, dump_header, generate_etag, \
quote_header_value, parse_set_header, unquote_etag, quote_etag, \
parse_options_header, http_date, is_byte_range_valid
from werkzeug.exceptions import BadRequestKeyError
| 32.626263
| 83
| 0.58602
|
7c69129b6cced3c0efdd5f70dd54e6615a3e11f3
| 738
|
py
|
Python
|
AlgoExpert-Array/SpiralTraversal.py
|
betaSolver05609/LeetCode
|
0e7386e7ead80597e2893ec45f6dc6dfcd5d694b
|
[
"MIT"
] | null | null | null |
AlgoExpert-Array/SpiralTraversal.py
|
betaSolver05609/LeetCode
|
0e7386e7ead80597e2893ec45f6dc6dfcd5d694b
|
[
"MIT"
] | null | null | null |
AlgoExpert-Array/SpiralTraversal.py
|
betaSolver05609/LeetCode
|
0e7386e7ead80597e2893ec45f6dc6dfcd5d694b
|
[
"MIT"
] | null | null | null |
# Spiral Traversal
def spiralTraverse(array):
    # Walk the matrix in concentric rectangular "rings", shrinking the
    # boundaries after each ring until they cross.
    result = []
    startRow = 0
    startCol = 0
    endRow = len(array) - 1
    endCol = len(array[0]) - 1
    while startRow <= endRow and startCol <= endCol:
        # Top row, left to right.
        for col in range(startCol, endCol + 1):
            result.append(array[startRow][col])
        # Right column, top to bottom (the top-right corner is already added).
        for row in range(startRow + 1, endRow + 1):
            result.append(array[row][endCol])
        # Bottom row, right to left; skip it if the ring is a single row.
        for col in reversed(range(startCol, endCol)):
            if startRow == endRow:
                break
            result.append(array[endRow][col])
        # Left column, bottom to top; skip it if the ring is a single column.
        for row in reversed(range(startRow + 1, endRow)):
            if startCol == endCol:
                break
            result.append(array[row][startCol])
        startRow += 1
        endRow -= 1
        startCol += 1
        endCol -= 1
    return result
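if __name__ == "__main__":
    # Hypothetical sanity check, not part of the original solution file: the
    # 3x3 spiral reads the border clockwise, then the centre.
    assert spiralTraverse([[1, 2, 3],
                           [8, 9, 4],
                           [7, 6, 5]]) == [1, 2, 3, 4, 5, 6, 7, 8, 9]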
| 22.363636
| 49
| 0.691057
|
13ded90303ca12e3b961849b225afc50c85c500b
| 9,891
|
py
|
Python
|
docs/conf.py
|
jaor/flatline
|
dabccdfd5b70520cbe99afa86a8fcf3f6a9dbd93
|
[
"Apache-2.0"
] | 18
|
2015-04-06T22:06:03.000Z
|
2021-10-29T22:05:23.000Z
|
docs/conf.py
|
jaor/flatline
|
dabccdfd5b70520cbe99afa86a8fcf3f6a9dbd93
|
[
"Apache-2.0"
] | 5
|
2015-03-09T19:43:18.000Z
|
2015-12-03T19:24:21.000Z
|
docs/conf.py
|
jaor/flatline
|
dabccdfd5b70520cbe99afa86a8fcf3f6a9dbd93
|
[
"Apache-2.0"
] | 11
|
2015-02-16T22:38:46.000Z
|
2020-09-24T08:37:04.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Flatline documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 10 00:20:23 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from recommonmark.parser import CommonMarkParser
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
source_parsers = {
'.md': CommonMarkParser,
}
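# Illustrative note (not generated by sphinx-quickstart): with the parser map
# above, a hypothetical Markdown page such as usage.md can be listed in the
# toctree next to .rst documents, e.g.
#
#     .. toctree::
#        :maxdepth: 2
#
#        usage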
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Flatline'
copyright = '2017-2018, The BigML Team'
author = 'The BigML Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'Flatline v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flatlinedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Flatline.tex', 'Flatline Documentation',
'The BigML Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'flatline', 'Flatline Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Flatline', 'Flatline Documentation',
author, 'Flatline', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| 28.504323
| 80
| 0.702558
|
f45a09c1ebf0f43e72c54554e40b85fbf60b0583
| 10,271
|
py
|
Python
|
dataset/dataset.py
|
VedaSmartMathSolver/image-to-latex
|
978f196c51912aa3ee72484907c984648f870694
|
[
"MIT"
] | null | null | null |
dataset/dataset.py
|
VedaSmartMathSolver/image-to-latex
|
978f196c51912aa3ee72484907c984648f870694
|
[
"MIT"
] | null | null | null |
dataset/dataset.py
|
VedaSmartMathSolver/image-to-latex
|
978f196c51912aa3ee72484907c984648f870694
|
[
"MIT"
] | null | null | null |
import albumentations as alb
from albumentations.pytorch import ToTensorV2
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
import torch.utils.data as data
from torchvision import transforms
import numpy as np
import imagesize
import logging
import glob
import os
from os.path import join
from collections import defaultdict
import pickle
from PIL import Image
import cv2
from transformers import PreTrainedTokenizerFast
from tqdm.auto import tqdm
train_transform = alb.Compose(
[
alb.Compose(
[alb.ShiftScaleRotate(shift_limit=0, scale_limit=(-.15, 0), rotate_limit=1, border_mode=0, interpolation=3,
value=[255, 255, 255], p=1),
alb.GridDistortion(distort_limit=0.1, border_mode=0, interpolation=3, value=[255, 255, 255], p=.5)], p=.15),
alb.InvertImg(p=.15),
alb.RGBShift(r_shift_limit=15, g_shift_limit=15,
b_shift_limit=15, p=0.3),
alb.GaussNoise(10, p=.2),
alb.RandomBrightnessContrast(.05, (-.2, 0), True, p=0.2),
alb.JpegCompression(95, p=.5),
alb.ToGray(always_apply=True),
alb.Normalize((0.7931, 0.7931, 0.7931), (0.1738, 0.1738, 0.1738)),
# alb.Sharpen()
ToTensorV2(),
]
)
test_transform = alb.Compose(
[
alb.ToGray(always_apply=True),
alb.Normalize((0.7931, 0.7931, 0.7931), (0.1738, 0.1738, 0.1738)),
# alb.Sharpen()
ToTensorV2(),
]
)
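# A minimal sketch (hypothetical input, not part of the original file): the
# test-time pipeline turns an HxWx3 uint8 image into a normalized CHW tensor;
# the dataset later keeps only the first (grayscale) channel.
#
#     import numpy as np
#     dummy = np.full((64, 256, 3), 255, dtype=np.uint8)
#     tensor = test_transform(image=dummy)['image']   # torch.Tensor, shape (3, 64, 256)
#     gray = tensor[:1]                               # shape (1, 64, 256)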
class Im2LatexDataset:
keep_smaller_batches = False
shuffle = True
batchsize = 16
max_dimensions = (1024, 512)
min_dimensions = (32, 32)
max_seq_len = 1024
pad_token = "[PAD]"
bos_token = "[BOS]"
eos_token = "[EOS]"
pad_token_id = 0
bos_token_id = 1
eos_token_id = 2
transform = train_transform
def __init__(self, equations=None, images=None, tokenizer=None, shuffle=True, batchsize=16, max_seq_len=1024,
max_dimensions=(1024, 512), min_dimensions=(32, 32), pad=False, keep_smaller_batches=False, test=False):
"""Generates a torch dataset from pairs of `equations` and `images`.
Args:
equations (str, optional): Path to equations. Defaults to None.
images (str, optional): Directory where images are saved. Defaults to None.
tokenizer (str, optional): Path to saved tokenizer. Defaults to None.
            shuffle (bool, optional): Defaults to True.
batchsize (int, optional): Defaults to 16.
max_seq_len (int, optional): Defaults to 1024.
max_dimensions (tuple(int, int), optional): Maximal dimensions the model can handle
min_dimensions (tuple(int, int), optional): Minimal dimensions the model can handle
pad (bool): Pad the images to `max_dimensions`. Defaults to False.
keep_smaller_batches (bool): Whether to also return batches with smaller size than `batchsize`. Defaults to False.
test (bool): Whether to use the test transformation or not. Defaults to False.
"""
if images is not None and equations is not None:
assert tokenizer is not None
self.images = [path.replace('\\', '/') for path in glob.glob(join(images, '*.png'))]
self.sample_size = len(self.images)
eqs = open(equations, 'r').read().split('\n')
self.indices = [int(os.path.basename(img).split('.')[0]) for img in self.images]
self.tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer)
self.shuffle = shuffle
self.batchsize = batchsize
self.max_dimensions = max_dimensions
self.min_dimensions = min_dimensions
self.pad = pad
self.keep_smaller_batches = keep_smaller_batches
self.test = test
self.data = defaultdict(lambda: [])
# check the image dimension for every image and group them together
try:
for i, im in tqdm(enumerate(self.images), total=len(self.images)):
width, height = imagesize.get(im)
if min_dimensions[0] <= width <= max_dimensions[0] and min_dimensions[1] <= height <= max_dimensions[1]:
self.data[(width, height)].append((eqs[self.indices[i]], im))
except KeyboardInterrupt:
pass
self.data = dict(self.data)
self._get_size()
iter(self)
def __len__(self):
return self.size
def __iter__(self):
self.i = 0
self.transform = test_transform if self.test else train_transform
self.pairs = []
for k in self.data:
info = np.array(self.data[k], dtype=object)
p = torch.randperm(len(info)) if self.shuffle else torch.arange(len(info))
for i in range(0, len(info), self.batchsize):
batch = info[p[i:i+self.batchsize]]
if len(batch.shape) == 1:
batch = batch[None, :]
if len(batch) < self.batchsize and not self.keep_smaller_batches:
continue
self.pairs.append(batch)
if self.shuffle:
self.pairs = np.random.permutation(np.array(self.pairs, dtype=object))
else:
self.pairs = np.array(self.pairs, dtype=object)
self.size = len(self.pairs)
return self
def __next__(self):
if self.i >= self.size:
raise StopIteration
self.i += 1
return self.prepare_data(self.pairs[self.i-1])
def prepare_data(self, batch):
"""loads images into memory
Args:
batch (numpy.array[[str, str]]): array of equations and image path pairs
Returns:
tuple(torch.tensor, torch.tensor): data in memory
"""
eqs, ims = batch.T
images = []
for path in list(ims):
im = cv2.imread(path)
if im is None:
print(path, 'not found!')
continue
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
if not self.test:
# sometimes convert to bitmask
if np.random.random() < .04:
im[im != 255] = 0
images.append(self.transform(image=im)['image'][:1])
tok = self.tokenizer(list(eqs), return_token_type_ids=False)
# pad with bos and eos token
for k, p in zip(tok, [[self.bos_token_id, self.eos_token_id], [1, 1]]):
tok[k] = pad_sequence([torch.LongTensor([p[0]]+x+[p[1]]) for x in tok[k]], batch_first=True, padding_value=self.pad_token_id)
# check if sequence length is too long
if self.max_seq_len < len(tok[0]):
return next(self)
try:
images = torch.cat(images).float().unsqueeze(1)
except RuntimeError:
logging.critical('Images not working: %s' % (' '.join(list(ims))))
return None, None
if self.pad:
h, w = images.shape[2:]
images = F.pad(images, (0, self.max_dimensions[0]-w, 0, self.max_dimensions[1]-h), value=1)
return tok, images
def _get_size(self):
self.size = 0
for k in self.data:
div, mod = divmod(len(self.data[k]), self.batchsize)
self.size += div # + (1 if mod > 0 else 0)
def load(self, filename, args=[]):
"""returns a pickled version of a dataset
Args:
filename (str): Path to dataset
"""
with open(filename, 'rb') as file:
x = pickle.load(file)
return x
def save(self, filename):
"""save a pickled version of a dataset
Args:
filename (str): Path to dataset
"""
with open(filename, 'wb') as file:
pickle.dump(self, file)
def update(self, **kwargs):
for k in ['batchsize', 'shuffle', 'pad', 'keep_smaller_batches', 'test', 'max_seq_len']:
if k in kwargs:
setattr(self, k, kwargs[k])
if 'max_dimensions' in kwargs or 'min_dimensions' in kwargs:
if 'max_dimensions' in kwargs:
self.max_dimensions = kwargs['max_dimensions']
if 'min_dimensions' in kwargs:
self.min_dimensions = kwargs['min_dimensions']
temp = {}
for k in self.data:
if self.min_dimensions[0] <= k[0] <= self.max_dimensions[0] and self.min_dimensions[1] <= k[1] <= self.max_dimensions[1]:
temp[k] = self.data[k]
self.data = temp
self._get_size()
iter(self)
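# Hypothetical construction sketch (paths are placeholders, not from the repo):
#
#     dataset = Im2LatexDataset(equations='math.txt', images='train/',
#                               tokenizer='tokenizer.json', batchsize=8, test=True)
#     tok, images = next(iter(dataset))   # tokenized LaTeX batch + image batch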
def generate_tokenizer(equations, output, vocab_size):
from tokenizers import Tokenizer, pre_tokenizers
from tokenizers.models import BPE
from tokenizers.trainers import BpeTrainer
tokenizer = Tokenizer(BPE())
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
trainer = BpeTrainer(special_tokens=["[PAD]", "[BOS]", "[EOS]"], vocab_size=vocab_size, show_progress=True)
tokenizer.train(trainer, [equations])
tokenizer.save(path=output, pretty=False)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Train model', add_help=False)
parser.add_argument('-i', '--images', type=str, default=None, help='Image folder')
parser.add_argument('-e', '--equations', type=str, default=None, help='equations text file')
parser.add_argument('-t', '--tokenizer', default=None, help='Pretrained tokenizer file')
parser.add_argument('-o', '--out', required=True, help='output file')
parser.add_argument('-s', '--vocab-size', default=8000, help='vocabulary size when training a tokenizer')
args = parser.parse_args()
if args.images is None and args.equations is not None and args.tokenizer is None:
print('Generate tokenizer')
generate_tokenizer(args.equations, args.out, args.vocab_size)
elif args.images is not None and args.equations is not None and args.tokenizer is not None:
print('Generate dataset')
Im2LatexDataset(args.equations, args.images, args.tokenizer).save(args.out)
else:
print('Not defined')
| 40.596838
| 137
| 0.60442
|
7da7317926e8edb5b1066fd1642d6b5a2acba52c
| 14,862
|
py
|
Python
|
website/addons/base/views.py
|
dplorimer/osf
|
9f3f400f62dc7bde18532949ed35bf1d3f6ec3d6
|
[
"Apache-2.0"
] | null | null | null |
website/addons/base/views.py
|
dplorimer/osf
|
9f3f400f62dc7bde18532949ed35bf1d3f6ec3d6
|
[
"Apache-2.0"
] | null | null | null |
website/addons/base/views.py
|
dplorimer/osf
|
9f3f400f62dc7bde18532949ed35bf1d3f6ec3d6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import uuid
import httplib
import functools
import furl
from flask import request
from flask import redirect
from flask import make_response
from modularodm.exceptions import NoResultsFound
from framework.auth import Auth
from framework.sessions import session
from framework.sentry import log_exception
from framework.exceptions import HTTPError
from framework.auth.decorators import must_be_logged_in, must_be_signed
from website import mails
from website import settings
from website.project import decorators
from website.addons.base import exceptions
from website.models import User, Node, NodeLog
from website.util import rubeus
from website.profile.utils import get_gravatar
from website.project.decorators import must_be_valid_project, must_be_contributor_or_public
from website.project.utils import serialize_node
@decorators.must_have_permission('write')
@decorators.must_not_be_registration
def disable_addon(auth, **kwargs):
node = kwargs['node'] or kwargs['project']
addon_name = kwargs.get('addon')
if addon_name is None:
raise HTTPError(httplib.BAD_REQUEST)
deleted = node.delete_addon(addon_name, auth)
return {'deleted': deleted}
@must_be_logged_in
def get_addon_user_config(**kwargs):
user = kwargs['auth'].user
addon_name = kwargs.get('addon')
if addon_name is None:
raise HTTPError(httplib.BAD_REQUEST)
addon = user.get_addon(addon_name)
if addon is None:
raise HTTPError(httplib.BAD_REQUEST)
return addon.to_json(user)
def check_file_guid(guid):
guid_url = '/{0}/'.format(guid._id)
if not request.path.startswith(guid_url):
url_split = request.url.split(guid.file_url)
try:
guid_url += url_split[1].lstrip('/')
except IndexError:
pass
return guid_url
return None
permission_map = {
'create_folder': 'write',
'revisions': 'read',
'metadata': 'read',
'download': 'read',
'upload': 'write',
'delete': 'write',
'copy': 'write',
'move': 'write',
'copyto': 'write',
'moveto': 'write',
'copyfrom': 'read',
'movefrom': 'write',
}
def check_access(node, user, action, key=None):
"""Verify that user can perform requested action on resource. Raise appropriate
error code if action cannot proceed.
"""
permission = permission_map.get(action, None)
if permission is None:
raise HTTPError(httplib.BAD_REQUEST)
if node.has_permission(user, permission):
return True
if permission == 'read':
if node.is_public or key in node.private_link_keys_active:
return True
# Users attempting to register projects with components might not have
# `write` permissions for all components. This will result in a 403 for
# all `copyto` actions as well as `copyfrom` actions if the component
# in question is not public. To get around this, we have to recursively
# check the node's parent node to determine if they have `write`
# permissions up the stack.
# TODO(hrybacki): is there a way to tell if this is for a registration?
# All nodes being registered that receive the `copyto` action will have
# `node.is_registration` == True. However, we have no way of telling if
# `copyfrom` actions are originating from a node being registered.
if action == 'copyfrom' or (action == 'copyto' and node.is_registration):
parent = node.parent_node
while parent:
if parent.has_permission(user, 'write'):
return True
parent = parent.parent_node
code = httplib.FORBIDDEN if user else httplib.UNAUTHORIZED
raise HTTPError(code)
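# Illustrative behaviour (hypothetical objects, not actual OSF fixtures): an
# anonymous user may perform read-mapped actions on a public node, while
# write-mapped actions raise 401.
#
#     check_access(public_node, None, 'download')   # True when node.is_public
#     check_access(public_node, None, 'upload')     # raises HTTPError(401)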
def make_auth(user):
if user is not None:
return {
'id': user._id,
'email': '{}@osf.io'.format(user._id),
'name': user.fullname,
}
return {}
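# Illustrative result (id and name are placeholders):
#
#     make_auth(some_user)  # {'id': 'abc12', 'email': 'abc12@osf.io', 'name': 'Jane Doe'}
#     make_auth(None)       # {}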
def restrict_addrs(*addrs):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
remote = request.remote_addr
if remote not in addrs:
raise HTTPError(httplib.FORBIDDEN)
return func(*args, **kwargs)
return wrapped
return wrapper
restrict_waterbutler = restrict_addrs(*settings.WATERBUTLER_ADDRS)
@restrict_waterbutler
def get_auth(**kwargs):
try:
action = request.args['action']
node_id = request.args['nid']
provider_name = request.args['provider']
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
cookie = request.args.get('cookie')
view_only = request.args.get('view_only')
if 'auth_user_id' in session.data:
user = User.load(session.data['auth_user_id'])
elif cookie:
user = User.from_cookie(cookie)
else:
user = None
node = Node.load(node_id)
if not node:
raise HTTPError(httplib.NOT_FOUND)
check_access(node, user, action, key=view_only)
provider_settings = node.get_addon(provider_name)
if not provider_settings:
raise HTTPError(httplib.BAD_REQUEST)
try:
credentials = provider_settings.serialize_waterbutler_credentials()
settings = provider_settings.serialize_waterbutler_settings()
except exceptions.AddonError:
log_exception()
raise HTTPError(httplib.BAD_REQUEST)
return {
'auth': make_auth(user),
'credentials': credentials,
'settings': settings,
'callback_url': node.api_url_for(
('create_waterbutler_log' if not node.is_registration else 'registration_callbacks'),
_absolute=True,
),
}
LOG_ACTION_MAP = {
'move': NodeLog.FILE_MOVED,
'copy': NodeLog.FILE_COPIED,
'create': NodeLog.FILE_ADDED,
'update': NodeLog.FILE_UPDATED,
'delete': NodeLog.FILE_REMOVED,
'create_folder': NodeLog.FOLDER_CREATED,
}
@must_be_signed
@restrict_waterbutler
@must_be_valid_project
def create_waterbutler_log(payload, **kwargs):
try:
auth = payload['auth']
action = LOG_ACTION_MAP[payload['action']]
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
user = User.load(auth['id'])
if user is None:
raise HTTPError(httplib.BAD_REQUEST)
auth = Auth(user=user)
node = kwargs['node'] or kwargs['project']
if action in (NodeLog.FILE_MOVED, NodeLog.FILE_COPIED):
for bundle in ('source', 'destination'):
for key in ('provider', 'materialized', 'name', 'nid'):
if key not in payload[bundle]:
raise HTTPError(httplib.BAD_REQUEST)
destination_node = node # For clarity
source_node = Node.load(payload['source']['nid'])
source = source_node.get_addon(payload['source']['provider'])
destination = node.get_addon(payload['destination']['provider'])
payload['source'].update({
'materialized': payload['source']['materialized'].lstrip('/'),
'addon': source.config.full_name,
'url': source_node.web_url_for(
'addon_view_or_download_file',
path=payload['source']['path'].lstrip('/'),
provider=payload['source']['provider']
),
'node': {
'_id': source_node._id,
'url': source_node.url,
'title': source_node.title,
}
})
payload['destination'].update({
'materialized': payload['destination']['materialized'].lstrip('/'),
'addon': destination.config.full_name,
'url': destination_node.web_url_for(
'addon_view_or_download_file',
path=payload['destination']['path'].lstrip('/'),
provider=payload['destination']['provider']
),
'node': {
'_id': destination_node._id,
'url': destination_node.url,
'title': destination_node.title,
}
})
payload.update({
'node': destination_node._id,
'project': destination_node.parent_id,
})
if not payload.get('errors'):
destination_node.add_log(
action=action,
auth=auth,
params=payload
)
if payload.get('email') is True or payload.get('errors'):
mails.send_mail(
user.username,
mails.FILE_OPERATION_FAILED if payload.get('errors')
else mails.FILE_OPERATION_SUCCESS,
action=payload['action'],
source_node=source_node,
destination_node=destination_node,
source_path=payload['source']['path'],
                destination_path=payload['destination']['path'],
source_addon=payload['source']['addon'],
destination_addon=payload['destination']['addon'],
)
else:
try:
metadata = payload['metadata']
node_addon = node.get_addon(payload['provider'])
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
if node_addon is None:
raise HTTPError(httplib.BAD_REQUEST)
metadata['path'] = metadata['path'].lstrip('/')
node_addon.create_waterbutler_log(auth, action, metadata)
return {'status': 'success'}
@must_be_valid_project
def addon_view_or_download_file_legacy(**kwargs):
query_params = request.args.to_dict()
node = kwargs.get('node') or kwargs['project']
action = query_params.pop('action', 'view')
provider = kwargs.get('provider', 'osfstorage')
if kwargs.get('path'):
path = kwargs['path']
elif kwargs.get('fid'):
path = kwargs['fid']
if 'download' in request.path or request.path.startswith('/api/v1/'):
action = 'download'
if kwargs.get('vid'):
query_params['version'] = kwargs['vid']
# If provider is OSFstorage, check existence of requested file in the filetree
# This prevents invalid GUIDs from being created
if provider == 'osfstorage':
node_settings = node.get_addon('osfstorage')
try:
path = node_settings.root_node.find_child_by_name(path)._id
except NoResultsFound:
raise HTTPError(
404, data=dict(
message_short='File not found',
message_long='You requested a file that does not exist.'
)
)
return redirect(
node.web_url_for(
'addon_view_or_download_file',
path=path,
provider=provider,
action=action,
**query_params
),
code=httplib.MOVED_PERMANENTLY
)
@must_be_valid_project
@must_be_contributor_or_public
def addon_view_or_download_file(auth, path, provider, **kwargs):
extras = request.args.to_dict()
action = extras.get('action', 'view')
node = kwargs.get('node') or kwargs['project']
node_addon = node.get_addon(provider)
if not path:
raise HTTPError(httplib.BAD_REQUEST)
if not node_addon:
raise HTTPError(httplib.BAD_REQUEST, {
'message_short': 'Bad Request',
'message_long': 'The add-on containing this file is no longer connected to the {}.'.format(node.project_or_component)
})
if not node_addon.has_auth:
raise HTTPError(httplib.UNAUTHORIZED, {
'message_short': 'Unauthorized',
'message_long': 'The add-on containing this file is no longer authorized.'
})
if not node_addon.complete:
raise HTTPError(httplib.BAD_REQUEST, {
'message_short': 'Bad Request',
'message_long': 'The add-on containing this file is no longer configured.'
})
if not path.startswith('/'):
path = '/' + path
guid_file, created = node_addon.find_or_create_file_guid(path)
if guid_file.guid_url != request.path:
guid_url = furl.furl(guid_file.guid_url)
guid_url.args.update(extras)
return redirect(guid_url)
guid_file.maybe_set_version(**extras)
if request.method == 'HEAD':
download_url = furl.furl(guid_file.download_url)
download_url.args.update(extras)
download_url.args['accept_url'] = 'false'
return make_response(('', 200, {'Location': download_url.url}))
if action == 'download':
download_url = furl.furl(guid_file.download_url)
download_url.args.update(extras)
return redirect(download_url.url)
return addon_view_file(auth, node, node_addon, guid_file, extras)
def addon_view_file(auth, node, node_addon, guid_file, extras):
# TODO: resolve circular import issue
from website.addons.wiki import settings as wiki_settings
ret = serialize_node(node, auth, primary=True)
# Disable OSF Storage file deletion in DISK_SAVING_MODE
if settings.DISK_SAVING_MODE and node_addon.config.short_name == 'osfstorage':
ret['user']['can_edit'] = False
try:
guid_file.enrich()
except exceptions.AddonEnrichmentError as e:
error = e.as_html()
else:
error = None
if guid_file._id not in node.file_guid_to_share_uuids:
node.file_guid_to_share_uuids[guid_file._id] = uuid.uuid4()
node.save()
if ret['user']['can_edit']:
sharejs_uuid = str(node.file_guid_to_share_uuids[guid_file._id])
else:
sharejs_uuid = None
size = getattr(guid_file, 'size', None)
    if size is None:  # size could be 0, which is a falsy value
        size = 9966699  # if we don't know the size, assume it's too big to edit
ret.update({
'error': error.replace('\n', '') if error else None,
'provider': guid_file.provider,
'file_path': guid_file.waterbutler_path,
'panels_used': ['edit', 'view'],
'private': getattr(node_addon, 'is_private', False),
'sharejs_uuid': sharejs_uuid,
'urls': {
'files': node.web_url_for('collect_file_trees'),
'render': guid_file.mfr_render_url,
'sharejs': wiki_settings.SHAREJS_URL,
'mfr': settings.MFR_SERVER_URL,
'gravatar': get_gravatar(auth.user, 25),
'external': getattr(guid_file, 'external_url', None)
},
# Note: must be called after get_or_start_render. This is really only for github
'size': size,
'extra': getattr(guid_file, 'extra', {}),
#NOTE: get_or_start_render must be called first to populate name
'file_name': getattr(guid_file, 'name', os.path.split(guid_file.waterbutler_path)[1]),
'materialized_path': getattr(guid_file, 'materialized', guid_file.waterbutler_path),
})
ret.update(rubeus.collect_addon_assets(node))
return ret
| 32.099352
| 129
| 0.634302
|
dd538814886b7edcabafb1cd941cc45cd5657a4b
| 15,854
|
py
|
Python
|
lib/mplcairo/base.py
|
briennakh/mplcairo
|
60456018425381b49d9ab1119be4dcdb78f20091
|
[
"MIT"
] | null | null | null |
lib/mplcairo/base.py
|
briennakh/mplcairo
|
60456018425381b49d9ab1119be4dcdb78f20091
|
[
"MIT"
] | null | null | null |
lib/mplcairo/base.py
|
briennakh/mplcairo
|
60456018425381b49d9ab1119be4dcdb78f20091
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
import functools
from functools import partial, partialmethod
from gzip import GzipFile
import logging
import os
from pathlib import Path
import shutil
import sys
from tempfile import TemporaryDirectory
from threading import RLock
import numpy as np
try:
from PIL import Image
except ImportError:
Image = None
import matplotlib as mpl
from matplotlib import _png, cbook, colors, dviread
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
RendererBase)
from matplotlib.backends import backend_ps
from matplotlib.mathtext import MathTextParser
from . import _mplcairo, _util
from ._mplcairo import _StreamSurfaceType
_log = logging.getLogger()
# FreeType2 is thread-unsafe.
_LOCK = RLock()
MathTextParser._backend_mapping["mplcairo"] = \
_mplcairo.MathtextBackendCairo
@functools.lru_cache(1)
def _get_tex_font_map():
return dviread.PsfontsMap(dviread.find_tex_file("pdftex.map"))
def _get_drawn_subarray_and_bounds(img):
"""Return the drawn region of a buffer and its ``(l, b, w, h)`` bounds."""
drawn = img[..., 3] != 0
x_nz, = drawn.any(axis=0).nonzero()
y_nz, = drawn.any(axis=1).nonzero()
if len(x_nz) and len(y_nz):
l, r = drawn.any(axis=0).nonzero()[0][[0, -1]]
b, t = drawn.any(axis=1).nonzero()[0][[0, -1]]
return img[b:t+1, l:r+1], (l, b, r - l + 1, t - b + 1)
else:
return np.zeros((0, 0, 4), dtype=np.uint8), (0, 0, 0, 0)
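# Hypothetical check of the helper above: a 4x5 RGBA buffer whose only opaque
# pixel sits at row 2, column 3 yields that single pixel and bounds
# (l=3, b=2, w=1, h=1).
#
#     buf = np.zeros((4, 5, 4), dtype=np.uint8)
#     buf[2, 3] = (255, 0, 0, 255)
#     sub, bounds = _get_drawn_subarray_and_bounds(buf)
#     assert bounds == (3, 2, 1, 1)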
_mplcairo._Region.to_string_argb = (
# For spoofing BackendAgg.BufferRegion.
lambda self:
_util.to_unmultiplied_rgba8888(self._get_buffer())[
..., [2, 1, 0, 3] if sys.byteorder == "little" else [3, 0, 1, 2]]
.tobytes())
class GraphicsContextRendererCairo(
_mplcairo.GraphicsContextRendererCairo,
# Fill in the missing methods.
GraphicsContextBase,
RendererBase):
def __init__(self, width, height, dpi):
# Hide the overloaded constructor, provided by from_pycairo_ctx.
_mplcairo.GraphicsContextRendererCairo.__init__(
self, width, height, dpi)
@classmethod
def from_pycairo_ctx(cls, ctx, dpi):
obj = _mplcairo.GraphicsContextRendererCairo.__new__(cls, ctx, dpi)
_mplcairo.GraphicsContextRendererCairo.__init__(obj, ctx, dpi)
return obj
@classmethod
def _for_fmt_output(cls, fmt, stream, width, height, dpi):
args = fmt, stream, width, height, dpi
obj = _mplcairo.GraphicsContextRendererCairo.__new__(cls, *args)
_mplcairo.GraphicsContextRendererCairo.__init__(obj, *args)
return obj
_for_pdf_output = partialmethod(_for_fmt_output, _StreamSurfaceType.PDF)
_for_ps_output = partialmethod(_for_fmt_output, _StreamSurfaceType.PS)
_for_eps_output = partialmethod(_for_fmt_output, _StreamSurfaceType.EPS)
_for_svg_output = partialmethod(_for_fmt_output, _StreamSurfaceType.SVG)
_for_script_output = partialmethod(
_for_fmt_output, _StreamSurfaceType.Script)
@classmethod
def _for_svgz_output(cls, stream, width, height, dpi):
gzip_file = GzipFile(fileobj=stream)
obj = cls._for_svg_output(gzip_file, width, height, dpi)
def _finish():
cls._finish(obj)
gzip_file.close()
obj._finish = _finish
return obj
def option_image_nocomposite(self):
return (not mpl.rcParams["image.composite_image"]
if self._has_vector_surface() else True)
# Based on the backend_pdf implementation.
def draw_tex(self, gc, x, y, s, prop, angle, ismath="TeX!", mtext=None):
fontsize = prop.get_size_in_points()
dvifile = self.get_texmanager().make_dvi(s, fontsize)
with dviread.Dvi(dvifile, self.dpi) as dvi:
page = next(iter(dvi))
mb = _mplcairo.MathtextBackendCairo()
for x1, y1, dvifont, glyph, width in page.text:
texfont = _get_tex_font_map()[dvifont.texname]
if texfont.filename is None:
# Not TypeError:
# :mpltest:`test_backend_svg.test_missing_psfont`.
raise ValueError("No font file found for {} ({!a})"
.format(texfont.psname, texfont.texname))
mb._render_usetex_glyph(
x1, -y1, texfont.filename, dvifont.size, glyph)
for x1, y1, h, w in page.boxes:
mb.render_rect_filled(x1, -y1, x1 + w, -(y1 + h))
mb._draw(self, x, y, angle)
def stop_filter(self, filter_func):
img = _util.to_unmultiplied_rgba8888(self._stop_filter_get_buffer())
img, (l, b, w, h) = _get_drawn_subarray_and_bounds(img)
if not (w and h):
return
img, dx, dy = filter_func(img[::-1] / 255, self.dpi)
if img.dtype.kind == "f":
img = np.asarray(img * 255, np.uint8)
width, height = self.get_canvas_width_height()
self.draw_image(self, l + dx, height - b - h + dy, img)
start_rasterizing = _mplcairo.GraphicsContextRendererCairo.start_filter
# While we could just write
# stop_rasterizing = partialmethod(stop_filter,
# lambda img, dpi: (img, 0, 0))
# this crashes inspect.signature on Py3.6
# (https://bugs.python.org/issue33009).
def stop_rasterizing(self):
return self.stop_filter(lambda img, dpi: (img, 0, 0))
# "Undocumented" APIs needed to patch Agg.
_renderer = property(lambda self: self._get_buffer()) # Needed for tkagg.
lock = _LOCK # Needed for webagg_core; fixed by matplotlib#10708.
def buffer_rgba(self): # Needed for webagg_core.
return _util.to_unmultiplied_rgba8888(self._get_buffer())
def tostring_rgba_minimized(self): # Needed for MixedModeRenderer.
img, bounds = _get_drawn_subarray_and_bounds(
_util.to_unmultiplied_rgba8888(self._get_buffer()))
return img.tobytes(), bounds
@functools.lru_cache(1)
def _fix_ipython_backend2gui():
# Fix hard-coded module -> toolkit mapping in IPython (used for `ipython
# --auto`). This cannot be done at import time due to ordering issues (so
# we do it when creating a canvas) and should only be done once (hence the
# `lru_cache(1)`).
if "IPython" not in sys.modules:
return
import IPython
ip = IPython.get_ipython()
if not ip:
return
from IPython.core import pylabtools as pt
pt.backend2gui.update({
"module://mplcairo.gtk": "gtk3",
"module://mplcairo.qt": "qt",
"module://mplcairo.tk": "tk",
"module://mplcairo.wx": "wx",
"module://mplcairo.macosx": "osx",
})
# Work around pylabtools.find_gui_and_backend always reading from
# rcParamsOrig.
orig_origbackend = mpl.rcParamsOrig["backend"]
try:
mpl.rcParamsOrig["backend"] = mpl.rcParams["backend"]
ip.enable_matplotlib()
finally:
mpl.rcParamsOrig["backend"] = orig_origbackend
class FigureCanvasCairo(FigureCanvasBase):
# Although this attribute should semantically be set from __init__ (it is
# purely an instance attribute), initializing it at the class level helps
# when patching FigureCanvasAgg (for gtk3agg) as the latter would fail to
# initialize it.
_last_renderer_call = None, None
def __init__(self, *args, **kwargs):
_fix_ipython_backend2gui()
super().__init__(*args, **kwargs)
def _get_cached_or_new_renderer(
self, func, *args, _draw_if_new=False, **kwargs):
last_call, last_renderer = self._last_renderer_call
if (func, args, kwargs) == last_call:
return last_renderer
else:
renderer = func(*args, **kwargs)
self._last_renderer_call = (func, args, kwargs), renderer
if _draw_if_new:
with _LOCK:
self.figure.draw(renderer)
return renderer
# NOTE: Needed for tight_layout() (and we use it too).
def get_renderer(self, *, _draw_if_new=False):
return self._get_cached_or_new_renderer(
GraphicsContextRendererCairo,
self.figure.bbox.width, self.figure.bbox.height, self.figure.dpi,
_draw_if_new=_draw_if_new)
renderer = property(get_renderer) # NOTE: Needed for FigureCanvasAgg.
def draw(self):
self._last_renderer_call = None, None # Draw on a clean canvas.
with _LOCK:
self.figure.draw(self.get_renderer())
super().draw()
def copy_from_bbox(self, bbox):
return self.get_renderer(_draw_if_new=True).copy_from_bbox(bbox)
def restore_region(self, region):
with _LOCK:
self.get_renderer().restore_region(region)
super().draw()
def _print_method(
self, renderer_factory,
path_or_stream, *, metadata=None, dpi=72,
# These arguments are already taken care of by print_figure().
facecolor=None, edgecolor=None, orientation="portrait",
dryrun=False, bbox_inches_restore=None):
self.figure.set_dpi(72)
with cbook.open_file_cm(path_or_stream, "wb") as stream:
renderer = renderer_factory(
stream, self.figure.bbox.width, self.figure.bbox.height, dpi)
renderer._set_metadata(metadata)
with _LOCK:
self.figure.draw(renderer)
            # _finish() corresponds to finalize() in Matplotlib's PDF and SVG
# backends; it is inlined in Matplotlib's PS backend.
renderer._finish()
print_pdf = partialmethod(
_print_method, GraphicsContextRendererCairo._for_pdf_output)
print_svg = partialmethod(
_print_method, GraphicsContextRendererCairo._for_svg_output)
print_svgz = partialmethod(
_print_method, GraphicsContextRendererCairo._for_svgz_output)
if os.environ.get("MPLCAIRO_SCRIPT_SURFACE") in ["raster", "vector"]:
print_cairoscript = partialmethod(
_print_method, GraphicsContextRendererCairo._for_script_output)
def _print_ps_impl(self, is_eps, path_or_stream,
orientation="portrait", papertype=None, **kwargs):
if papertype is None:
papertype = mpl.rcParams["ps.papersize"]
if orientation == "portrait":
if papertype == "auto":
width, height = self.figure.get_size_inches()
papertype = backend_ps._get_papertype(height, width)
elif orientation == "landscape":
if papertype == "auto":
width, height = self.figure.get_size_inches()
papertype = backend_ps._get_papertype(width, height)
else:
raise ValueError("Invalid orientation ({!r})".format(orientation))
dsc_comments = kwargs.setdefault("metadata", {})["_dsc_comments"] = [
"%%Orientation: {}".format(orientation)]
if not is_eps:
dsc_comments.append("%%DocumentPaperSizes: {}".format(papertype))
print_method = partial(self._print_method,
GraphicsContextRendererCairo._for_eps_output
if is_eps else
GraphicsContextRendererCairo._for_ps_output)
if mpl.rcParams["ps.usedistiller"]:
with TemporaryDirectory() as tmp_dirname:
tmp_name = str(Path(tmp_dirname, "tmp"))
print_method(tmp_name, **kwargs)
# Assume we can get away without passing the bbox.
{"ghostscript": backend_ps.gs_distill,
"xpdf": backend_ps.xpdf_distill}[
mpl.rcParams["ps.usedistiller"]](
tmp_name, False, ptype=papertype)
with open(tmp_name, "rb") as tmp_file, \
cbook.open_file_cm(path_or_stream, "wb") as stream:
shutil.copyfileobj(tmp_file, stream)
else:
print_method(path_or_stream, **kwargs)
print_ps = partialmethod(_print_ps_impl, False)
print_eps = partialmethod(_print_ps_impl, True)
def _get_fresh_unmultiplied_rgba8888(self):
# Swap out the cache, as savefig may be playing with the background
# color.
last_renderer_call = self._last_renderer_call
self._last_renderer_call = (None, None)
with _LOCK:
renderer = self.get_renderer(_draw_if_new=True)
self._last_renderer_call = last_renderer_call
return _util.to_unmultiplied_rgba8888(renderer._get_buffer())
def print_rgba(
self, path_or_stream, *, metadata=None,
# These arguments are already taken care of by print_figure().
dpi=72, facecolor=None, edgecolor=None, orientation="portrait",
dryrun=False, bbox_inches_restore=None):
img = self._get_fresh_unmultiplied_rgba8888()
if dryrun:
return
with cbook.open_file_cm(path_or_stream, "wb") as stream:
stream.write(img.tobytes())
print_raw = print_rgba
def print_png(
self, path_or_stream, *, metadata=None,
# These arguments are already taken care of by print_figure().
dpi=72, facecolor=None, edgecolor=None, orientation="portrait",
dryrun=False, bbox_inches_restore=None):
img = self._get_fresh_unmultiplied_rgba8888()
if dryrun:
return
full_metadata = OrderedDict(
[("Software",
"matplotlib version {}, https://matplotlib.org"
.format(mpl.__version__))])
full_metadata.update(metadata or {})
with cbook.open_file_cm(path_or_stream, "wb") as stream:
_png.write_png(img, stream, metadata=full_metadata)
if Image:
def print_jpeg(
self, path_or_stream, *,
# These arguments are already taken care of by print_figure().
dpi=72, facecolor=None, edgecolor=None, orientation="portrait",
dryrun=False, bbox_inches_restore=None,
# Remaining kwargs are passed to PIL.
**kwargs):
buf = self._get_fresh_unmultiplied_rgba8888()
if dryrun:
return
img = Image.frombuffer(
"RGBA", buf.shape[:2][::-1], buf, "raw", "RGBA", 0, 1)
# Composite against the background (actually we could just skip the
# conversion to unpremultiplied RGBA earlier).
# NOTE: Agg composites against rcParams["savefig.facecolor"].
background = tuple(
(np.array(colors.to_rgb(facecolor)) * 255).astype(int))
composited = Image.new("RGB", buf.shape[:2][::-1], background)
composited.paste(img, img)
kwargs.setdefault("quality", mpl.rcParams["savefig.jpeg_quality"])
composited.save(path_or_stream, format="jpeg",
dpi=(self.figure.dpi, self.figure.dpi), **kwargs)
print_jpg = print_jpeg
def print_tiff(
self, path_or_stream, *,
# These arguments are already taken care of by print_figure().
dpi=72, facecolor=None, edgecolor=None, orientation="portrait",
dryrun=False, bbox_inches_restore=None):
buf = self._get_fresh_unmultiplied_rgba8888()
if dryrun:
return
(Image.frombuffer(
"RGBA", buf.shape[:2][::-1], buf, "raw", "RGBA", 0, 1)
.save(path_or_stream, format="tiff",
dpi=(self.figure.dpi, self.figure.dpi)))
print_tif = print_tiff
@_Backend.export
class _BackendCairo(_Backend):
FigureCanvas = FigureCanvasCairo
FigureManager = FigureManagerBase
| 40.035354
| 79
| 0.633783
|
e7316ad4022e41d081f5e1ba4d261d92940bd723
| 9,043
|
py
|
Python
|
dataProcess.py
|
aguijarro/vizProject
|
b8ac13264c56c5fa6d3c6574c531bdb24be81366
|
[
"MIT"
] | null | null | null |
dataProcess.py
|
aguijarro/vizProject
|
b8ac13264c56c5fa6d3c6574c531bdb24be81366
|
[
"MIT"
] | null | null | null |
dataProcess.py
|
aguijarro/vizProject
|
b8ac13264c56c5fa6d3c6574c531bdb24be81366
|
[
"MIT"
] | null | null | null |
import psycopg2
import psycopg2.extras
import os
import collections
import json
def create_json_file(result, upload_folder, name):
#fligths_result = json.dumps(flight_list)
try:
"Creating json file"
with open(os.path.join(upload_folder, name), 'w') as outfile:
json.dump(result, outfile)
except:
print "Failed creating json file..."
raise
def close_db(conn):
conn.close()
print('Database connection closed.')
def make_query(conn, statement):
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
try:
print "Executing query ..."
cur.execute(statement)
except:
print "Failed executing query..."
raise
resultset = cur.fetchall()
dict_result = []
for row in resultset:
dict_result.append(dict(row))
return dict_result
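# Illustrative call (result values are placeholders): every row comes back as a
# plain dict keyed by column name, e.g.
#
#     rows = make_query(conn, "select year, count(id) total from flights group by year;")
#     # rows -> [{'year': 2003, 'total': ...}, {'year': 2004, 'total': ...}, ...]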
def connect_db(user, password):
# Try to connect
try:
conn = psycopg2.connect(dbname='flights', user=user, password=password)
print "Database connection started."
return conn
except:
raise
def main():
USER = "aguijarro"
PASSWORD = "aguijarro"
UPLOAD_FOLDER = os.path.realpath('.') + '/data'
# Make database connection
conn = connect_db(USER, PASSWORD)
# Query Data
year_st = "select year, count(id) total \
from flights group by year;"
year_rt = make_query(conn, year_st)
month_st = "select month, count(id) total \
from flights group by month;"
month_rt = make_query(conn, month_st)
day_month_st = "select day_of_month, count(id) total \
from flights group by day_of_month;"
day_month_rt = make_query(conn, day_month_st)
day_week_st = "select day_of_week, count(id) total \
from flights group by day_of_week;"
day_week_rt = make_query(conn, day_week_st)
carrier_year_st = "(select f.year, c.description, count(f.id) total\
from flights f, carriers c\
where f.unique_carrier = c.code\
and f.year = '2000'\
group by f.year, c.description\
order by count(f.id) desc\
limit 5)\
UNION\
(select f.year, c.description, count(f.id) total\
from flights f, carriers c\
where f.unique_carrier = c.code\
and f.year = '2001'\
group by f.year, c.description\
order by count(f.id) desc\
limit 5)\
UNION\
(select f.year, c.description, count(f.id) total\
from flights f, carriers c\
where f.unique_carrier = c.code\
and f.year = '2002'\
group by f.year, c.description\
order by count(f.id) desc\
limit 5)\
UNION\
(select f.year, c.description, count(f.id) total\
from flights f, carriers c\
where f.unique_carrier = c.code\
and f.year = '2003'\
group by f.year, c.description\
order by count(f.id) desc\
limit 5)\
UNION\
(select f.year, c.description, count(f.id) total\
from flights f, carriers c\
where f.unique_carrier = c.code\
and f.year = '2004'\
group by f.year, c.description\
order by count(f.id) desc\
limit 5)\
UNION\
(select f.year, c.description, count(f.id) total\
from flights f, carriers c\
where f.unique_carrier = c.code\
and f.year = '2005'\
group by f.year, c.description\
order by count(f.id) desc\
limit 5)\
UNION\
(select f.year, c.description, count(f.id) total\
from flights f, carriers c\
where f.unique_carrier = c.code\
and f.year = '2006'\
group by f.year, c.description\
order by count(f.id) desc\
limit 5)\
UNION\
(select f.year, c.description, count(f.id) total\
from flights f, carriers c\
where f.unique_carrier = c.code\
and f.year = '2007'\
group by f.year, c.description\
order by count(f.id) desc\
limit 5)\
UNION\
(select f.year, c.description, count(f.id) total\
from flights f, carriers c\
where f.unique_carrier = c.code\
and f.year = '2008'\
group by f.year, c.description\
order by count(f.id) desc\
limit 5);"
carrier_year_rt = make_query(conn, carrier_year_st)
top5_carriers_rs = "select c.description, count(f.id) total\
from flights f, carriers c\
where f.unique_carrier = c.code\
group by c.description\
order by count(f.id) desc\
limit 5"
top5_carriers_rt = make_query(conn, top5_carriers_rs)
airport_st = "select iata, airport, city, state, lat, long\
from airports\
where country = 'USA'\
and iata not in ('ABO', 'BQN', 'CPX',\
'FAQ', 'GRO', 'GSN', 'GUM', 'MAZ',\
'PPG', 'PR03', 'PSE', 'SIG', 'SJU',\
'STT', 'STX', 'TNI', 'TT01', 'VQS',\
'X63', 'X6','X67', 'X95', 'X96', 'Z08',\
'X66');"
airport_rt = make_query(conn, airport_st)
    # Returns the top 25 airports by number of flights
top25_airport_st = "select a.iata, a.airport, a.city, a.state, a.lat,\
a.long, f.year, count(f.id) flights\
from airports a, flights f\
where a.iata = f.origin\
and f.origin in ('ATL','ORD','DFW','LAX','PHX',\
'IAH','DEN','LAS','DTW','MSP','EWR','SFO','BOS',\
'SLC','CLT','LGA','CVG','PHL','MCO','SEA','BWI',\
'STL','IAD','DCA','JFK')\
group by a.iata, a.airport, a.city, a.state, a.lat,\
a.long, f.year;"
top25_airport_rt = make_query(conn, top25_airport_st)
year_cancelled_st = "select f.year, f.cancelled, count(f.id) total\
from flights f\
group by f.year, f.cancelled;"
year_cancelled_rt = make_query(conn, year_cancelled_st)
year_cancelled_code_st = "select f.year, f.cancellation_code, count(f.id) total\
from flights f\
group by f.year, f.cancellation_code;"
year_cancelled_code_rt = make_query(conn, year_cancelled_code_st)
cancelled_code_st = "select f.cancellation_code, count(f.id) total\
from flights f\
group by f.cancellation_code;"
cancelled_code_rt = make_query(conn, cancelled_code_st)
cancelled_st = "select f.cancelled, count(f.id) total\
from flights f\
group by f.cancelled;"
cancelled_rt = make_query(conn, cancelled_st)
#Create json file
create_json_file(year_rt, UPLOAD_FOLDER, "year_flights.json")
create_json_file(month_rt, UPLOAD_FOLDER, "month_flights.json")
create_json_file(day_month_rt, UPLOAD_FOLDER, "day_months_flights.json")
create_json_file(day_week_rt, UPLOAD_FOLDER, "day_week_flights.json")
create_json_file(carrier_year_rt, UPLOAD_FOLDER, "carrier_year.json")
create_json_file(carrier_year_rt, UPLOAD_FOLDER, "top5_carriers.json")
create_json_file(airport_rt, UPLOAD_FOLDER, "airports.json")
create_json_file(top25_airport_rt, UPLOAD_FOLDER, "top25_airports.json")
create_json_file(year_cancelled_rt, UPLOAD_FOLDER, "year_cancelled.json")
create_json_file(year_cancelled_code_rt, UPLOAD_FOLDER, "year_cancelled_code.json")
create_json_file(cancelled_code_rt, UPLOAD_FOLDER, "cancelled_code.json")
create_json_file(cancelled_rt, UPLOAD_FOLDER, "cancelled.json")
# Close database connection
close_db(conn)
if __name__ == '__main__':
main()
| 38.978448
| 87
| 0.513104
|
bd7951d3a41f5b8b40f3e59980ff11d76608f0b7
| 4,719
|
py
|
Python
|
dpaybase/bip38.py
|
dpays/dpay-python
|
09218d2edc6361a0ebe8095b13c30d59b31a051c
|
[
"MIT"
] | null | null | null |
dpaybase/bip38.py
|
dpays/dpay-python
|
09218d2edc6361a0ebe8095b13c30d59b31a051c
|
[
"MIT"
] | null | null | null |
dpaybase/bip38.py
|
dpays/dpay-python
|
09218d2edc6361a0ebe8095b13c30d59b31a051c
|
[
"MIT"
] | null | null | null |
import hashlib
import logging
import os
import sys
from binascii import hexlify, unhexlify
from .account import PrivateKey
from .base58 import Base58, base58decode
from dpay.utils import compat_bytes
log = logging.getLogger(__name__)
try:
from Crypto.Cipher import AES
except ImportError:
raise ImportError("Missing dependency: pycrypto")
SCRYPT_MODULE = os.environ.get('SCRYPT_MODULE', None)
if not SCRYPT_MODULE:
try:
import scrypt
SCRYPT_MODULE = "scrypt"
except ImportError:
try:
import pylibscrypt as scrypt
SCRYPT_MODULE = "pylibscrypt"
except ImportError:
raise ImportError("Missing dependency: scrypt or pylibscrypt")
elif 'pylibscrypt' in SCRYPT_MODULE:
try:
import pylibscrypt as scrypt
except ImportError:
raise ImportError("Missing dependency: pylibscrypt explicitly set but missing")
elif 'scrypt' in SCRYPT_MODULE:
try:
import scrypt
except ImportError:
raise ImportError("Missing dependency: scrypt explicitly set but missing")
log.debug("Using scrypt module: %s" % SCRYPT_MODULE)
class SaltException(Exception):
pass
def _encrypt_xor(a, b, aes):
""" Returns encrypt(a ^ b). """
a = unhexlify('%0.32x' % (int((a), 16) ^ int(hexlify(b), 16)))
return aes.encrypt(a)
def encrypt(privkey, passphrase):
""" BIP0038 non-ec-multiply encryption. Returns BIP0038 encrypted privkey.
:param privkey: Private key
:type privkey: Base58
:param str passphrase: UTF-8 encoded passphrase for encryption
:return: BIP0038 non-ec-multiply encrypted wif key
:rtype: Base58
"""
privkeyhex = repr(privkey) # hex
addr = format(privkey.uncompressed.address, "BTC")
a = compat_bytes(addr, 'ascii')
salt = hashlib.sha256(hashlib.sha256(a).digest()).digest()[0:4]
if SCRYPT_MODULE == "scrypt":
if sys.version >= '3.0.0':
key = scrypt.hash(passphrase, salt, 16384, 8, 8)
else:
key = scrypt.hash(str(passphrase), str(salt), 16384, 8, 8)
elif SCRYPT_MODULE == "pylibscrypt":
key = scrypt.scrypt(compat_bytes(passphrase, "utf-8"), salt, 16384, 8, 8)
else:
raise ValueError("No scrypt module loaded")
(derived_half1, derived_half2) = (key[:32], key[32:])
aes = AES.new(derived_half2)
encrypted_half1 = _encrypt_xor(privkeyhex[:32], derived_half1[:16], aes)
encrypted_half2 = _encrypt_xor(privkeyhex[32:], derived_half1[16:], aes)
" flag byte is forced 0xc0 because Graphene only uses compressed keys "
payload = (
b'\x01' + b'\x42' + b'\xc0' + salt + encrypted_half1 + encrypted_half2)
" Checksum "
checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
privatekey = hexlify(payload + checksum).decode('ascii')
return Base58(privatekey)
def decrypt(encrypted_privkey, passphrase):
"""BIP0038 non-ec-multiply decryption. Returns WIF privkey.
:param Base58 encrypted_privkey: Private key
:param str passphrase: UTF-8 encoded passphrase for decryption
:return: BIP0038 non-ec-multiply decrypted key
:rtype: Base58
:raises SaltException: if checksum verification failed (e.g. wrong
password)
"""
d = unhexlify(base58decode(encrypted_privkey))
    d = d[2:]  # strip the leading 0x01 0x42 prefix bytes
flagbyte = d[0:1] # get flag byte
d = d[1:] # get payload
assert flagbyte == b'\xc0', "Flagbyte has to be 0xc0"
salt = d[0:4]
d = d[4:-4]
if SCRYPT_MODULE == "scrypt":
if sys.version >= '3.0.0':
key = scrypt.hash(passphrase, salt, 16384, 8, 8)
else:
key = scrypt.hash(str(passphrase), str(salt), 16384, 8, 8)
elif SCRYPT_MODULE == "pylibscrypt":
key = scrypt.scrypt(compat_bytes(passphrase, "utf-8"), salt, 16384, 8, 8)
else:
raise ValueError("No scrypt module loaded")
derivedhalf1 = key[0:32]
derivedhalf2 = key[32:64]
encryptedhalf1 = d[0:16]
encryptedhalf2 = d[16:32]
aes = AES.new(derivedhalf2)
decryptedhalf2 = aes.decrypt(encryptedhalf2)
decryptedhalf1 = aes.decrypt(encryptedhalf1)
privraw = decryptedhalf1 + decryptedhalf2
privraw = ('%064x' %
(int(hexlify(privraw), 16) ^ int(hexlify(derivedhalf1), 16)))
wif = Base58(privraw)
""" Verify Salt """
privkey = PrivateKey(format(wif, "wif"))
addr = format(privkey.uncompressed.address, "BTC")
a = compat_bytes(addr, 'ascii')
saltverify = hashlib.sha256(hashlib.sha256(a).digest()).digest()[0:4]
if saltverify != salt:
raise SaltException(
'checksum verification failed! Password may be incorrect.')
return wif
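# Illustrative round-trip sketch (not part of this module; shown only to make the
# intended pairing of encrypt()/decrypt() explicit). It assumes that PrivateKey()
# with no argument generates a fresh random key, as in graphene-style key classes:
#
#   key = PrivateKey()
#   encrypted = encrypt(key, "my passphrase")          # Base58-typed BIP0038 payload
#   recovered = decrypt(<base58 string of encrypted>, "my passphrase")
#   # `recovered` is the same key, WIF-encodable via format(recovered, "wif")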
| 33.707143
| 87
| 0.660945
|
5f61166f2e87783d7d75198ab76ad81711edbfdf
| 910
|
py
|
Python
|
televisao.py
|
gacarvalho/python-televisao
|
cf417e0dc9d3d3531dc7b9c336cbfabb3110fa43
|
[
"MIT"
] | null | null | null |
televisao.py
|
gacarvalho/python-televisao
|
cf417e0dc9d3d3531dc7b9c336cbfabb3110fa43
|
[
"MIT"
] | null | null | null |
televisao.py
|
gacarvalho/python-televisao
|
cf417e0dc9d3d3531dc7b9c336cbfabb3110fa43
|
[
"MIT"
] | null | null | null |
class Televisao:
def __init__(self):
self.ligada = False
self.canal = 2
    def power(self):
        # Toggle the power state
        self.ligada = not self.ligada
def aumenta_canal(self):
if self.ligada:
self.canal += 1
def diminiui_canal(self):
if self.ligada:
self.canal -= 1
televisao = Televisao()
print('A TV está ligada? {}'.format(televisao.ligada))
print('Canal: {}'.format(televisao.canal))
televisao.power()
televisao.aumenta_canal()
print('A TV está ligada? {}'.format(televisao.ligada))
print('Canal: {}'.format(televisao.canal))
televisao.power()
televisao.aumenta_canal()
televisao.aumenta_canal()
print('A TV está ligada? {}'.format(televisao.ligada))
print('Canal: {}'.format(televisao.canal))
televisao.power()
televisao.diminiui_canal()
| 24.594595
| 55
| 0.610989
|
6311fbbef7af1649d364a0f1ac8fae7cdac9b264
| 8,570
|
py
|
Python
|
rp_transferto/views.py
|
praekeltfoundation/rp-sidekick
|
01f2d1ced8caefb39c93112f74baac70dbe943bc
|
[
"BSD-3-Clause"
] | 1
|
2018-10-05T21:47:43.000Z
|
2018-10-05T21:47:43.000Z
|
rp_transferto/views.py
|
praekeltfoundation/rp-sidekick
|
01f2d1ced8caefb39c93112f74baac70dbe943bc
|
[
"BSD-3-Clause"
] | 114
|
2018-08-14T14:37:20.000Z
|
2020-07-31T15:56:51.000Z
|
rp_transferto/views.py
|
praekeltfoundation/rp-sidekick
|
01f2d1ced8caefb39c93112f74baac70dbe943bc
|
[
"BSD-3-Clause"
] | null | null | null |
from django.http import JsonResponse
from rest_framework import status
from rest_framework.views import APIView
from sidekick.models import Organization
from sidekick.utils import clean_msisdn
from .models import MsisdnInformation, TopupAttempt
from .tasks import buy_airtime_take_action, buy_product_take_action, topup_data
def process_status_code(info):
"""
returns a JsonResponse object with status updated to reflect info
For more detail on possible TransferTo error codes, see
section "9.0 Standard API Errors" in https://shop.transferto.com/shop/v3/doc/TransferTo_API.pdf
@param info: dict containing key "error_code"
@returns: JsonResponse object with status updated to reflect "error_code"
@raises keyError: if "error_code" is not contained within info dict
"""
error_code = info["error_code"]
if error_code not in ["0", 0]:
return JsonResponse(info, status=400)
# default to 200 status code
return JsonResponse(info)
class TransferToView(APIView):
client_method_name = None
args_for_client_method = None
def get(self, request, *args, **kwargs):
# check that org exists
# check that request belongs to org
# check that org has TransferTo account
org_id = kwargs["org_id"]
try:
org = Organization.objects.get(id=org_id)
except Organization.DoesNotExist:
return JsonResponse(data={}, status=status.HTTP_400_BAD_REQUEST)
if not org.users.filter(id=request.user.id).exists():
return JsonResponse(data={}, status=status.HTTP_401_UNAUTHORIZED)
try:
client = org.transferto_account.first().get_transferto_client()
except AttributeError:
return JsonResponse(data={}, status=status.HTTP_400_BAD_REQUEST)
if self.args_for_client_method:
kwargs_for_client = {
key: kwargs[key] for key in self.args_for_client_method
}
response = getattr(client, self.client_method_name)(**kwargs_for_client)
else:
response = getattr(client, self.client_method_name)()
if "error_code" in response:
return process_status_code(response)
return JsonResponse(response)
class Ping(TransferToView):
client_method_name = "ping"
class MsisdnInfo(APIView):
def get(self, request, *args, **kwargs):
org_id = kwargs["org_id"]
msisdn = kwargs["msisdn"]
try:
org = Organization.objects.get(id=org_id)
except Organization.DoesNotExist:
return JsonResponse(data={}, status=status.HTTP_400_BAD_REQUEST)
if not org.users.filter(id=request.user.id).exists():
return JsonResponse(data={}, status=status.HTTP_401_UNAUTHORIZED)
use_cache = (
request.GET.get("no_cache", False)
and request.GET.get("no_cache").lower() == "true"
)
if (
use_cache
or not MsisdnInformation.objects.filter(
msisdn=clean_msisdn(msisdn)
).exists()
):
try:
client = org.transferto_account.first().get_transferto_client()
except AttributeError:
return JsonResponse(data={}, status=status.HTTP_400_BAD_REQUEST)
cleaned_msisdn = clean_msisdn(msisdn)
info = client.get_misisdn_info(cleaned_msisdn)
MsisdnInformation.objects.create(data=info, msisdn=cleaned_msisdn)
# get cached result
else:
info = dict(
MsisdnInformation.objects.filter(msisdn=clean_msisdn(msisdn))
.latest()
.data
)
return process_status_code(info)
class ReserveId(TransferToView):
client_method_name = "reserve_id"
class GetCountries(TransferToView):
client_method_name = "get_countries"
class GetOperators(TransferToView):
client_method_name = "get_operators"
args_for_client_method = ["country_id"]
class GetOperatorAirtimeProducts(TransferToView):
client_method_name = "get_operator_airtime_products"
args_for_client_method = ["operator_id"]
class GetOperatorProducts(TransferToView):
client_method_name = "get_operator_products"
args_for_client_method = ["operator_id"]
class GetCountryServices(TransferToView):
client_method_name = "get_country_services"
args_for_client_method = ["country_id"]
class TopUpData(APIView):
def get(self, request, *args, **kwargs):
org_id = kwargs["org_id"]
try:
org = Organization.objects.get(id=org_id)
except Organization.DoesNotExist:
return JsonResponse(data={}, status=status.HTTP_400_BAD_REQUEST)
if not org.users.filter(id=request.user.id).exists():
return JsonResponse(data={}, status=status.HTTP_401_UNAUTHORIZED)
try:
# check that there is a valid TransferTo Account attached
org.transferto_account.first().get_transferto_client()
except AttributeError:
return JsonResponse(data={}, status=status.HTTP_400_BAD_REQUEST)
data = request.GET.dict()
msisdn = data["msisdn"]
user_uuid = data["user_uuid"]
data_amount = data["data_amount"]
# org_id, msisdn, user_uuid, amount
# e.g. 1, "+27827620000", "4a1b8cc8-905c-4c44-8bd2-dee3c4a3e2d1", "100MB"
topup_data.delay(org_id, msisdn, user_uuid, data_amount)
return JsonResponse({"info_txt": "top_up_data"})
class BuyProductTakeAction(APIView):
def get(self, request, *args, **kwargs):
org_id = kwargs["org_id"]
product_id = kwargs["product_id"]
msisdn = kwargs["msisdn"]
flow_uuid_key = "flow_uuid"
user_uuid_key = "user_uuid"
data = dict(request.GET.dict())
flow_start = request.GET.get(flow_uuid_key, False)
if flow_start:
del data[flow_uuid_key]
user_uuid = request.GET.get(user_uuid_key, False)
if user_uuid:
del data[user_uuid_key]
# remaining variables will be coerced from key:value mapping
# which represents variable on rapidpro to update: variable from response
buy_product_take_action.delay(
org_id,
clean_msisdn(msisdn),
product_id,
user_uuid=user_uuid,
values_to_update=data,
flow_start=flow_start,
)
return JsonResponse({"info_txt": "buy_product_take_action"})
class BuyAirtimeTakeAction(APIView):
def get(self, request, *args, **kwargs):
org_id = kwargs["org_id"]
airtime_amount = kwargs["airtime_amount"]
msisdn = kwargs["msisdn"]
from_string = kwargs["from_string"]
try:
org = Organization.objects.get(id=org_id)
except Organization.DoesNotExist:
return JsonResponse(data={}, status=status.HTTP_400_BAD_REQUEST)
if not org.users.filter(id=request.user.id).exists():
return JsonResponse(data={}, status=status.HTTP_401_UNAUTHORIZED)
try:
# check that there is a valid TransferTo Account attached
org.transferto_account.first().get_transferto_client()
except AttributeError:
return JsonResponse(data={}, status=status.HTTP_400_BAD_REQUEST)
flow_uuid_key = "flow_uuid"
user_uuid_key = "user_uuid"
fail_flow_uuid_key = "fail_flow_start"
data = dict(request.GET.dict())
flow_start = request.GET.get(flow_uuid_key, False)
if flow_start:
del data[flow_uuid_key]
user_uuid = request.GET.get(user_uuid_key, False)
if user_uuid:
del data[user_uuid_key]
fail_flow_start = request.GET.get(fail_flow_uuid_key, False)
if fail_flow_start:
del data[fail_flow_uuid_key]
# remaining variables will be coerced from key:value mapping
# which represents variable on rapidpro to update: variable from response
topup_attempt = TopupAttempt.objects.create(
msisdn=msisdn,
from_string=from_string,
amount=airtime_amount,
org=org,
rapidpro_user_uuid=user_uuid,
)
buy_airtime_take_action.delay(
topup_attempt_id=topup_attempt.id,
values_to_update=data,
flow_start=flow_start,
fail_flow_start=fail_flow_start,
)
return JsonResponse({"info_txt": "buy_airtime_take_action"})
| 34.143426
| 99
| 0.653676
|
e2405aa8a5a4ccf0aac749c15aa7e526a8017685
| 276
|
py
|
Python
|
codeabbey/ejerciciospracticepython/ejercicio1.py
|
rubyal/calculadora
|
f86a106651816efb18d94acf58160721d350ee7d
|
[
"Unlicense"
] | null | null | null |
codeabbey/ejerciciospracticepython/ejercicio1.py
|
rubyal/calculadora
|
f86a106651816efb18d94acf58160721d350ee7d
|
[
"Unlicense"
] | null | null | null |
codeabbey/ejerciciospracticepython/ejercicio1.py
|
rubyal/calculadora
|
f86a106651816efb18d94acf58160721d350ee7d
|
[
"Unlicense"
] | null | null | null |
# Prompt for the user's name and age
print("SE INICIA PROGRAMA")
print("ingrese su nombre")
nombre=str(input())
print("ingrese su edad")
edad=int(input())
print("su nombre es {} y tiene {}".format(nombre,edad))
año=str((2021-edad)+100)
print(nombre + " cumplira 100 años en el año " + año)
| 30.666667
| 55
| 0.702899
|
c591f51810dae09b096c113cf9d54310aa0c9308
| 224
|
py
|
Python
|
setup.py
|
willembressers/CarND-Traffic-Sign-Classifier-Project
|
c461f6cdff0e0bdd2733fa1ca5e1452d3030d4c6
|
[
"MIT"
] | null | null | null |
setup.py
|
willembressers/CarND-Traffic-Sign-Classifier-Project
|
c461f6cdff0e0bdd2733fa1ca5e1452d3030d4c6
|
[
"MIT"
] | null | null | null |
setup.py
|
willembressers/CarND-Traffic-Sign-Classifier-Project
|
c461f6cdff0e0bdd2733fa1ca5e1452d3030d4c6
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Udacity | Traffic Sign Recognition',
author='Willem Bressers',
license='MIT',
)
| 20.363636
| 53
| 0.674107
|
b745171f37a30e5aa4e7b85af58db6e59001783b
| 13,196
|
py
|
Python
|
scripts/imagetransfer.py
|
Darkdadaah/pywikibot-core
|
6cad0915f3e058fe4cf2bce4f37d395d21636df9
|
[
"MIT"
] | null | null | null |
scripts/imagetransfer.py
|
Darkdadaah/pywikibot-core
|
6cad0915f3e058fe4cf2bce4f37d395d21636df9
|
[
"MIT"
] | null | null | null |
scripts/imagetransfer.py
|
Darkdadaah/pywikibot-core
|
6cad0915f3e058fe4cf2bce4f37d395d21636df9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to copy images to Wikimedia Commons, or to another wiki.
Syntax:
python pwb.py imagetransfer pagename [-interwiki] [-tolang:x] [-tofamily:y]
Arguments:
-interwiki Look for images in pages found through interwiki links.
-keepname Keep the filename and do not verify description while replacing
-tolang:x Copy the image to the wiki in language x
-tofamily:y Copy the image to a wiki in the family y
-file:z Upload many files from textfile: [[Image:x]]
[[Image:y]]
If pagename is an image description page, offers to copy the image to the
target site. If it is a normal page, it will offer to copy any of the images
used on that page, or if the -interwiki argument is used, any of the images
used on a page reachable via interwiki links.
"""
#
# (C) Andre Engels, 2004
# (C) Pywikibot team, 2004-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: b745171f37a30e5aa4e7b85af58db6e59001783b $'
import re
import sys
import pywikibot
from pywikibot import config, i18n, pagegenerators, textlib
from pywikibot.specialbots import UploadRobot
nowCommonsTemplate = {
'ar': u'{{الآن كومنز|%s}}',
'de': u'{{NowCommons|%s}}',
'fr': u'{{Désormais sur Commons|%s}}',
'en': u'{{subst:ncd|Image:%s}}',
'fa': u'{{موجود در انبار|%s}}',
'he': u'{{גם בוויקישיתוף|%s}}',
'hu': u'{{azonnali-commons|Kép:%s}}',
'ia': u'{{OraInCommons|Imagine:%s}}',
'it': u'{{NowCommons unlink|%s}}',
'ja': u'{{NowCommons|Image:%s}}',
'kk': u'{{NowCommons|Image:%s}}',
'li': u'{{NowCommons|%s}}',
'lt': u'{{NowCommons|Image:%s}}',
'nds-nl': u'{{NoenCommons|File:%s}}',
'nl': u'{{NuCommons|Image:%s}}',
'pl': u'{{NowCommons|%s}}',
'pt': u'{{NowCommons|%s}}',
'sr': u'{{NowCommons|%s}}',
'zh': u'{{NowCommons|Image:%s}}',
}
# Translations for license templates.
# Must only be given when they are in fact different.
licenseTemplates = {
('wikipedia:de', 'commons:commons'): {
u'Bild-GFDL': u'GFDL',
u'Bild-GFDL-OpenGeoDB': u'GFDL-OpenGeoDB',
u'Bild-Innweb-Lizenz': u'Map-Austria-GNU',
u'Bild-PD': u'PD',
u'Bild-PD-alt': u'PD-old',
u'Bild-PD-Kunst': u'PD-Art',
u'Bild-PD-US': u'PD-USGov',
},
('wikipedia:fa', 'commons:commons'): {
u'مالکیت عمومی': u'PD',
u'مالکیت عمومی-خود': u'PD-self',
u'مجوز گنو': u'GFDL',
u'مجوز گنو-خود': u'GFDL-self',
u'نگاره قدیمی': u'PD-Iran',
u'نگاره نوشتاری': u'PD-textlogo',
u'نگاره عراقی': u'PD-Iraq',
u'نگاره بریتانیا': u'PD-UK',
u'نگاره هابل': u'PD-Hubble',
u'نگاره آمریکا': u'PD-US',
u'نگاره دولت آمریکا': u'PD-USGov',
u'کک-یاد-دو': u'Cc-by-2.0',
u'کک-یاد-حفظ-دونیم': u'Cc-by-sa-2.5',
u'کک-یاد-سه': u'Cc-by-3.0',
},
('wikipedia:fr', 'commons:commons'): {
u'Domaine public': u'PD'
},
('wikipedia:he', 'commons:commons'): {
u'שימוש חופשי': u'PD-self',
u'שימוש חופשי מוגן': u'Copyrighted free use',
u'שימוש חופשי מוגן בתנאי': u'Copyrighted free use provided that',
u'תמונה ישנה': u'PD-Israel',
u'ייחוס': u'Attribution',
u'לוגו ויקימדיה': u'Copyright by Wikimedia',
},
('wikipedia:hu', 'commons:commons'): {
u'Közkincs': u'PD',
u'Közkincs-régi': u'PD-old',
},
('wikipedia:pt', 'commons:commons'): {
u'Domínio público': u'PD',
},
}
class ImageTransferBot(object):
"""Image transfer bot."""
def __init__(self, generator, targetSite=None, interwiki=False,
keep_name=False, ignore_warning=False):
"""Constructor."""
self.generator = generator
self.interwiki = interwiki
self.targetSite = targetSite
self.keep_name = keep_name
self.ignore_warning = ignore_warning
def transferImage(self, sourceImagePage):
"""
Download image and its description, and upload it to another site.
@return: the filename which was used to upload the image
"""
sourceSite = sourceImagePage.site
url = sourceImagePage.fileUrl().encode('utf-8')
pywikibot.output(u"URL should be: %s" % url)
# localize the text that should be printed on the image description page
try:
description = sourceImagePage.get()
# try to translate license templates
if (sourceSite.sitename,
self.targetSite.sitename) in licenseTemplates:
for old, new in licenseTemplates[
(sourceSite.sitename,
self.targetSite.sitename)].items():
new = '{{%s}}' % new
old = re.compile('{{%s}}' % old)
description = textlib.replaceExcept(description, old, new,
['comment', 'math',
'nowiki', 'pre'])
description = i18n.twtranslate(self.targetSite,
'imagetransfer-file_page_message',
dict(site=sourceSite,
description=description))
description += '\n\n'
description += sourceImagePage.getFileVersionHistoryTable()
# add interwiki link
if sourceSite.family == self.targetSite.family:
description += u'\r\n\r\n{0}'.format(sourceImagePage)
except pywikibot.NoPage:
description = ''
pywikibot.output(
'Image does not exist or description page is empty.')
except pywikibot.IsRedirectPage:
description = ''
pywikibot.output('Image description page is redirect.')
else:
bot = UploadRobot(url=url, description=description,
targetSite=self.targetSite,
urlEncoding=sourceSite.encoding(),
keepFilename=self.keep_name,
verifyDescription=not self.keep_name,
ignoreWarning=self.ignore_warning)
# try to upload
targetFilename = bot.run()
if targetFilename and self.targetSite.family.name == 'commons' and \
self.targetSite.code == 'commons':
# upload to Commons was successful
reason = i18n.twtranslate(sourceSite,
'imagetransfer-nowcommons_notice')
# try to delete the original image if we have a sysop account
if sourceSite.family.name in config.sysopnames and \
sourceSite.lang in config.sysopnames[sourceSite.family.name]:
if sourceImagePage.delete(reason):
return
if sourceSite.lang in nowCommonsTemplate and \
sourceSite.family.name in config.usernames and \
sourceSite.lang in config.usernames[sourceSite.family.name]:
# add the nowCommons template.
pywikibot.output(u'Adding nowCommons template to %s'
% sourceImagePage.title())
sourceImagePage.put(sourceImagePage.get() + '\n\n' +
nowCommonsTemplate[sourceSite.lang]
% targetFilename,
summary=reason)
def showImageList(self, imagelist):
"""Print image list."""
for i in range(len(imagelist)):
image = imagelist[i]
pywikibot.output('-' * 60)
pywikibot.output(u"%s. Found image: %s"
% (i, image.title(asLink=True)))
try:
# Show the image description page's contents
pywikibot.output(image.get())
# look if page already exists with this name.
# TODO: consider removing this: a different image of the same
# name may exist on the target wiki, and the bot user may want
# to upload anyway, using another name.
try:
# Maybe the image is on the target site already
targetTitle = '%s:%s' % (self.targetSite.namespaces.FILE,
image.title().split(':', 1)[1])
targetImage = pywikibot.Page(self.targetSite, targetTitle)
targetImage.get()
pywikibot.output(u"Image with this name is already on %s."
% self.targetSite)
pywikibot.output('-' * 60)
pywikibot.output(targetImage.get())
sys.exit()
except pywikibot.NoPage:
# That's the normal case
pass
except pywikibot.IsRedirectPage:
pywikibot.output(
u"Description page on target wiki is redirect?!")
except pywikibot.NoPage:
break
pywikibot.output('=' * 60)
def run(self):
"""Run the bot."""
for page in self.generator:
if self.interwiki:
imagelist = []
for linkedPage in page.interwiki():
linkedPage = pywikibot.Page(linkedPage)
imagelist.extend(
linkedPage.imagelinks(
followRedirects=True))
elif page.is_filepage():
imagePage = pywikibot.FilePage(page.site, page.title())
imagelist = [imagePage]
else:
imagelist = list(page.imagelinks(followRedirects=True))
while len(imagelist) > 0:
self.showImageList(imagelist)
if len(imagelist) == 1:
# no need to query the user, only one possibility
todo = 0
else:
pywikibot.output(
u"Give the number of the image to transfer.")
todo = pywikibot.input(u"To end uploading, press enter:")
if not todo:
break
todo = int(todo)
if todo in range(len(imagelist)):
if (imagelist[todo].fileIsShared() and
imagelist[todo].site.image_repository() ==
self.targetSite.image_repository()):
pywikibot.output(
'The image is already shared on {0}.'.format(
self.targetSite.image_repository()))
else:
self.transferImage(imagelist[todo])
# remove the selected image from the list
imagelist = imagelist[:todo] + imagelist[todo + 1:]
else:
pywikibot.output(u'No such image number.')
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
gen = None
interwiki = False
keep_name = False
targetLang = None
targetFamily = None
local_args = pywikibot.handle_args(args)
generator_factory = pagegenerators.GeneratorFactory(
positional_arg_name='page')
for arg in local_args:
if arg == '-interwiki':
interwiki = True
elif arg.startswith('-keepname'):
keep_name = True
elif arg.startswith('-tolang:'):
targetLang = arg[8:]
elif arg.startswith('-tofamily:'):
targetFamily = arg[10:]
else:
generator_factory.handleArg(arg)
gen = generator_factory.getCombinedGenerator()
if not gen:
pywikibot.bot.suggest_help(missing_parameters=['page'])
return False
if not targetLang and not targetFamily:
targetSite = pywikibot.Site('commons', 'commons')
else:
if not targetLang:
targetLang = pywikibot.Site().language
if not targetFamily:
targetFamily = pywikibot.Site().family
targetSite = pywikibot.Site(targetLang, targetFamily)
bot = ImageTransferBot(gen, interwiki=interwiki, targetSite=targetSite,
keep_name=keep_name)
bot.run()
if __name__ == "__main__":
main()
| 39.746988
| 80
| 0.521673
|
e18aa30eaff031a94d6efaf198f1eb945bcaa239
| 4,766
|
py
|
Python
|
meraki/controllers/connectivity_monitoring_destinations_controller.py
|
meraki/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 37
|
2019-04-24T14:01:33.000Z
|
2022-01-28T01:37:21.000Z
|
meraki/controllers/connectivity_monitoring_destinations_controller.py
|
ankita66666666/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 10
|
2019-07-09T16:35:11.000Z
|
2021-12-07T03:47:53.000Z
|
meraki/controllers/connectivity_monitoring_destinations_controller.py
|
ankita66666666/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 17
|
2019-04-30T23:53:21.000Z
|
2022-02-07T22:57:44.000Z
|
# -*- coding: utf-8 -*-
"""
meraki
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
from meraki.api_helper import APIHelper
from meraki.configuration import Configuration
from meraki.controllers.base_controller import BaseController
from meraki.http.auth.custom_header_auth import CustomHeaderAuth
class ConnectivityMonitoringDestinationsController(BaseController):
"""A Controller to access Endpoints in the meraki API."""
def get_network_connectivity_monitoring_destinations(self,
network_id):
"""Does a GET request to /networks/{networkId}/connectivityMonitoringDestinations.
Return the connectivity testing destinations for an MX network
Args:
network_id (string): TODO: type description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=network_id)
# Prepare query URL
_url_path = '/networks/{networkId}/connectivityMonitoringDestinations'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': network_id
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def update_network_connectivity_monitoring_destinations(self,
options=dict()):
"""Does a PUT request to /networks/{networkId}/connectivityMonitoringDestinations.
Update the connectivity testing destinations for an MX network
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
network_id -- string -- TODO: type description here.
Example:
update_network_connectivity_monitoring_destinations --
UpdateNetworkConnectivityMonitoringDestinationsModel
-- TODO: type description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=options.get("network_id"))
# Prepare query URL
_url_path = '/networks/{networkId}/connectivityMonitoringDestinations'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': options.get('network_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(options.get('update_network_connectivity_monitoring_destinations')))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
| 39.38843
| 175
| 0.633865
|
4f6c3cc4f88ddcfaf9e9bcd3601d54bd1671d8fe
| 826
|
py
|
Python
|
h5py/highlevel.py
|
rainwoodman/h5py
|
25eba5f019d4417de0908ce602f14c038faa9237
|
[
"BSD-3-Clause"
] | 31
|
2018-10-19T15:28:36.000Z
|
2022-02-14T03:01:25.000Z
|
SLpackage/private/thirdparty/pythonpkgs/h5py/h5py_2.7.0/lib/python2.7/site-packages/h5py/highlevel.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 13
|
2020-01-28T22:20:14.000Z
|
2022-03-11T23:20:14.000Z
|
SLpackage/private/thirdparty/pythonpkgs/h5py/h5py_2.7.0/lib/python2.7/site-packages/h5py/highlevel.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 10
|
2019-01-10T04:02:12.000Z
|
2021-11-17T01:52:15.000Z
|
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
# pylint: disable=unused-import
"""
This is the deprecated legacy high-level interface.
Everything here is canonically located at the root of the package.
New code should import directly from there, e.g. "from h5py import File".
"""
from __future__ import absolute_import
from ._hl import filters
from ._hl.base import is_hdf5, HLObject
from ._hl.files import File
from ._hl.group import Group, SoftLink, ExternalLink, HardLink
from ._hl.dataset import Dataset
from ._hl.datatype import Datatype
from ._hl.attrs import AttributeManager
| 27.533333
| 77
| 0.751816
|
26b2971ee42a1c53ef409144c7fdee8c762ab763
| 7,048
|
py
|
Python
|
password_based_shooting_stars_resource.py
|
andmcadams/shooting-stars-server
|
7bd8f74401dcf013d1a55edf1d97527d6cb77543
|
[
"BSD-2-Clause"
] | null | null | null |
password_based_shooting_stars_resource.py
|
andmcadams/shooting-stars-server
|
7bd8f74401dcf013d1a55edf1d97527d6cb77543
|
[
"BSD-2-Clause"
] | null | null | null |
password_based_shooting_stars_resource.py
|
andmcadams/shooting-stars-server
|
7bd8f74401dcf013d1a55edf1d97527d6cb77543
|
[
"BSD-2-Clause"
] | 3
|
2021-11-16T13:27:01.000Z
|
2022-03-21T02:48:42.000Z
|
import json
import sqlite3
import time
import falcon
from base_shooting_stars_resource import BaseShootingStarsResource, hook_validate_auth
from constants import ERROR_MSG_AUTHORIZATION_FAIL_SUBMIT, ERROR_MSG_DATA_VALIDATION_FAIL
master_pw_whitelist = set()
scout_pw_whitelist = set()
def hook_validate_scout_password(req: falcon.request.Request, resp: falcon.response.Response, resource, params):
authorization = req.auth
if authorization not in scout_pw_whitelist and authorization not in master_pw_whitelist:
msg = ERROR_MSG_AUTHORIZATION_FAIL_SUBMIT
raise falcon.HTTPBadRequest(title='Bad request', description=msg)
def hook_validate_master_password(req: falcon.request.Request, resp: falcon.response.Response, resource, params):
authorization = req.auth
if authorization not in master_pw_whitelist:
msg = ERROR_MSG_AUTHORIZATION_FAIL_SUBMIT
raise falcon.HTTPBadRequest(title='Bad request', description=msg)
def hook_validate_whitelist_params(req: falcon.request.Request, resp: falcon.response.Response, resource, params):
msg = ERROR_MSG_DATA_VALIDATION_FAIL
if not isinstance(req.media.get('password', None), str):
raise falcon.HTTPBadRequest(title='Bad request', description=msg)
class PasswordBasedShootingStarsResource:
def __init__(self, path_to_db: sqlite3.Connection):
self.shooting_stars_resource = BaseShootingStarsResource(path_to_db)
scout_pws = self.shooting_stars_resource.conn.execute('SELECT password FROM scout_whitelist').fetchall()
for scout in scout_pws:
scout_pw_whitelist.add(scout['password'])
master_pws = self.shooting_stars_resource.conn.execute('SELECT password FROM master_whitelist').fetchall()
for master in master_pws:
master_pw_whitelist.add(master['password'])
@falcon.before(hook_validate_master_password)
def on_get_whitelist(self, req: falcon.request.Request, resp: falcon.response.Response):
resp.status = falcon.HTTP_200
resp.text = json.dumps(list(scout_pw_whitelist.difference(master_pw_whitelist)))
resp.append_header('Access-Control-Allow-Origin', '*')
return resp
@falcon.before(hook_validate_master_password)
@falcon.before(hook_validate_whitelist_params)
def on_post_whitelist(self, req: falcon.request.Request, resp: falcon.response.Response):
pw = req.media['password'].strip()
scout_pw_whitelist.add(pw)
self.shooting_stars_resource.conn.execute("""
INSERT
INTO scout_whitelist
VALUES
(?)
""", [pw])
self.shooting_stars_resource.conn.commit()
resp.status = falcon.HTTP_200
resp.append_header('Access-Control-Allow-Origin', '*')
return resp
@falcon.before(hook_validate_master_password)
@falcon.before(hook_validate_whitelist_params)
def on_delete_whitelist(self, req: falcon.request.Request, resp: falcon.response.Response):
pw = req.media['password'].strip()
if pw in scout_pw_whitelist:
self.shooting_stars_resource.conn.execute("""
DELETE
FROM data
WHERE
sharedKey = ?
""", [pw])
self.shooting_stars_resource.conn.execute("""
DELETE
FROM scout_whitelist
WHERE
password = ?
""", [pw])
self.shooting_stars_resource.conn.commit()
scout_pw_whitelist.discard(pw)
resp.text = 'Successfully removed from whitelist and data cleared'
else:
resp.text = 'No such key found in the whitelist'
resp.status = falcon.HTTP_200
resp.append_header('Access-Control-Allow-Origin', '*')
return resp
def on_options_whitelist(self, req: falcon.request.Request, resp: falcon.response.Response):
resp.status = falcon.HTTP_200
resp.append_header('Access-Control-Allow-Origin', '*')
resp.append_header('Access-Control-Allow-Headers', '*')
resp.append_header('Access-Control-Allow-Methods', '*')
return resp
@falcon.before(hook_validate_scout_password)
def on_post(self, req: falcon.request.Request, resp: falcon.response.Response):
return self.shooting_stars_resource.on_post(req, resp)
@falcon.before(hook_validate_auth)
def on_get(self, req: falcon.request.Request, resp: falcon.response.Response):
"""Handles GET requests"""
resp.status = falcon.HTTP_200 # This is the default status
# Get all current worlds for all keys.
lowest_time = int(time.time()) - (60*60)
highest_time = int(time.time()) + (60*150)
rows = self.shooting_stars_resource.conn.execute("""
SELECT location, world, MAX(minTime) as minTime, MIN(maxTime) as maxTime
FROM data
WHERE
maxTime > ? AND maxTime < ?
GROUP BY location, world
ORDER BY maxTime
""", [lowest_time, highest_time]).fetchall()
# Put data in json format
data_blob = []
for row in rows:
data = {
'location': row['location'],
'world': row['world'],
'minTime': row['minTime'],
'maxTime': row['maxTime']
}
data_blob.append(data)
resp.text = json.dumps(data_blob)
return resp
@falcon.before(hook_validate_master_password)
def on_get_separate(self, req: falcon.request.Request, resp: falcon.response.Response):
"""Handles GET requests"""
resp.status = falcon.HTTP_200 # This is the default status
# Get all current worlds for all keys.
lowest_time = int(time.time()) - (60*60)
highest_time = int(time.time()) + (60*150)
        rows = self.shooting_stars_resource.conn.execute("""
SELECT location, world, minTime, maxTime, sharedKey
FROM data
WHERE
maxTime > ? AND maxTime < ?
ORDER BY world
""", [lowest_time, highest_time]).fetchall()
# Put data in json format
data_blob = []
for row in rows:
data = {
'location': row['location'],
'world': row['world'],
'minTime': row['minTime'],
'maxTime': row['maxTime'],
'password': (row['sharedKey'] if row['sharedKey'] not in master_pw_whitelist else 'MASTER PASSWORD')
}
data_blob.append(data)
resp.text = json.dumps(data_blob)
resp.append_header('Access-Control-Allow-Origin', '*')
return resp
def on_options_separate(self, req: falcon.request.Request, resp: falcon.response.Response):
resp.status = falcon.HTTP_200
resp.append_header('Access-Control-Allow-Origin', '*')
resp.append_header('Access-Control-Allow-Headers', '*')
resp.append_header('Access-Control-Allow-Methods', '*')
return resp
| 41.216374
| 116
| 0.649688
|
780685cce1b0cbe4cebbe976ae38ba6fd9a37e8d
| 3,371
|
py
|
Python
|
api/tests/helpers.py
|
vanillaSlice/dawdle
|
99c2cc7c59433bd5d05677218537301e2ebb1d2d
|
[
"MIT"
] | null | null | null |
api/tests/helpers.py
|
vanillaSlice/dawdle
|
99c2cc7c59433bd5d05677218537301e2ebb1d2d
|
[
"MIT"
] | 26
|
2019-07-05T09:34:02.000Z
|
2020-07-10T16:22:44.000Z
|
api/tests/helpers.py
|
vanillaSlice/dawdle
|
99c2cc7c59433bd5d05677218537301e2ebb1d2d
|
[
"MIT"
] | null | null | null |
from faker import Faker
from werkzeug import exceptions
from werkzeug.http import HTTP_STATUS_CODES
from dawdle import create_app
from dawdle.components.auth.utils import (create_fresh_user_access_token,
create_user_access_token,
create_user_refresh_token,
encrypt_password)
from dawdle.components.users.models import User
from dawdle.extensions.marshmallow import Limits
fake = Faker()
class TestBase:
@classmethod
def setup_class(cls):
cls._app = create_app(testing=True)
cls._app.app_context().push()
cls._client = cls._app.test_client()
cls._password = fake.password()
cls._user = cls._create_user(password=cls._password)
cls._fresh_access_token = create_fresh_user_access_token(cls._user)
cls._access_token = create_user_access_token(cls._user)
cls._refresh_token = create_user_refresh_token(cls._user)
@classmethod
def _create_user(cls, **kwargs):
user = User()
user.active = kwargs.get("active", True)
user.email = kwargs.get("email", fake.email())
user.initials = kwargs.get(
"initials",
fake.pystr(max_chars=Limits.MAX_USER_INITIALS_LENGTH),
).upper()
user.name = kwargs.get("name", fake.name())
user.password = encrypt_password(
kwargs.get("password", fake.password()),
)
return user.save()
@classmethod
def teardown_class(cls):
cls.__clear_db()
@classmethod
def __clear_db(cls):
User.objects.delete()
@staticmethod
def __assert_response(response, status, body=None):
assert response.status_code == status
if body:
assert response.json == body
def _assert_200(self, response, body=None):
self.__assert_response(response, 200, body)
def _assert_201(self, response, body=None):
self.__assert_response(response, 201, body)
def _assert_204(self, response, body=None):
self.__assert_response(response, 204, body)
def __assert_error(self, response, exception, messages=None):
body = {
"status": exception.code,
"name": HTTP_STATUS_CODES[exception.code],
"description": exception.description,
"messages": messages if messages else {},
}
self.__assert_response(response, exception.code, body)
def _assert_400(self, response, messages=None):
self.__assert_error(response, exceptions.BadRequest(), messages)
def _assert_401(self, response, messages=None):
self.__assert_error(response, exceptions.Unauthorized(), messages)
def _assert_403(self, response, messages=None):
self.__assert_error(response, exceptions.Forbidden(), messages)
def _assert_404(self, response, messages=None):
self.__assert_error(response, exceptions.NotFound(), messages)
def _assert_415(self, response, messages=None):
self.__assert_error(
response,
exceptions.UnsupportedMediaType(),
messages,
)
def _assert_500(self, response, messages=None):
self.__assert_error(
response,
exceptions.InternalServerError(),
messages,
)
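# Hypothetical usage sketch (not an actual test in this suite): concrete test
# modules subclass TestBase and use the prepared client, user and tokens. The
# "/users/me" route below is an assumption for illustration only.
#
#   class TestGetMe(TestBase):
#       def test_ok(self):
#           response = self._client.get(
#               "/users/me",
#               headers={"Authorization": f"Bearer {self._access_token}"},
#           )
#           self._assert_200(response)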
| 33.04902
| 75
| 0.641056
|
181418b3e595bfcf2ca9681df1b4d95b8af19af7
| 9,318
|
py
|
Python
|
tools/report-converter/codechecker_report_converter/cli.py
|
vabridgers/codechecker-1
|
08ae7e9d21a94385756b192fc13df86b9d1f79e3
|
[
"Apache-2.0"
] | null | null | null |
tools/report-converter/codechecker_report_converter/cli.py
|
vabridgers/codechecker-1
|
08ae7e9d21a94385756b192fc13df86b9d1f79e3
|
[
"Apache-2.0"
] | null | null | null |
tools/report-converter/codechecker_report_converter/cli.py
|
vabridgers/codechecker-1
|
08ae7e9d21a94385756b192fc13df86b9d1f79e3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
import argparse
import glob
import importlib
import logging
import os
import shutil
import sys
from typing import Dict, Optional, Tuple
# If we run this script in an environment where 'codechecker_report_converter'
# module is not available we should add the grandparent directory of this file
# to the system path.
# TODO: This section will not be needed when CodeChecker will be delivered as
# a python package and will be installed in a virtual environment with all the
# dependencies.
if __name__ == '__main__':
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.dirname(current_dir))
from codechecker_report_converter.report.report_file import \
SUPPORTED_ANALYZER_EXTENSIONS
from codechecker_report_converter.report.parser import plist
LOG = logging.getLogger('report-converter')
msg_formatter = logging.Formatter('[%(levelname)s] - %(message)s')
log_handler = logging.StreamHandler(sys.stdout)
log_handler.setFormatter(msg_formatter)
LOG.setLevel(logging.INFO)
LOG.addHandler(log_handler)
class RawDescriptionDefaultHelpFormatter(
argparse.RawDescriptionHelpFormatter,
argparse.ArgumentDefaultsHelpFormatter
):
""" Adds default values to argument help and retains any formatting in
descriptions. """
pass
# Load supported converters dynamically.
supported_converters = {}
analyzers_dir_path = os.path.join(os.path.dirname(
os.path.realpath(__file__)), "analyzers")
analyzers = sorted(glob.glob(os.path.join(
analyzers_dir_path, '**', 'analyzer_result.py'), recursive=True))
for analyzer_path in analyzers:
analyzer_module = '.'.join(os.path.relpath(
os.path.splitext(analyzer_path)[0],
analyzers_dir_path).split(os.path.sep))
module_name = f"codechecker_report_converter.analyzers.{analyzer_module}"
try:
module = importlib.import_module(module_name)
if hasattr(module, "AnalyzerResult"):
analyzer_result = getattr(module, "AnalyzerResult")
supported_converters[analyzer_result.TOOL_NAME] = analyzer_result
except ModuleNotFoundError:
pass
supported_metadata_keys = ["analyzer_command", "analyzer_version"]
def transform_output(
analyzer_result: str,
parser_type: str,
output_dir: str,
file_name: str,
export_type: str,
clean: bool = False,
metadata: Optional[Dict[str, str]] = None
):
""" Creates .plist files from the given output to the given output dir. """
if clean and os.path.isdir(output_dir):
LOG.info("Previous analysis results in '%s' have been removed, "
"overwriting with current result", output_dir)
shutil.rmtree(output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
parser = supported_converters[parser_type]()
parser.transform(
analyzer_result, output_dir, export_type, file_name, metadata)
def process_metadata(metadata) -> Tuple[Dict[str, str], Dict[str, str]]:
""" Returns a tuple of valid and invalid metadata values. """
if not metadata:
return {}, {}
valid_values = {}
invalid_values = {}
for m in metadata:
key, value = m.split("=", 1)
if key in supported_metadata_keys:
valid_values[key] = value
else:
invalid_values[key] = value
return valid_values, invalid_values
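# Example (illustrative): with supported_metadata_keys as defined above,
#   process_metadata(["analyzer_version=1.2.3", "foo=bar"])
# returns ({"analyzer_version": "1.2.3"}, {"foo": "bar"}).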
def __add_arguments_to_parser(parser):
""" Add arguments to the the given parser. """
parser.add_argument('input',
type=str,
metavar='file',
default=argparse.SUPPRESS,
help="Code analyzer output result file which will be "
"parsed and used to generate a CodeChecker "
"report directory.")
parser.add_argument('-o', '--output',
dest="output_dir",
required=True,
default=argparse.SUPPRESS,
help="This directory will be used to generate "
"CodeChecker report directory files.")
parser.add_argument('-t', '--type',
dest='type',
metavar='TYPE',
required=True,
choices=supported_converters,
default=argparse.SUPPRESS,
help="Specify the format of the code analyzer output. "
"Currently supported output types are: " +
', '.join(sorted(supported_converters)) + ".")
parser.add_argument('-e', '--export',
type=str,
dest='export',
metavar='EXPORT',
choices=SUPPORTED_ANALYZER_EXTENSIONS,
default=plist.EXTENSION,
help="Specify the export format of the converted "
"reports. Currently supported export types "
"are: " + ', '.join(sorted(
SUPPORTED_ANALYZER_EXTENSIONS)) + ".")
parser.add_argument('--meta',
nargs='*',
dest='meta',
metavar='META',
required=False,
help="Metadata information which will be stored "
"alongside the run when the created report "
"directory will be stored to a running "
"CodeChecker server. It has the following "
"format: key=value. Valid key values are: "
"{0}.".format(', '.join(supported_metadata_keys)))
parser.add_argument('--filename',
type=str,
dest='filename',
metavar='FILENAME',
default="{source_file}_{analyzer}_{file_hash}",
help="This option can be used to override the default "
"plist file name output of this tool. This tool "
"can produce multiple plist files on the given "
"code analyzer output result file. The problem "
"is if we run this tool multiple times on the "
"same file, it may override some plist files. To "
"prevent this we can generate a unique hash into "
"the plist file names with this option. For "
"example: "
"'{source_file}_{analyzer}_{file_hash}_xxxxx'. "
"{source_file}, {analyzer} and {file_hash} are "
"special values which will be replaced with the "
"current analyzer, source file name and hash of "
"the absolute file path where the bug was "
"found. ")
parser.add_argument('-c', '--clean',
dest="clean",
required=False,
action='store_true',
help="Delete files stored in the output directory.")
parser.add_argument('-v', '--verbose',
action='store_true',
dest='verbose',
help="Set verbosity level.")
def main():
""" Report converter main command line. """
parser = argparse.ArgumentParser(
prog="report-converter",
description="""
Creates a CodeChecker report directory from the given code analyzer output
which can be stored to a CodeChecker web server.""",
epilog="""
Supported analyzers:
{0}""".format('\n'.join([" {0} - {1}, {2}".format(
tool_name,
supported_converters[tool_name].NAME,
supported_converters[tool_name].URL)
for tool_name in sorted(supported_converters)])),
formatter_class=RawDescriptionDefaultHelpFormatter
)
__add_arguments_to_parser(parser)
args = parser.parse_args()
if 'verbose' in args and args.verbose:
LOG.setLevel(logging.DEBUG)
valid_metadata_values, invalid_metadata_values = \
process_metadata(args.meta)
if invalid_metadata_values:
LOG.error("The following metadata keys are invalid: %s. Valid key "
"values are: %s.",
', '.join(invalid_metadata_values),
', '.join(supported_metadata_keys))
sys.exit(1)
return transform_output(
args.input, args.type, args.output_dir, args.filename, args.export,
args.clean, valid_metadata_values)
if __name__ == "__main__":
main()
| 38.188525
| 79
| 0.565894
|
b0f8d739c653863d07386a86b53c4e7fe6355ef2
| 398
|
py
|
Python
|
setup.py
|
pysat/pysatModelUtils
|
e563c36531632cd68b4e15d2ac6099fa3cb45aef
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
pysat/pysatModelUtils
|
e563c36531632cd68b4e15d2ac6099fa3cb45aef
|
[
"BSD-3-Clause"
] | 17
|
2019-09-18T18:19:00.000Z
|
2020-01-30T18:41:59.000Z
|
setup.py
|
pysat/pysatModelUtils
|
e563c36531632cd68b4e15d2ac6099fa3cb45aef
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2022, Pysat Development Team
# Full license can be found in License.md and AUTHORS.md
# -----------------------------------------------------------------------------
"""Setup routine for pysat.
Note
----
Package metadata stored in setup.cfg
"""
from setuptools import setup
# Run setup, using setup.cfg metadata and options
setup()
| 22.111111
| 79
| 0.570352
|
4d0cc526329a24fe82db724e73bd5305f0f7004e
| 3,204
|
py
|
Python
|
main/settings.py
|
SpeedyCoder/task-manager
|
1f7d98bad6b8251e94bd8cd63f14e3f4524c4f72
|
[
"MIT"
] | null | null | null |
main/settings.py
|
SpeedyCoder/task-manager
|
1f7d98bad6b8251e94bd8cd63f14e3f4524c4f72
|
[
"MIT"
] | null | null | null |
main/settings.py
|
SpeedyCoder/task-manager
|
1f7d98bad6b8251e94bd8cd63f14e3f4524c4f72
|
[
"MIT"
] | null | null | null |
import os
import dj_database_url
from decouple import config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', cast=bool)
if config('DEPLOYED', default=True, cast=bool):
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
RAVEN_CONFIG = {
'dsn': config('SENTRY_DSN'),
'release': config('COMMIT_HASH'),
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': config('DB_PORT'),
}
}
RAVEN_CONFIG = {
'dsn': '',
'release': '',
}
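# Illustrative .env values read via python-decouple above (placeholder values
# only; a real deployment supplies its own):
#   SECRET_KEY=change-me
#   DEBUG=True
#   DEPLOYED=False
#   DB_NAME=tasks
#   DB_USER=tasks
#   DB_PASSWORD=change-me
#   DB_HOST=localhost
#   DB_PORT=5432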
STATIC_URL = os.getenv('STATIC_URL', '/static/')
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
# Django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd party
'raven.contrib.django.raven_compat',
'rules',
# Project
'tasks',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
WSGI_APPLICATION = 'main.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = (
'rules.permissions.ObjectPermissionBackend',
'django.contrib.auth.backends.ModelBackend',
)
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Login
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = 'home'
| 24.458015
| 91
| 0.647004
|
099a0b13ee37409ac379b19f135132e10c1dc82f
| 31,378
|
py
|
Python
|
code/test_if_values_can_be_extracted_from_the_model.py
|
saloniagarwal0403/SSCNN
|
30dafb61e5822d0011516c1d600450818844c106
|
[
"MIT"
] | null | null | null |
code/test_if_values_can_be_extracted_from_the_model.py
|
saloniagarwal0403/SSCNN
|
30dafb61e5822d0011516c1d600450818844c106
|
[
"MIT"
] | null | null | null |
code/test_if_values_can_be_extracted_from_the_model.py
|
saloniagarwal0403/SSCNN
|
30dafb61e5822d0011516c1d600450818844c106
|
[
"MIT"
] | null | null | null |
# conda activate SurvivalAnalysis_January2021
# python Siamese_with_conv_parallel_processing_average_pooling_multiple__magnifications.py>Epoch_4_using_average.txt
import sklearn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Activation, Add, BatchNormalization, Concatenate, Conv2D, Dense, Flatten, GlobalMaxPooling2D, Lambda, MaxPooling2D, Reshape, Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import CreateInputFeatureMaps_average_pooling_multiple_magnifications
from sklearn.model_selection import train_test_split
from tensorflow.python.framework import ops
import datetime, os
import random
import matplotlib
import pickle
import csv
import xlrd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
import warnings
warnings.filterwarnings('ignore')
matplotlib.use('Agg')
random.seed(0)
from sklearn.decomposition import PCA
import gc
from sklearn.base import clone
from multiprocessing import Pool
def plot_training(history, acc_val_image_filename):
acc = history.history['c_index']
val_acc = history.history['val_c_index']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
    # subplots() creates its own figure, so no separate plt.figure() call is needed
    fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot(epochs, acc, 'b.', label='Training accuracy')
ax1.plot(epochs, val_acc, 'r-', label='Validation accuracy')
ax1.set_title('Training and validation accuracy')
ax2.plot(epochs, loss, 'b.', label='Training loss')
ax2.plot(epochs, val_loss, 'r-', label='Validation loss')
ax2.set_title('Training and validation loss')
plt.legend()
plt.savefig(acc_val_image_filename)
def loss4(y_true, y_pred):
temp = y_true*y_pred
valid_idx = tf.math.greater(0.0,temp)
valid_y_pred = tf.where(valid_idx, y_pred, 0.0)
valid_y_true = tf.where(valid_idx, y_true, 0.0)
loss1 = tf.keras.losses.MSE(valid_y_true, valid_y_pred)
y_pred2 = tf.where(valid_idx, 0.0, y_pred)
y_true2 = tf.where(valid_idx, 0.0, y_true)
valid_idx2 = tf.math.greater(tf.math.abs(y_true2),tf.math.abs(y_pred2))
valid_y_pred2 = tf.where(valid_idx2, tf.math.abs(y_true2), 0.0)
valid_y_true2 = tf.where(valid_idx2, tf.math.abs(y_pred2), 0.0)
loss2 = tf.keras.losses.MSE(valid_y_true2, valid_y_pred2)
valid_idx3 = tf.math.greater(365.0,tf.math.abs(y_pred))
valid_loss3 = tf.where(valid_idx3, 1/(tf.math.abs(y_pred)+0.00000001), 0.0)
loss3 = tf.math.reduce_sum(valid_loss3)
return loss1+loss2+loss3
def loss5(y_true, y_pred):
loss1 = tf.keras.losses.MSE(y_true, y_pred)
valid_idx3 = tf.math.greater(365.0,tf.math.abs(y_pred))
valid_loss3 = tf.where(valid_idx3, 1/(tf.math.abs(y_pred)+0.00000001), 0.0)
loss3 = tf.math.reduce_sum(valid_loss3)
return loss1+loss3
def loss6(y_true, y_pred):
temp = y_true*y_pred
valid_idx = tf.math.greater(0.0,temp)
valid_y_pred = tf.where(valid_idx, y_pred, 0.0)
valid_y_true = tf.where(valid_idx, y_true, 0.0)
loss1 = tf.keras.losses.MSE(valid_y_true, valid_y_pred)
y_pred2 = tf.where(valid_idx, 0.0, y_pred)
y_true2 = tf.where(valid_idx, 0.0, y_true)
valid_idx2 = tf.math.greater(tf.math.abs(y_true2),tf.math.abs(y_pred2))
valid_y_pred2 = tf.where(valid_idx2, tf.math.abs(y_true2), 0.0)
valid_y_true2 = tf.where(valid_idx2, tf.math.abs(y_pred2), 0.0)
loss2 = tf.keras.losses.MSE(valid_y_true2, valid_y_pred2)
# valid_idx3 = tf.math.greater(365.0,tf.math.abs(y_pred))
# valid_loss3 = tf.where(valid_idx3, 1/(tf.math.abs(y_pred)+0.00000001), 0.0)
loss3 = tf.math.reduce_sum(tf.math.log(1/(tf.math.abs(y_pred)+0.00000001)))
return loss1+loss2+loss3
def get_X_Y_columns(this_df):
Y = this_df[['Time','Occurence']]
this_df = this_df.drop(columns= ['Time','Occurence'])
return this_df, Y
def get_features(X):
train_features = []
X_filenames = X['filenames'].iloc[:]
shuf = np.arange(0,len(X_filenames))
random.shuffle(shuf)
for i in shuf[0:100]:
filepaths_i = X_filenames.iloc[i]
for filepath_i in filepaths_i:
# print("Working with file: ",i," with path ",filepath_i)
train_features.extend(CreateInputFeatureMaps_average_pooling_multiple_magnifications.get_model_predictions(filepath_i))
return train_features
def pca_features_extraction(X, pca, n_pca_f, tensors_size, saving_folder):
X_filenames = X['filenames'].iloc[:]
X = X.drop(columns= ["filenames"])
arguments_for_pooling = []
count = 0
#For storing the PCA generated maps uncoment the following for loop and comment rest of the code in this fn
for i in range(len(X_filenames)):
###In parallel store all the filesFeatureMaps
filepaths_i = X_filenames.iloc[i]
for filepath_i in filepaths_i:
CreateInputFeatureMaps_average_pooling_multiple_magnifications.create_tensors(filepath_i, pca, n_pca_f, tensors_size, saving_folder)
def get_features_dataset(X, Y, tensors_size,saving_folder):
X_filenames = X['filenames'].iloc[:]
X = X.drop(columns= ["filenames"])
image_features = []
clinical_features = []
case_index = []
y_value = []
for i in range(len(X)):
filepaths_i = X_filenames.iloc[i]
for k in filepaths_i:
img_a = tf.cast(X.iloc[i,:],tf.float32) ## retrieveing all the columns except last as it is for filename
original_image_features_pickle_file_name = os.path.split(k)[-1]
with open(os.path.join(saving_folder,original_image_features_pickle_file_name), 'rb') as handle:
image_features_i = pickle.load(handle)
image_features.append(image_features_i)
clinical_features.append(img_a)
y_value.append([Y["Time"].iloc[i],Y["Occurence"].iloc[i]])
case_index.append(i)
return image_features, clinical_features, y_value, case_index
def model_def(number_channel,no_clinical_features,first_conv_layer_number_filers,second_conv_layer_number_filers,first_layer_neurons,second_layer_neurons):
image_input = Input(shape=(None,None,number_channel))
conv1 = tf.keras.layers.Conv2D(first_conv_layer_number_filers, (3,3), activation='relu')(image_input)
conv2 = tf.keras.layers.Conv2D(second_conv_layer_number_filers, (3,3), activation='relu')(conv1)
pool = tf.keras.layers.GlobalAveragePooling2D()(conv2)
# tf.keras.layers.Conv1D(first_conv_layer_number_filers,(1), activation='relu'),
# tf.keras.layers.Flatten(),
clinical_input = Input(shape=(no_clinical_features))
concatenate_layer = tf.keras.layers.concatenate([pool,clinical_input])
first_dense = tf.keras.layers.Dense(first_layer_neurons, activation='relu',kernel_regularizer='l1_l2')(concatenate_layer)
dp1 = tf.keras.layers.Dropout(0.2)(first_dense)
second_dense = tf.keras.layers.Dense(second_layer_neurons, activation='relu',kernel_regularizer='l1_l2')(dp1)
dp2 = tf.keras.layers.Dropout(0.2)(second_dense)
output = tf.keras.layers.Dense(1,kernel_regularizer='l1_l2')(dp2)
return Model(inputs=[image_input,clinical_input], outputs=output)
def build_model(number_channel,no_clinical_features,first_conv_layer_number_filers,second_conv_layer_number_filers,first_layer_neurons,second_layer_neurons):
model = model_def(number_channel,no_clinical_features,first_conv_layer_number_filers,second_conv_layer_number_filers,first_layer_neurons,second_layer_neurons)
print(model.summary())
########################
# SIAMESE NEURAL NETWORK
########################
# Complete model is constructed by calling the branch model on each input image,
# and then the head model on the resulting 512-vectors.
img_a = Input(shape=(None,None,number_channel))
img_b = Input(shape=(None,None,number_channel))
clinical_a = Input(shape=(no_clinical_features))
clinical_b = Input(shape=(no_clinical_features))
xa = model([img_a,clinical_a])
xb = model([img_b,clinical_b])
# x = Lambda(lambda x: tf.cast(K.exp(-x[1]) - K.exp(-x[0]), tf.float32))([xa, xb])
# x = Lambda(lambda x:x[1] - x[0])([xa, xb])
subtracted = tf.keras.layers.Subtract()([xa, xb])
# probability_output = tf.keras.activations.sigmoid(subtracted)
# x = Lambda(lambda x:tf.concat(x,1))([xa, xb])
# x = tf.cast(xb-xa, tf.float32)
model_f = Model(inputs=[img_a,img_b,clinical_a,clinical_b], outputs=[subtracted])
return model_f
def c_index_prediction(y_true, y_pred):
correct=0
for i in range(len(y_true)):
if (y_true[i]*y_pred[i])>0:
correct+=1
total = len(y_pred)
return float(correct)/float(total)
def c_index(y_true, y_pred):
temp = y_true*y_pred
valid_idx = tf.math.greater(temp,0.0)
correct_tensor = tf.where(valid_idx, 1.0, 0.0)
return tf.reduce_mean(correct_tensor)
def case_wise_soft_voting(predictions, true_values, i_j_pairs):
i_j_pairs_dict = {}
for i in range(len(i_j_pairs)):
this_pair = (i_j_pairs[i][0],i_j_pairs[i][1])
if this_pair in i_j_pairs_dict:
prediction,true_value = i_j_pairs_dict[this_pair]
prediction = prediction+predictions[i]
i_j_pairs_dict[this_pair] = [prediction,true_value]
else:
i_j_pairs_dict[this_pair] = [predictions[i],true_values[i]]
y_true = []
y_pred = []
for k,v in i_j_pairs_dict.items():
y_pred.append(v[0])
y_true.append(v[1])
return c_index_prediction(y_true,y_pred)
def case_wise_voting(predictions, true_values, i_j_pairs):
i_j_pairs_dict = {}
for i in range(len(i_j_pairs)):
this_pair = (i_j_pairs[i][0],i_j_pairs[i][1])
if this_pair in i_j_pairs_dict:
votes_1,votes_neg_1,true_value = i_j_pairs_dict[this_pair]
if predictions[i]>0:
votes_1+=1
else:
votes_neg_1+=1
i_j_pairs_dict[this_pair] = [votes_1,votes_neg_1,true_value]
else:
if predictions[i]>0:
votes_1=1
votes_neg_1=0
else:
votes_neg_1=1
votes_1=0
i_j_pairs_dict[this_pair] = [votes_1,votes_neg_1,true_values[i]]
y_true = []
y_pred = []
for k,v in i_j_pairs_dict.items():
if v[0]>v[1]:
y_pred.append(1)
else:
y_pred.append(-1)
y_true.append(v[2])
return c_index_prediction(y_true,y_pred)
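# Illustrative note (added comment, not part of the original code): for a case pair that is
# predicted by several WSI pairs, case_wise_voting keeps the majority sign of the individual
# predictions, while case_wise_soft_voting sums the raw predicted differences and uses the
# sign of the sum. E.g. predictions [0.4, -0.1, 0.3] for one case pair give a hard vote of +1
# (two positive vs. one negative) and a soft vote of +0.6.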
def c_index_calculation(preds, y_true, case_indexs, DAYS_DIFF):
    permissible_true = []
    permissible_predicted = []
    i_j_pairs = []
    for i in range(len(preds)):
        for j in range(i+1, len(preds)):
            if (y_true[i][1]==True and (y_true[i][0]<(y_true[j][0]+DAYS_DIFF))) or (y_true[j][1]==True and (y_true[j][0]<(y_true[i][0]+DAYS_DIFF))):
                permissible_true.append(y_true[i][0]-y_true[j][0])
                permissible_predicted.append(preds[i]-preds[j])
                i_j_pairs.append([case_indexs[i],case_indexs[j]])
    # WSI-level, case-level hard-voting, and case-level soft-voting c-indices
    return [c_index_prediction(permissible_true, permissible_predicted),
            case_wise_voting(permissible_predicted, permissible_true, i_j_pairs),
            case_wise_soft_voting(permissible_predicted, permissible_true, i_j_pairs)]
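# Illustrative note (added comment): a pair (i, j) is treated as comparable only if the case
# with the earlier time actually had an observed event (Occurence == True) within the
# DAYS_DIFF slack; e.g. with DAYS_DIFF = 365, a case that died at day 200 paired with a case
# censored at day 900 is kept (200 < 900 + 365) and contributes a true difference of -700.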
def shuffle(image_features):
image_features=np.asarray(image_features)
image_features_shuffled = np.reshape(np.copy(image_features), (image_features.shape[0],image_features.shape[1] * image_features.shape[2], image_features.shape[3]))
image_features_shuffled_new = []
for i_case in range(image_features.shape[0]):
temp = image_features_shuffled[i_case]
np.random.shuffle(temp)
temp_reshaped = np.reshape(temp, image_features.shape[1:])
image_features_shuffled_new.append(temp_reshaped)
image_features_shuffled_new=np.asarray(image_features_shuffled_new)
return image_features_shuffled_new
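# Illustrative note (added comment): shuffle() permutes the spatial positions of every case's
# feature map while keeping each position's channel vector intact; the 'SHUFFLED' columns in
# the results CSV use it as a control for whether the spatial arrangement of tile features
# matters.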
def prediction_by_average_over_wsi(VE_before_AP,VE_after_AP,image_features,clinical_features,y_values, case_indexs,DAYS_DIFF):
features_before_AP = VE_before_AP.predict(image_features)
idx_dict = {}
for i in range(len(case_indexs)):
if case_indexs[i] in idx_dict:
this_case_features = idx_dict[case_indexs[i]]
this_case_current = list(np.reshape(features_before_AP[i], (features_before_AP[i].shape[0]*features_before_AP[i].shape[1],features_before_AP[i].shape[2])))
for t in this_case_current:
this_case_features.append(t)
idx_dict[case_indexs[i]] = this_case_features
else:
idx_dict[case_indexs[i]] = list(np.reshape(features_before_AP[i], (features_before_AP[i].shape[0]*features_before_AP[i].shape[1],features_before_AP[i].shape[2])))
for i in idx_dict.keys():
this_case_features = idx_dict[i]
this_case_features_avg = np.mean(np.asarray(this_case_features), axis=0)
idx_dict[i] = this_case_features_avg
y_trues = {}
idx_dic = {}
for i in range(len(case_indexs)):
this_case_clinical_features = clinical_features[i]
if case_indexs[i] not in idx_dic:
idx_dic[case_indexs[i]] = tf.cast(np.concatenate((np.array(idx_dict[case_indexs[i]]),np.array(this_case_clinical_features))),tf.float32)
y_trues[case_indexs[i]] = y_values[i]
y_model_preds = VE_after_AP.predict(np.array(list(idx_dic.values())))
y_true = []
y_pred = []
    for pos, case_id in enumerate(idx_dic.keys()):
        y_true.append(y_trues[case_id])
        y_pred.append(y_model_preds[pos])
with open("Values_feature.pickle", 'wb') as handle:
pickle.dump([y_pred, y_true],handle)
return c_index_calculation(y_pred, y_true, list(idx_dic.keys()),DAYS_DIFF)[0]
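# Illustrative note (added comment): prediction_by_average_over_wsi() relies on splitting the
# value extractor into VE_before_AP (convolutions) and VE_after_AP (dense head), so the
# convolutional features from every position of every WSI of a case can be averaged before
# the head runs, giving one prediction per case; the clinical vector is concatenated to the
# averaged map just as in the full model.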
# TODO: Check how to insert the image feature maps into the dataframe
os.environ["CUDA_VISIBLE_DEVICES"]='1'
BATCH_SIZE = 16
DAYS_DIFF = 365
output_files_folder = os.path.join(r"/home","sxa171531","images","TCGA-GBM","output_files")
train_pickle_filename = os.path.join(output_files_folder,'train.pickle')
val_pickle_filename = os.path.join(output_files_folder,'val.pickle')
test_pickle_filename = os.path.join(output_files_folder,'test.pickle')
with open(train_pickle_filename, 'rb') as handle:
df_train = pickle.load(handle)
with open(test_pickle_filename, 'rb') as handle:
df_test = pickle.load(handle)
with open(val_pickle_filename, 'rb') as handle:
df_val =pickle.load(handle)
df_train_X, df_train_Y = get_X_Y_columns(df_train)
df_val_X, df_val_Y = get_X_Y_columns(df_val)
df_test_X, df_test_Y = get_X_Y_columns(df_test)
predictions = []
fields = ['no_pca','no_training_pairs', 'conv_f', 'conv_s', 'f', 's', 'lr',
'wsi_train', 'wsi_val', 'wsi_test',
'case_voting_train', 'case_voting_val', 'case_voting_test',
'case_soft-voting_train', 'case_soft-voting_val', 'case_soft-voting_test',
'SHUFFLED',
'wsi_train', 'wsi_val', 'wsi_test',
'case_voting_train', 'case_voting_val', 'case_voting_test',
'case_soft-voting_train', 'case_soft-voting_val', 'case_soft-voting_test',
'AVGMAPS',
'train', 'val', 'test',
'','0 DAYS_DIFF',
'wsi_train_0_days_diff', 'wsi_val_0_days_diff', 'wsi_test_0_days_diff',
'case_voting_train_0_days_diff', 'case_voting_val_0_days_diff', 'case_voting_test_0_days_diff',
'case_soft-voting_train_0_days_diff', 'case_soft-voting_val_0_days_diff', 'case_soft-voting_test_0_days_diff',
'SHUFFLED',
'wsi_train', 'wsi_val', 'wsi_test',
'case_voting_train', 'case_voting_val', 'case_voting_test',
'case_soft-voting_train', 'case_soft-voting_val', 'case_soft-voting_test',
'AVGMAPS',
'train', 'val', 'test'
]
csv_filename = os.path.join(output_files_folder,"Results-testing.csv")
with open(csv_filename, 'w') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(fields)
conv_first_layer_filters = [256] # previously 32
conv_second_layer_filters = [256] #previously 32
first_layer_neurons = [8] #previously 32
second_layer_neurons = [4] #previously 32
lrs = [0.0001]
results = []
number_pca_features = [8]
tensors_size = 100
cvs_file_inputs = []
for n_pca_f in number_pca_features:
### FOR BUILDING VALID DATASETS
gc.disable()
train_folder = os.path.join(output_files_folder,'PCA_features_train_'+str(n_pca_f))
val_folder = os.path.join(output_files_folder,'PCA_features_val_'+str(n_pca_f))
test_folder = os.path.join(output_files_folder,'PCA_features_test_'+str(n_pca_f))
image_features_train, clinical_features_train, y_value_train, case_index_train = get_features_dataset(df_train_X, df_train_Y, tensors_size,train_folder)
image_features_val, clinical_features_val, y_value_val, case_index_val = get_features_dataset(df_val_X, df_val_Y, tensors_size,val_folder)
image_features_test, clinical_features_test, y_value_test, case_index_test = get_features_dataset(df_test_X, df_test_Y, tensors_size,test_folder)
image_features_train_shuffled = shuffle(image_features_train)
image_features_val_shuffled = shuffle(image_features_val)
image_features_test_shuffled = shuffle(image_features_test)
for count_i in [50000]:
for conv_f in conv_first_layer_filters:
for conv_s in conv_second_layer_filters:
for f in first_layer_neurons:
for s in second_layer_neurons:
for lr in lrs:
this_config_predictions = []
this_config_results = [n_pca_f,count_i,conv_f,conv_s,f,s,lr]
weights_folder = os.path.join(output_files_folder,"weights")
filepath= os.path.join(weights_folder, "Siamese_with_max_pooling_separate_training_first"+str("-"+str(count_i)+"-"+str(n_pca_f)+"-"+"loss4"+"-"+str(conv_f)+"-"+str(conv_s)+"-"+str(f)+"-"+str(s)+"-"+str(lr))+".h5")
base_model = build_model(n_pca_f,len(clinical_features_train[0]),conv_f,conv_s,f,s)
base_model.compile(optimizer=tf.keras.optimizers.Adam(lr = lr),
loss = loss4,
metrics=[c_index])
base_model.load_weights(filepath)
value_extractor = base_model.layers[-2]
pred_test = value_extractor.predict([np.array(image_features_test), np.array(clinical_features_test)])
softvoting_values = {}
for i in range(len(case_index_test)):
idx = case_index_test[i]
if idx in softvoting_values:
temp = softvoting_values[idx]
temp[0] = temp[0]+pred_test[i]
temp[1] = temp[1]+1
softvoting_values[idx] = temp
else:
softvoting_values[idx] = [pred_test[i],1,y_value_test[i]]
with open("Values"+str(count_i)+"_softvoting.pickle", 'wb') as handle:
pickle.dump(list(softvoting_values.values()),handle)
wsi_test, case_voting_test, case_soft_voting_test = c_index_calculation(pred_test, y_value_test, case_index_test,DAYS_DIFF)
pred_val = value_extractor.predict([np.array(image_features_val), np.array(clinical_features_val)])
wsi_val, case_voting_val, case_soft_voting_val = c_index_calculation(pred_val, y_value_val, case_index_val,DAYS_DIFF)
pred_train = value_extractor.predict([np.array(image_features_train), np.array(clinical_features_train)])
wsi_train, case_voting_train, case_soft_voting_train = c_index_calculation(pred_train, y_value_train, case_index_train,DAYS_DIFF)
this_config_results.append(wsi_train)
this_config_results.append(wsi_val)
this_config_results.append(wsi_test)
this_config_results.append(case_voting_train)
this_config_results.append(case_voting_val)
this_config_results.append(case_voting_test)
this_config_results.append(case_soft_voting_train)
this_config_results.append(case_soft_voting_val)
this_config_results.append(case_soft_voting_test)
this_config_results.append(" ")
pred_test_shuffled = value_extractor.predict([np.array(image_features_test_shuffled), np.array(clinical_features_test)])
wsi_test_shuffled , case_voting_test_shuffled , case_soft_voting_test_shuffled = c_index_calculation(pred_test_shuffled , y_value_test, case_index_test,DAYS_DIFF)
pred_val_shuffled = value_extractor.predict([np.array(image_features_val_shuffled), np.array(clinical_features_val)])
wsi_val_shuffled , case_voting_val_shuffled , case_soft_voting_val_shuffled = c_index_calculation(pred_val_shuffled , y_value_val, case_index_val,DAYS_DIFF)
pred_train_shuffled = value_extractor.predict([np.array(image_features_train_shuffled), np.array(clinical_features_train)])
wsi_train_shuffled , case_voting_train_shuffled , case_soft_voting_train_shuffled = c_index_calculation(pred_train_shuffled , y_value_train, case_index_train,DAYS_DIFF)
this_config_results.append(wsi_train_shuffled )
this_config_results.append(wsi_val_shuffled )
this_config_results.append(wsi_test_shuffled )
this_config_results.append(case_voting_train_shuffled )
this_config_results.append(case_voting_val_shuffled )
this_config_results.append(case_voting_test_shuffled )
this_config_results.append(case_soft_voting_train_shuffled)
this_config_results.append(case_soft_voting_val_shuffled)
this_config_results.append(case_soft_voting_test_shuffled)
this_config_results.append(" ")#Averagemaps
inputs = tf.keras.Input(shape=(None,None, n_pca_f))
x1 = value_extractor.layers[1](inputs)
# x2 = value_extractor.layers[2](x1)
outputs = value_extractor.layers[2](x1)
VE_before_AP = tf.keras.Model(inputs, outputs)
inputs = tf.keras.Input(shape=(conv_s+len(clinical_features_train[0])))
x1 = value_extractor.layers[-5](inputs)
x2 = value_extractor.layers[-3](x1)
outputs = value_extractor.layers[-1](x2)
VE_after_AP = Model(inputs=inputs, outputs=outputs)
wsi_train_avg = prediction_by_average_over_wsi(VE_before_AP,VE_after_AP,np.array(image_features_train),np.array(clinical_features_train),y_value_train, case_index_train,DAYS_DIFF)
wsi_val_avg = prediction_by_average_over_wsi(VE_before_AP,VE_after_AP,np.array(image_features_val),np.array(clinical_features_val),y_value_val, case_index_val,DAYS_DIFF)
wsi_test_avg = prediction_by_average_over_wsi(VE_before_AP,VE_after_AP,np.array(image_features_test),np.array(clinical_features_test),y_value_test, case_index_test,DAYS_DIFF)
this_config_results.append(wsi_train_avg)
this_config_results.append(wsi_val_avg)
this_config_results.append(wsi_test_avg)
this_config_results.append(" ")
this_config_results.append(" ")
wsi_test_0_days_diff, case_voting_test_0_days_diff, case_soft_voting_test_0_days_diff = c_index_calculation(pred_test, y_value_test, case_index_test,0)
wsi_val_0_days_diff, case_voting_val_0_days_diff, case_soft_voting_val_0_days_diff = c_index_calculation(pred_val, y_value_val, case_index_val,0)
wsi_train_0_days_diff, case_voting_train_0_days_diff, case_soft_voting_train_0_days_diff = c_index_calculation(pred_train, y_value_train, case_index_train,0)
this_config_results.append(wsi_train_0_days_diff)
this_config_results.append(wsi_val_0_days_diff)
this_config_results.append(wsi_test_0_days_diff)
this_config_results.append(case_voting_train_0_days_diff)
this_config_results.append(case_voting_val_0_days_diff)
this_config_results.append(case_voting_test_0_days_diff)
this_config_results.append(case_soft_voting_train_0_days_diff)
this_config_results.append(case_soft_voting_val_0_days_diff)
this_config_results.append(case_soft_voting_test_0_days_diff)
this_config_results.append(" ")
pred_test_shuffled = value_extractor.predict([np.array(image_features_test_shuffled), np.array(clinical_features_test)])
wsi_test_shuffled , case_voting_test_shuffled , case_soft_voting_test_shuffled = c_index_calculation(pred_test_shuffled , y_value_test, case_index_test,0)
pred_val_shuffled = value_extractor.predict([np.array(image_features_val_shuffled), np.array(clinical_features_val)])
wsi_val_shuffled , case_voting_val_shuffled , case_soft_voting_val_shuffled = c_index_calculation(pred_val_shuffled , y_value_val, case_index_val,0)
pred_train_shuffled = value_extractor.predict([np.array(image_features_train_shuffled), np.array(clinical_features_train)])
wsi_train_shuffled , case_voting_train_shuffled , case_soft_voting_train_shuffled = c_index_calculation(pred_train_shuffled , y_value_train, case_index_train,0)
this_config_results.append(wsi_train_shuffled )
this_config_results.append(wsi_val_shuffled )
this_config_results.append(wsi_test_shuffled )
this_config_results.append(case_voting_train_shuffled )
this_config_results.append(case_voting_val_shuffled )
this_config_results.append(case_voting_test_shuffled )
this_config_results.append(case_soft_voting_train_shuffled)
this_config_results.append(case_soft_voting_val_shuffled)
this_config_results.append(case_soft_voting_test_shuffled)
this_config_results.append(" ")#Averagemaps
inputs = tf.keras.Input(shape=(None,None, n_pca_f))
x1 = value_extractor.layers[1](inputs)
outputs = value_extractor.layers[2](x1)
VE_before_AP = tf.keras.Model(inputs, outputs)
inputs = tf.keras.Input(shape=(conv_s+len(clinical_features_train[0])))
x1 = value_extractor.layers[-5](inputs)
x2 = value_extractor.layers[-3](x1)
outputs = value_extractor.layers[-1](x2)
VE_after_AP = Model(inputs=inputs, outputs=outputs)
wsi_train_avg = prediction_by_average_over_wsi(VE_before_AP,VE_after_AP,np.array(image_features_train),np.array(clinical_features_train),y_value_train, case_index_train,0)
wsi_val_avg = prediction_by_average_over_wsi(VE_before_AP,VE_after_AP,np.array(image_features_val),np.array(clinical_features_val),y_value_val, case_index_val,0)
wsi_test_avg = prediction_by_average_over_wsi(VE_before_AP,VE_after_AP,np.array(image_features_test),np.array(clinical_features_test),y_value_test, case_index_test,0)
this_config_results.append(wsi_train_avg)
this_config_results.append(wsi_val_avg)
this_config_results.append(wsi_test_avg)
wsi_test_avg = prediction_by_average_over_wsi(VE_before_AP,VE_after_AP,np.array(image_features_test_shuffled),np.array(clinical_features_test),y_value_test, case_index_test,0)
this_config_results.append(wsi_test_avg)
wsi_test_avg = prediction_by_average_over_wsi(VE_before_AP,VE_after_AP,np.zeros(np.shape(image_features_test)),np.array(clinical_features_test),y_value_test, case_index_test,0)
this_config_results.append(wsi_test_avg)
wsi_test_avg = prediction_by_average_over_wsi(VE_before_AP,VE_after_AP,np.array(image_features_test),np.zeros(np.shape(clinical_features_test)),y_value_test, case_index_test,0)
this_config_results.append(wsi_test_avg)
pred_test = value_extractor.predict([np.zeros(np.shape(image_features_test)), np.array(clinical_features_test)])
wsi_test , case_voting_test , case_soft_voting_test = c_index_calculation(pred_test , y_value_test, case_index_test,0)
this_config_results.append(["No WSI features",wsi_test , case_voting_test , case_soft_voting_test])
pred_test = value_extractor.predict([np.array(image_features_test), np.zeros(np.shape(clinical_features_test))])
wsi_test , case_voting_test , case_soft_voting_test = c_index_calculation(pred_test , y_value_test, case_index_test,0)
this_config_results.append(["No clinical features",wsi_test , case_voting_test , case_soft_voting_test])
print(this_config_results)
predictions.append(this_config_results)
with open(csv_filename, 'w') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(fields)
csvwriter.writerows(predictions)
| 59.31569
| 242
| 0.658073
|
d9a25dbb08cbe6ac64813d33d0b8d7ff004eea0a
| 4,212
|
py
|
Python
|
code/import-cat-from-coco.py
|
faisalthaheem/simanno-scripts
|
fed6779dab60c11daed14ea8d42f67f789252389
|
[
"Apache-2.0"
] | null | null | null |
code/import-cat-from-coco.py
|
faisalthaheem/simanno-scripts
|
fed6779dab60c11daed14ea8d42f67f789252389
|
[
"Apache-2.0"
] | null | null | null |
code/import-cat-from-coco.py
|
faisalthaheem/simanno-scripts
|
fed6779dab60c11daed14ea8d42f67f789252389
|
[
"Apache-2.0"
] | null | null | null |
from pycocotools.coco import COCO
import sys
import os
import argparse
import sqlite3
import json
import shutil
# Sample invocation
# python3 import-cat-from-coco.py -t val -c car -li 1 -af $CARS_FROM_COCO_PATH/annotations/instances_val2017.json -dp $CARS_FROM_COCO_PATH -sp $COCO_PATH/val2017/
# python3 import-cat-from-coco.py -t train -c car -li 1 -af $COCO_PATH/annotations/instances_train2017.json -dp $CARS_FROM_COCO_PATH -sp $COCO_PATH/train2017/
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--category", required=True,
help="The category to extract.")
ap.add_argument("-t", "--type", required=True,
help="Type to assign to images, accepted values are train or val, default is train.")
ap.add_argument("-li", "--lblid", required=True,
help="The label id to be used for this category.")
ap.add_argument("-af", "--annotationfile", required=True,
help="Path to annotation file.")
ap.add_argument("-sp", "--srcpath", required=True,
help="Path of source containing images.")
ap.add_argument("-dp", "--destpath", required=True,
help="Path to destination to create the db and save images to.")
args = vars(ap.parse_args())
print("Checking paths...")
#ensure that provided paths exist or create where necessary
#check that source paths exist
if not os.path.exists(args['annotationfile']):
print("The path [{}] does not exist.".format(args['annotationfile']))
sys.exit(0)
if not os.path.exists(args['srcpath']):
print("The path [{}] does not exist.".format(args['srcpath']))
sys.exit(0)
#create dest path
DEST_PATH_IMAGES = os.path.join(args['destpath'], args['type'])
DEST_PATH_DB = os.path.join(args['destpath'], '{}.db'.format(args['type']))
os.makedirs(exist_ok=True, name=DEST_PATH_IMAGES)
if os.path.exists(DEST_PATH_DB):
    print("The db already exists at [{}]; removing it so it can be recreated.".format(DEST_PATH_DB))
    os.remove(DEST_PATH_DB)
print("Setting up db...")
#open db
db_conn = sqlite3.connect(DEST_PATH_DB,1000)
db_cursor = db_conn.cursor()
print("Begin processing")
coco = COCO(args['annotationfile'])
category_id = coco.getCatIds(catNms=[args['category']])
annotation_ids = coco.getAnnIds(catIds=category_id, iscrowd=None)
all_annotations = coco.loadAnns(annotation_ids)
init_script = ""
with open("./db_init_script_sql", "r") as init_script_sql:
init_script = init_script_sql.read()
db_conn.executescript(init_script)
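# Assumed schema sketch (hypothetical; inferred from the INSERT statement further below --
# the real definition lives in the external db_init_script_sql file):
#   CREATE TABLE annotations (
#       filename   TEXT,
#       imheight   INTEGER,
#       imwidth    INTEGER,
#       isreviewed INTEGER,
#       imgareas   TEXT   -- JSON-encoded list of labelled boxes
#   );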
#begin copying files
#aggregate
metadata_aggregate = {}
for i in range(0, len(all_annotations)):
cur_ann = all_annotations[i]
cbbox = cur_ann["bbox"]
cimg_info = coco.loadImgs(cur_ann["image_id"])
metadata = None
if cimg_info[0]["file_name"] in metadata_aggregate:
metadata = metadata_aggregate[cimg_info[0]["file_name"]]
else:
metadata = {
"file_name": cimg_info[0]["file_name"],
"height": cimg_info[0]["height"],
"width": cimg_info[0]["width"],
"imgareas": []
}
metadata_aggregate[cimg_info[0]["file_name"]] = metadata
imgarea_id = len(metadata['imgareas']) + 1
imgarea = {
"id": imgarea_id,
"x": int(cbbox[0]),
"y": int(cbbox[1]),
"z": 100,
"width": int(cbbox[2]),
"height": int(cbbox[3]),
"lblid": args['lblid'],
"lbltxt": args['category']
}
metadata['imgareas'].append(imgarea)
num_records_inserted = 0
for k,v in metadata_aggregate.items():
query = "INSERT INTO annotations(filename, imheight, imwidth, isreviewed, imgareas) values('{}',{},{},1,'{}')".format(
k,
v["height"],
v["width"],
json.dumps(v['imgareas'])
)
try:
#copy file
dst_file_path = os.path.join(DEST_PATH_IMAGES, k)
        if not os.path.exists(dst_file_path):
src_file_path = os.path.join(args['srcpath'], k)
shutil.copyfile(src_file_path, dst_file_path)
#insert to db
db_cursor.execute(query)
num_records_inserted += db_cursor.rowcount
    except Exception as e:
        print("Failed to import [{}]: {}".format(k, e))
        print(query)
db_conn.commit()
db_cursor.close()
db_conn.close()
print("Finished. Inserted [{}] records.".format(num_records_inserted))
| 30.970588
| 162
| 0.666667
|
aa279f918e4b24ce6fae0ac1f00043c816b67a5d
| 4,156
|
py
|
Python
|
instabot_py/models/consolidate_model.py
|
rickymoli/instabot.py
|
8c66d76fc8841f1cb47fad148789bef2dc52f2be
|
[
"MIT"
] | null | null | null |
instabot_py/models/consolidate_model.py
|
rickymoli/instabot.py
|
8c66d76fc8841f1cb47fad148789bef2dc52f2be
|
[
"MIT"
] | null | null | null |
instabot_py/models/consolidate_model.py
|
rickymoli/instabot.py
|
8c66d76fc8841f1cb47fad148789bef2dc52f2be
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pymongo
class ConsolidateModel:
def __init__(self):
conexion = pymongo.MongoClient('localhost',27017)
self.db = conexion.instabot
def saveFollowers(self, date, news, lost):
collection = self.db.consolidate_followers
collection.remove({'date':date})
collection.insert_one({'date':date,'news':news,'lost':lost})
def addLikerRecurrentFollowers(self, date, num):
collection = self.db.consolidate_followers
item = collection.find({'date':date})
if item.count() > 0:
if 'likers_recurrent' in item[0]:
num = item[0]['likers_recurrent'] + num
collection.update({'date':date},{'$set':{'likers_recurrent':num}})
def saveFollowersTag(self, data):
collection = self.db.consolidate_followers_tags
collection.remove({'date':data[0]['date']}, multi=True)
collection.insert_many(data)
def addNewFollowersTag(self, tag, date, num):
collection = self.db.consolidate_followers_tags
item = collection.find({'name':tag,'date':date})
if item.count() > 0:
fields = {'likers_new':0,'likers_recurrent':0}
if 'likers_new' in item[0]:
fields['likers_new'] = item[0]['likers_new'] + num
else:
fields['likers_new'] = num
if 'likers_recurrent' in item[0]:
fields['likers_recurrent'] = item[0]['likers_recurrent']
collection.update({'name':tag,'date':date},{'$set':fields})
def addRecurrentFollowersTag(self, tag, date, num):
collection = self.db.consolidate_followers_tags
item = collection.find({'name':tag,'date':date})
if item.count() > 0:
fields = {'likers_new':0,'likers_recurrent':0}
if 'likers_recurrent' in item[0]:
fields['likers_recurrent'] = item[0]['likers_recurrent'] + num
else:
fields['likers_recurrent'] = num
if 'likers_new' in item[0]:
fields['likers_new'] = item[0]['likers_new']
collection.update({'name':tag,'date':date},{'$set':fields})
def addNewFollowersMedia(self, media_id, date, num):
collection = self.db.consolidate_followers_medias
item = collection.find({'media_id':media_id,'date':date})
if item.count() > 0:
fields = {'new':0,'recurrent':0}
if 'new' in item[0]:
fields['new'] = item[0]['new'] + num
else:
fields['new'] = num
if 'recurrent' in item[0]:
fields['recurrent'] = item[0]['recurrent']
collection.update({'media_id':media_id,'date':date},{'$set':fields})
else:
collection.insert({'media_id':media_id,'date':date,'new':num,'recurrent':0})
def addRecurrentFollowersMedia(self, media_id, date, num):
collection = self.db.consolidate_followers_medias
item = collection.find({'media_id':media_id,'date':date})
if item.count() > 0:
fields = {'new':0,'recurrent':0}
if 'recurrent' in item[0]:
fields['recurrent'] = item[0]['recurrent'] + num
else:
fields['recurrent'] = num
if 'new' in item[0]:
fields['new'] = item[0]['new']
collection.update({'media_id':media_id,'date':date},{'$set':fields})
else:
collection.insert({'media_id':media_id,'date':date,'new':0,'recurrent':num})
def removeFollowersMedia(self, date):
collection = self.db.consolidate_followers_medias
collection.remove({'date':date}, multi=True)
def getTagsByDate(self, date):
collection = self.db.consolidate_followers_tags
return collection.find({'date':date})
def updateTags(self, find, data):
collection = self.db.consolidate_followers_tags
collection.update(find,{'$set':data})
def updateFollowers(self, date, data):
collection = self.db.consolidate_followers
collection.update({'date':date},{'$set':data})
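# Example usage (illustrative only; assumes a MongoDB instance on localhost:27017 as above):
#   model = ConsolidateModel()
#   model.saveFollowers('2021-01-01', news=5, lost=2)
#   model.addLikerRecurrentFollowers('2021-01-01', 3)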
| 41.56
| 88
| 0.588065
|
f769ed288604d3a7e00eef6361e241054626384d
| 2,523
|
py
|
Python
|
gala/dynamics/tests/test_actionangle_staeckel.py
|
zilishen/gala
|
f7184e6b09fbc42a349f6b5a2bca6242f1e9936e
|
[
"MIT"
] | 1
|
2020-11-20T18:27:25.000Z
|
2020-11-20T18:27:25.000Z
|
gala/dynamics/tests/test_actionangle_staeckel.py
|
zilishen/gala
|
f7184e6b09fbc42a349f6b5a2bca6242f1e9936e
|
[
"MIT"
] | 3
|
2021-07-26T15:07:25.000Z
|
2021-09-13T15:04:27.000Z
|
gala/dynamics/tests/test_actionangle_staeckel.py
|
nstarman/gala
|
5415c817a7cc5e1a5086217332466ffc7af16ab3
|
[
"MIT"
] | null | null | null |
# Third-party
from astropy.constants import G
import astropy.units as u
import numpy as np
import pytest
# gala
from gala.dynamics import get_staeckel_fudge_delta, PhaseSpacePosition
import gala.potential as gp
from gala.units import galactic
from .helpers import HAS_GALPY
@pytest.mark.skipif(not HAS_GALPY,
reason="requires galpy to run this test")
def test_staeckel_fudge_delta():
import galpy.potential as galpy_pot
from galpy.actionAngle import estimateDeltaStaeckel
ro = 8.1 * u.kpc
vo = 229 * u.km/u.s
paired_potentials = []
# Miyamoto-Nagai
potential = gp.MiyamotoNagaiPotential(m=6e10*u.Msun, a=3*u.kpc, b=0.3*u.kpc,
units=galactic)
amp = (G * potential.parameters['m']).to_value(vo**2 * ro)
a = potential.parameters['a'].to_value(ro)
b = potential.parameters['b'].to_value(ro)
galpy_potential = galpy_pot.MiyamotoNagaiPotential(amp=amp, a=a, b=b,
ro=ro, vo=vo)
paired_potentials.append((potential, galpy_potential))
# Hernquist
potential = gp.HernquistPotential(m=6e10*u.Msun, c=0.3*u.kpc,
units=galactic)
amp = (G * potential.parameters['m']).to_value(vo**2 * ro)
a = potential.parameters['c'].to_value(ro)
galpy_potential = galpy_pot.HernquistPotential(amp=amp, a=a,
ro=ro, vo=vo)
paired_potentials.append((potential, galpy_potential))
# NFW
potential = gp.NFWPotential(m=6e11*u.Msun, r_s=15.6*u.kpc,
units=galactic)
amp = (G * potential.parameters['m']).to_value(vo**2 * ro)
a = potential.parameters['r_s'].to_value(ro)
galpy_potential = galpy_pot.NFWPotential(amp=amp, a=a, ro=ro, vo=vo)
paired_potentials.append((potential, galpy_potential))
# TEST:
N = 1024
rnd = np.random.default_rng(42)
w = PhaseSpacePosition(pos=rnd.uniform(-10, 10, size=(3, N)) * u.kpc,
vel=rnd.uniform(-100, 100, size=(3, N)) * u.km/u.s)
R = w.cylindrical.rho.to_value(ro)
z = w.z.to_value(ro)
for p, galpy_p in paired_potentials:
galpy_deltas = estimateDeltaStaeckel(galpy_p, R, z,
no_median=True)
gala_deltas = get_staeckel_fudge_delta(p, w).value
print(p, np.allclose(gala_deltas, galpy_deltas))
assert np.allclose(gala_deltas, galpy_deltas, atol=1e-6)
| 37.656716
| 80
| 0.614744
|
95275c8ddadcfd52aba31af49decce4212c99918
| 11,877
|
py
|
Python
|
python/federatedml/model_selection/data_split/data_split.py
|
kakasu/FATE
|
cfc61ef268154e08a9e7125c047c318c5e5eb42a
|
[
"Apache-2.0"
] | 2
|
2020-11-21T11:25:08.000Z
|
2020-11-21T11:25:11.000Z
|
python/federatedml/model_selection/data_split/data_split.py
|
TroubleMaker1994/FATE
|
23ad848bcc7ae7f304a376d3f46f4af26872c8a2
|
[
"Apache-2.0"
] | null | null | null |
python/federatedml/model_selection/data_split/data_split.py
|
TroubleMaker1994/FATE
|
23ad848bcc7ae7f304a376d3f46f4af26872c8a2
|
[
"Apache-2.0"
] | 1
|
2021-02-03T08:23:42.000Z
|
2021-02-03T08:23:42.000Z
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
from sklearn.model_selection import train_test_split
from fate_flow.entity.metric import Metric, MetricMeta
from federatedml.feature.binning.base_binning import BaseBinning
from federatedml.model_base import ModelBase
from federatedml.param.data_split_param import DataSplitParam
from federatedml.util import LOGGER
from federatedml.util import data_io
from federatedml.util.consts import FLOAT_ZERO
ROUND_NUM = 3
class DataSplitter(ModelBase):
def __init__(self):
super().__init__()
self.metric_name = "data_split"
self.metric_namespace = "train"
self.metric_type = "DATA_SPLIT"
self.model_param = DataSplitParam()
self.role = None
self.need_transform = None
def _init_model(self, params):
self.random_state = params.random_state
self.test_size = params.test_size
self.train_size = params.train_size
self.validate_size = params.validate_size
self.stratified = params.stratified
self.shuffle = params.shuffle
self.split_points = params.split_points
if self.split_points:
self.split_points = sorted(self.split_points)
self.need_run = params.need_run
@staticmethod
def _safe_divide(n, d):
result = n / d if d > FLOAT_ZERO else 0.0
if result >= 1:
result = 1.0
return result
def _split(self, ids, y, test_size, train_size):
if test_size <= FLOAT_ZERO:
return ids, [], y, []
if train_size <= FLOAT_ZERO:
return [], ids, [], y
stratify = y if self.stratified else None
if not isinstance(test_size, int):
train_size = round(train_size * len(ids))
test_size = len(ids) - train_size
id_train, id_test, y_train, y_test = train_test_split(ids, y,
test_size=test_size, train_size=train_size,
random_state=self.random_state,
shuffle=self.shuffle, stratify=stratify)
return id_train, id_test, y_train, y_test
def _get_ids(self, data_inst):
ids = sorted([i for i, v in data_inst.mapValues(lambda v: None).collect()])
return ids
def _get_y(self, data_inst):
if self.stratified:
y = [v for i, v in data_inst.mapValues(lambda v: v.label).collect()]
if self.need_transform:
y = self.transform_regression_label(data_inst)
else:
# make dummy y
y = [0] * (data_inst.count())
return y
def check_need_transform(self):
if self.split_points is not None:
if len(self.split_points) == 0:
self.need_transform = False
else:
# only need to produce binned labels if stratified split needed
if self.stratified:
self.need_transform = True
return
@staticmethod
def get_train_test_size(train_size, test_size):
LOGGER.debug(f"original train is {train_size}, original test_size is {test_size}")
# return original set size if int
if isinstance(test_size, int) and isinstance(train_size, int):
return train_size, test_size
total_size = test_size + train_size
new_train_size = DataSplitter._safe_divide(train_size, total_size)
new_test_size = DataSplitter._safe_divide(test_size, total_size)
LOGGER.debug(f"new_train_size is {new_train_size}, new_test_size is {new_test_size}")
return new_train_size, new_test_size
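    # Worked example (illustrative, not part of the original code):
    #   get_train_test_size(0.6, 0.2) renormalises the fractions over their sum 0.8 and
    #   returns (0.75, 0.25); integer inputs such as (600, 200) are returned unchanged.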
def param_validator(self, data_inst):
"""
Validate & transform param inputs
"""
# check if need label transform
self.check_need_transform()
# check & transform data set sizes
n_count = data_inst.count()
if isinstance(self.test_size, float) or isinstance(self.train_size, float) or isinstance(self.validate_size,
float):
total_size = 1.0
else:
total_size = n_count
if self.train_size is None:
if self.validate_size is None:
self.train_size = total_size - self.test_size
self.validate_size = total_size - (self.test_size + self.train_size)
else:
self.train_size = total_size - (self.test_size + self.validate_size)
elif self.test_size is None:
if self.validate_size is None:
self.test_size = total_size - self.train_size
self.validate_size = total_size - (self.test_size + self.train_size)
else:
self.test_size = total_size - (self.validate_size + self.train_size)
elif self.validate_size is None:
if self.train_size is None:
self.train_size = total_size - self.test_size
self.validate_size = total_size - (self.test_size + self.train_size)
if abs((abs(self.train_size) + abs(self.test_size) + abs(self.validate_size)) - total_size) > FLOAT_ZERO:
raise ValueError(f"train_size, test_size, validate_size should sum up to 1.0 or data count")
return
def transform_regression_label(self, data_inst):
edge = self.split_points[-1] + 1
split_points_bin = self.split_points + [edge]
bin_labels = data_inst.mapValues(lambda v: BaseBinning.get_bin_num(v.label, split_points_bin))
binned_y = [v for k, v in bin_labels.collect()]
return binned_y
@staticmethod
def get_class_freq(y, split_points=None, label_names=None):
"""
get frequency info of a given y set; only called when stratified is true
:param y: list, y sample
:param split_points: list, split points used to bin regression values
:param label_names: list, label names of all data
:return: dict
"""
freq_dict = collections.Counter(y)
freq_keys = freq_dict.keys()
# continuous label
if split_points is not None:
label_count = len(split_points) + 1
# fill in count for missing bins
if len(freq_keys) < label_count:
for i in range(label_count):
if i not in freq_keys:
freq_dict[i] = 0
# categorical label
else:
if label_names is None:
raise ValueError("No label values collected.")
label_count = len(label_names)
# fill in count for missing labels
if len(freq_keys) < label_count:
for label in label_names:
if label not in freq_keys:
freq_dict[label] = 0
return freq_dict
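    # Worked example (illustrative): with y=[0, 1, 1] and label_names=[0, 1, 2],
    # get_class_freq returns Counter({1: 2, 0: 1, 2: 0}) -- labels missing from y are
    # filled in with a zero count so every split reports the same label set.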
def callback_count_info(self, id_train, id_validate, id_test, all_metas):
"""
Tool to callback returned data count & ratio information
Parameters
----------
id_train: list, id of data set
id_validate: list, id of data set
id_test: list, id of data set
all_metas: dict, all meta info
Returns
-------
None
"""
metas = {}
train_count = len(id_train)
metas["train"] = train_count
validate_count = len(id_validate)
metas["validate"] = validate_count
test_count = len(id_test)
metas["test"] = test_count
original_count = train_count + validate_count + test_count
metas["original"] = original_count
metric_name = f"{self.metric_name}_count_info"
all_metas[metric_name] = metas
metas = {}
train_ratio = train_count / original_count
validate_ratio = validate_count / original_count
test_ratio = test_count / original_count
metas["train"] = round(train_ratio, ROUND_NUM)
metas["validate"] = round(validate_ratio, ROUND_NUM)
metas["test"] = round(test_ratio, ROUND_NUM)
metric_name = f"{self.metric_name}_ratio_info"
all_metas[metric_name] = metas
# stratified
all_metas["stratified"] = self.stratified
return all_metas
def callback_label_info(self, y_train, y_validate, y_test, all_metas):
"""
Tool to callback returned data label information
Parameters
----------
y_train: list, y
y_validate: list, y
y_test: list, y
all_metas: dict, all meta info
Returns
-------
None
"""
metas = {}
y_all = y_train + y_validate + y_test
label_names = None
if self.split_points is None:
label_names = list(set(y_all))
original_freq_dict = DataSplitter.get_class_freq(y_all, self.split_points, label_names)
metas["original"] = original_freq_dict
train_freq_dict = DataSplitter.get_class_freq(y_train, self.split_points, label_names)
metas["train"] = train_freq_dict
validate_freq_dict = DataSplitter.get_class_freq(y_validate, self.split_points, label_names)
metas["validate"] = validate_freq_dict
test_freq_dict = DataSplitter.get_class_freq(y_test, self.split_points, label_names)
metas["test"] = test_freq_dict
if self.split_points is not None:
metas["split_points"] = self.split_points
metas["continuous_label"] = True
else:
metas["label_names"] = label_names
metas["continuous_label"] = False
metric_name = f"{self.metric_name}_label_info"
all_metas[metric_name] = metas
return all_metas
def callback(self, metas):
metric = [Metric(self.metric_name, 0)]
self.callback_metric(metric_name=self.metric_name, metric_namespace=self.metric_namespace, metric_data=metric)
self.tracker.set_metric_meta(metric_name=self.metric_name, metric_namespace=self.metric_namespace,
metric_meta=MetricMeta(name=self.metric_name, metric_type=self.metric_type,
extra_metas=metas))
@staticmethod
def _match_id(data_inst, ids):
return data_inst.filter(lambda k, v: k in ids)
@staticmethod
def _set_output_table_schema(data_inst, schema):
if schema is not None and data_inst.count() > 0:
data_io.set_schema(data_inst, schema)
def split_data(self, data_inst, id_train, id_validate, id_test):
train_data = DataSplitter._match_id(data_inst, id_train)
validate_data = DataSplitter._match_id(data_inst, id_validate)
test_data = DataSplitter._match_id(data_inst, id_test)
schema = getattr(data_inst, "schema", None)
self._set_output_table_schema(train_data, schema)
self._set_output_table_schema(validate_data, schema)
self._set_output_table_schema(test_data, schema)
return train_data, validate_data, test_data
def fit(self, data_inst):
LOGGER.debug("fit method in data_split should not be called here.")
return
| 38.189711
| 118
| 0.623474
|
538da682e18d6402bc81761b5e76fa08e912dc5d
| 301
|
py
|
Python
|
django_socketio/example_project/urls.py
|
Reachpodofficial/django-socketio
|
d7d7a1a52b689c15de3dc54f5d20dc3eb293d771
|
[
"BSD-2-Clause"
] | 790
|
2015-01-01T17:57:37.000Z
|
2022-03-18T21:54:57.000Z
|
django_socketio/example_project/urls.py
|
Reachpodofficial/django-socketio
|
d7d7a1a52b689c15de3dc54f5d20dc3eb293d771
|
[
"BSD-2-Clause"
] | 59
|
2015-01-19T10:55:25.000Z
|
2021-09-07T09:12:38.000Z
|
django_socketio/example_project/urls.py
|
Reachpodofficial/django-socketio
|
d7d7a1a52b689c15de3dc54f5d20dc3eb293d771
|
[
"BSD-2-Clause"
] | 224
|
2015-01-09T02:44:57.000Z
|
2021-09-05T07:06:30.000Z
|
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url("", include("django_socketio.urls")),
url("", include("chat.urls")),
)
| 20.066667
| 51
| 0.697674
|
63acb73f567ce7a23c4a4394992a5b19ec91db07
| 1,230
|
py
|
Python
|
Text - character/text_io.py
|
pranshumalviya2/playwith_rnn
|
f9d4c851d9aef08ac265992daac137fa9b9b7a5e
|
[
"MIT"
] | 1
|
2017-07-23T18:19:39.000Z
|
2017-07-23T18:19:39.000Z
|
Text - character/text_io.py
|
pranshumalviya2/playwith_rnn
|
f9d4c851d9aef08ac265992daac137fa9b9b7a5e
|
[
"MIT"
] | 1
|
2017-11-29T06:39:12.000Z
|
2017-12-01T10:28:07.000Z
|
Text - character/text_io.py
|
pranshumalviya2/playwith_rnn
|
f9d4c851d9aef08ac265992daac137fa9b9b7a5e
|
[
"MIT"
] | null | null | null |
import numpy as np
from keras.utils import np_utils
# import sys
class inp(object):
def __init__(self, inp='karpathy', seq=10):
self.inp = inp
self.seq = seq
self.inpFile = self.inp+".txt"
self.outFile = self.inp+"-out.txt"
self.content = [x.lower() for x in open(self.inpFile).read()]
self.vocab = sorted(list(set(self.content)))
self.vocab_indices = dict((c, i) for i, c in enumerate(self.vocab))
self.indices_vocab = dict((i, c) for i, c in enumerate(self.vocab))
self.dataX = []
self.dataY = []
def get_content(self):
return self.content
def get_vocab(self):
return self.vocab,self.vocab_indices,self.indices_vocab
def text_seq(self):
for i in range(0, len(self.content) - self.seq, 1):
seq_in = self.content[i:i + self.seq]
seq_out = self.content[i + self.seq]
self.dataX.append([self.vocab_indices[char] for char in seq_in])
self.dataY.append(self.vocab_indices[seq_out])
def rnn_input(self):
n_patterns = len(self.dataX)
X = np.reshape(self.dataX, (n_patterns, self.seq, 1))
X = X / float(len(self.vocab))
y = np_utils.to_categorical(self.dataY)
return X,y
def save(self,new):
with open(self.outFile, 'w') as f:
f.write(new)
f.close()
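# Example usage (illustrative; expects a plain-text 'karpathy.txt' next to this script):
#   data = inp(inp='karpathy', seq=10)
#   data.text_seq()
#   X, y = data.rnn_input()   # X has shape (n_patterns, seq, 1); y is one-hot over the vocabulary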
| 29.285714
| 69
| 0.685366
|
f1135e3a07546c99e39585eb681d758aadc33f55
| 2,268
|
py
|
Python
|
propnet/web/tests/test_web.py
|
nile0316/propnet
|
3e1f1476c70a878c6eb43587c328d108b0e2a410
|
[
"BSD-3-Clause-LBNL"
] | 57
|
2018-01-09T14:56:20.000Z
|
2022-02-24T11:44:42.000Z
|
propnet/web/tests/test_web.py
|
ruriboshi/propnet
|
770703fb4fc344f785f89c02f26b31ea5733d2bd
|
[
"BSD-3-Clause-LBNL"
] | 214
|
2017-09-26T23:31:09.000Z
|
2022-03-14T04:50:58.000Z
|
propnet/web/tests/test_web.py
|
nile0316/propnet
|
3e1f1476c70a878c6eb43587c328d108b0e2a410
|
[
"BSD-3-Clause-LBNL"
] | 26
|
2017-10-29T21:34:22.000Z
|
2022-01-12T05:59:12.000Z
|
import unittest
import json
from propnet.core.graph import Graph
from propnet.core.registry import Registry
from propnet.models import add_builtin_models_to_registry
import os
no_store_file = os.environ.get('PROPNET_STORE_FILE') is None
if not no_store_file:
from propnet.web.app import app, symbol_layout, model_layout
from propnet.web.utils import graph_conversion
routes = [
'/'
]
@unittest.skipIf(no_store_file,
"No data store provided. Skipping web tests.")
class WebTest(unittest.TestCase):
"""
Base class for dash unittests
"""
@classmethod
def setUpClass(cls):
Registry.clear_all_registries()
add_builtin_models_to_registry()
def setUp(self):
self.app = app
self.client = self.app.server.test_client()
def test_home(self):
home = self.client.get('/')
self.assertEqual(home.status_code, 200)
def test_models(self):
models = self.client.get('/models')
self.assertEqual(models.status_code, 200)
def test_property(self):
properties = self.client.get('/property')
self.assertEqual(properties.status_code, 200)
def test_symbol_layout(self):
layout = symbol_layout("applied_stress")
self.assertEqual(layout.children[0], "Applied stress")
# test symbol layout for single node
layout = symbol_layout("grain_diameter")
self.assertTrue(layout.children[0], "Average grain diameter")
def test_model_layout(self):
layout = model_layout("density_relations")
self.assertTrue(layout.children[0], "Atomic Density")
def test_graph_conversion(self):
graph = Graph()
converted = graph_conversion(graph.get_networkx_graph())
serialized = json.dumps(converted)
self.assertIsNotNone(serialized)
# Ensure that there are both nodes and proper edges
self.assertIn('Band gap', [n['data']['label']
for n in converted if n['group'] == 'nodes'])
self.assertIn({'source': 'band_gap', "target": "Is Metallic"},
[n['data'] for n in converted if n['group'] == 'edges'])
def tearDown(self):
pass
if __name__ == "__main__":
unittest.main()
| 30.648649
| 80
| 0.655203
|
a20938a460ebf408c0d2952a2375a4621b6f836d
| 6,848
|
py
|
Python
|
mxtools/governor.py
|
mskinner5278/mxtools
|
4dda89890ffe669778354b9752cc47c4fac5f4c4
|
[
"BSD-3-Clause"
] | null | null | null |
mxtools/governor.py
|
mskinner5278/mxtools
|
4dda89890ffe669778354b9752cc47c4fac5f4c4
|
[
"BSD-3-Clause"
] | 4
|
2021-11-24T19:26:30.000Z
|
2022-03-21T20:52:52.000Z
|
mxtools/governor.py
|
mskinner5278/mxtools
|
4dda89890ffe669778354b9752cc47c4fac5f4c4
|
[
"BSD-3-Clause"
] | 3
|
2021-11-18T18:10:34.000Z
|
2021-11-23T18:18:27.000Z
|
"""
Rationale:
The Governor is a dynamic beast. The PVs it exposes depend
on the configuration files given to it. Therefore, it would be
hopelessly tedious to manually keep a Governor ophyd object in
sync with the Governor IOC. There are at least two solutions to
this issue:
1. Have the IOC auto-generate Governor ohpyd objects
This would require the IOC to produce a Python file (perhaps via
Jinja) and then to, somehow, synchronize this file with the rest
of the startup files for the beamline.
2. Have the ophyd object be dynamically generated at startup time
No modifications to the IOC would be required, as long as the IOC
exports enough metadata. The downside of this approach is that
there is some encapsulation leakage (direct cagets) and there's
a risk that the Governor IOC won't be up when this function runs.
We take approach #2 here, with the hope that the benefits will
outweigh the drawbacks.
NOTE: given that the Governor ophyd object is created dynamically,
the Governor IOC *must be running* when this file runs.
The overall available auto-generated API will be as follows
(all leafs are EpicsSignals):
govs = _make_governors('XF:19IDC-ES', name='govs')
#
# Global Governor control
#
# Controls whether any Governor is active or not:
govs.sel.active
# Selects which Governor to use ("Human", "Robot"):
govs.sel.config
# Alias for the Robot configuration
gov_rbt = govs.gov.Robot
#
# Meta-data
#
# Current state
gov_rbt.state
# All existing states
gov_rbt.states
# All existing devices
gov_rbt.devices
# All currently reachable states
gov_rbt.reachable
# All targets of the device "bsy"
gov_rbt.dev.bsy.targets
#
# Per-device configuration
#
# Position for target "Down" of device "bsy"
gov_rbt.dev.bsy.target_Down
# Low limit of device "bsy" when at state "SE"
gov_rbt.dev.bsy.at_SE.low
# Pos for high limit of device "bsy" when at state "SE":
gov_rbt.dev.bsy.at_SE.high
#
# Changing state
#
# Attempt to move the Governor to the SE state
# (behaves as a positioner)
RE(bps.abs_set(gov_rbt, 'SE', wait=True))
"""
from typing import Dict, List
from ophyd import Component as Cpt
from ophyd import Device
from ophyd import DynamicDeviceComponent as DDCpt
from ophyd import EpicsSignal, EpicsSignalRO, PVPositionerPC, get_cl
class GovernorPositioner(PVPositionerPC):
"""Mixin to control the Governor state as a positioner"""
setpoint = Cpt(EpicsSignal, "}Cmd:Go-Cmd")
readback = Cpt(EpicsSignalRO, "}Sts:State-I")
done = Cpt(EpicsSignalRO, "}Sts:Busy-Sts")
done_value = 0
class GovernorMeta(Device):
"""Mixin to expose metadata for the Governor"""
# Metadata
# Current state: str
state = Cpt(EpicsSignalRO, "}Sts:State-I")
# All available states: List[str]
states = Cpt(EpicsSignalRO, "}Sts:States-I")
# States that are reachable from current state: List[str]
reachable = Cpt(EpicsSignalRO, "}Sts:Reach-I")
# All existing "devices": List[str]
devices = Cpt(EpicsSignalRO, "}Sts:Devs-I")
class GovernorDriver(Device):
# Active: enum ["Inactive", "Active"]
# controls whether any Governor can make any changes.
# When moving the robot, it is useful to set
# active = "Inactive" beforehand to prevent the
# goniometer from moving and causing a crash.
active = Cpt(EpicsSignal, "Active-Sel")
# Config: enum with available governors
# (typically ["Human", "Robot"], but depends on the
# configuration file)
# Select which Governor to use.
config = Cpt(EpicsSignal, "Config-Sel", string=True)
class GovernorDeviceLimits(Device):
low = Cpt(EpicsSignal, "LLim-Pos")
high = Cpt(EpicsSignal, "HLim-Pos")
def _make_governor_device(targets: List[str], states: List[str]) -> type:
"""Returns a dynamically created class that represents a
Governor device, with its existing targets and limits."""
targets_attr = [("targets", Cpt(EpicsSignal, "Sts:Tgts-I"))]
# Targets of a device. A target is a named position.
# Example PV: XF:19IDC-ES{Gov:Robot-Dev:cxy}Pos:Near-Pos
# Target named "Near" for the cxy device.
target_attrs = [(f"target_{target}", Cpt(EpicsSignal, f"Pos:{target}-Pos")) for target in targets]
# Limits of a device for each state.
# Example PVs: XF:19IDC-ES{Gov:Robot-Dev:cxy}SA:LLim-Pos
# XF:19IDC-ES{Gov:Robot-Dev:cxy}SA:HLim-Pos
# Low and High limits for the cxy device at state SA
limit_attrs = [(f"at_{state}", Cpt(GovernorDeviceLimits, f"{state}:")) for state in states]
return type("GovernorDevice", (Device,), dict(targets_attr + target_attrs + limit_attrs))
def _make_governor(prefix: str) -> type:
"""Returns a dynamically created class that represents a
single Governor configuration (example: "Robot")
"""
cl = get_cl()
# Fetch all Governor device names
devices: List[str] = cl.caget(f"{prefix}}}Sts:Devs-I")
# Fetch all Governor state names
states: List[str] = cl.caget(f"{prefix}}}Sts:States-I")
# Fetch all existing target names for each device
device_targets: Dict[str, List[str]] = {
device: cl.caget(f"{prefix}-Dev:{device}}}Sts:Tgts-I") for device in devices
}
class Governor(GovernorPositioner, GovernorMeta):
dev = DDCpt(
{
device: (
_make_governor_device(targets, states),
f"-Dev:{device}}}",
dict(),
)
for device, targets in device_targets.items()
}
)
return Governor
def _make_governors(prefix: str, name: str) -> "Governors": # noqa: F821
"""Returns a dynamically created object that represents
all available Governors, and allows switching between
them, as well as deactivating them.
"""
cl = get_cl()
gov_names: List[str] = cl.caget(f"{prefix}{{Gov}}Sts:Configs-I")
# If there is only one Governor, cl.caget will return str
# instead of a list with a single str
if isinstance(gov_names, str):
gov_names = [gov_names]
try:
gov_prefixes: List[str] = [f"{prefix}{{Gov:{name}" for name in gov_names]
except: # noqa: E722
# Iteration failed, likely there is no Governor available
gov_names = []
gov_prefixes = []
class Governors(Device):
sel = Cpt(GovernorDriver, f"{prefix}{{Gov}}")
gov = DDCpt(
{
gov_name: (_make_governor(gov_prefix), gov_prefix, dict())
for gov_name, gov_prefix in zip(gov_names, gov_prefixes)
}
)
return Governors("", name=name)
| 30.435556
| 102
| 0.65844
|
819e70addddb3290dffdeb3c906eab23eb0bcb75
| 38
|
py
|
Python
|
onelinerizer/__init__.py
|
mayl8822/onelinerizer
|
bad341f261d35e56872b4c22297a44dc6d5cfab3
|
[
"MIT"
] | 1,062
|
2015-11-18T01:04:33.000Z
|
2022-03-29T07:13:30.000Z
|
onelinerizer/__init__.py
|
mayl8822/onelinerizer
|
bad341f261d35e56872b4c22297a44dc6d5cfab3
|
[
"MIT"
] | 26
|
2015-11-17T06:58:07.000Z
|
2022-01-15T18:11:16.000Z
|
onelinerizer/__init__.py
|
mayl8822/onelinerizer
|
bad341f261d35e56872b4c22297a44dc6d5cfab3
|
[
"MIT"
] | 100
|
2015-11-17T09:01:22.000Z
|
2021-09-12T13:58:28.000Z
|
from .onelinerizer import onelinerize
| 19
| 37
| 0.868421
|
815eff50687af154b4c3cffe270bfdf8250af154
| 13,953
|
py
|
Python
|
QRSDetectorOnline.py
|
TeamCanaria/fatigue-analysis
|
cbd151b2cdac06fbc824f9942d4b453350bc6487
|
[
"MIT"
] | 1
|
2019-06-28T17:42:19.000Z
|
2019-06-28T17:42:19.000Z
|
QRSDetectorOnline.py
|
adipurapunya/ecg-qrs-detector-GPU-python
|
7c5221f63a7c21ecbaf53fa49cf83123a23d430f
|
[
"MIT"
] | null | null | null |
QRSDetectorOnline.py
|
adipurapunya/ecg-qrs-detector-GPU-python
|
7c5221f63a7c21ecbaf53fa49cf83123a23d430f
|
[
"MIT"
] | 1
|
2019-11-26T08:10:36.000Z
|
2019-11-26T08:10:36.000Z
|
import serial
import numpy as np
from collections import deque
from time import gmtime, strftime
from scipy.signal import butter, lfilter
LOG_DIR = "logs/"
class QRSDetectorOnline(object):
"""
    Python Online ECG QRS Detector based on the Pan-Tompkins algorithm.
Michał Sznajder (Jagiellonian University) - technical contact (msznajder@gmail.com)
Marta Łukowska (Jagiellonian University)
    The module is an online Python implementation of QRS complex detection in the ECG signal based
    on the Pan-Tompkins algorithm: Pan J, Tompkins W.J., A real-time QRS detection algorithm,
IEEE Transactions on Biomedical Engineering, Vol. BME-32, No. 3, March 1985, pp. 230-236.
The QRS complex corresponds to the depolarization of the right and left ventricles of the human heart. It is the most visually obvious part of the ECG signal. QRS complex detection is essential for time-domain ECG signal analyses, namely heart rate variability. It makes it possible to compute inter-beat interval (RR interval) values that correspond to the time between two consecutive R peaks. Thus, a QRS complex detector is an ECG-based heart contraction detector.
    The online version detects QRS complexes in an ECG signal acquired in real time. Therefore, it requires an ECG device to be plugged in and delivering a signal in real time.
This implementation of a QRS Complex Detector is by no means a certified medical tool and should not be used in health monitoring. It was created and used for experimental purposes in psychophysiology and psychology.
You can find more information in module documentation:
https://github.com/c-labpl/qrs_detector
If you use these modules in a research project, please consider citing it:
https://zenodo.org/record/583770
If you use these modules in any other project, please refer to MIT open-source license.
MIT License
Copyright (c) 2017 Michał Sznajder, Marta Łukowska
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def __init__(self, port, baud_rate):
"""
QRSDetector class initialisation method.
:param str port: port to which ECG device is connected
:param str baud_rate: baud rate of data received from ECG device
"""
# Configuration parameters.
self.signal_frequency = 250 # Set ECG device frequency in samples per second here.
self.number_of_samples_stored = 200 # Change proportionally when adjusting frequency (in samples).
self.possible_measurement_upper_limit = 10 # ECG device physiologically upper measurement limit.
self.filter_lowcut = 0.0
self.filter_highcut = 15.0
self.filter_order = 1
self.integration_window = 15 # Change proportionally when adjusting frequency (in samples).
self.findpeaks_limit = 0.35
self.findpeaks_spacing = 50 # Change proportionally when adjusting frequency (in samples).
self.detection_window = 40 # Change proportionally when adjusting frequency (in samples).
self.refractory_period = 120 # Change proportionally when adjusting frequency (in samples).
self.qrs_peak_filtering_factor = 0.125
self.noise_peak_filtering_factor = 0.125
self.qrs_noise_diff_weight = 0.25
# Measurements and calculated values.
self.timestamp = 0
self.measurement = 0
self.detected_qrs = 0
self.most_recent_measurements = deque([0], self.number_of_samples_stored)
self.samples_since_last_detected_qrs = 0
self.qrs_peak_value = 0.0
self.noise_peak_value = 0.0
self.threshold_value = 0.0
# Data logging.
self.log_path = "{:s}QRS_online_detector_log_{:s}.csv".format(LOG_DIR, strftime("%Y_%m_%d_%H_%M_%S", gmtime()))
# Connect to ECG device and start the detector.
self.connect_to_ecg(port=port, baud_rate=baud_rate)
"""Setting connection to ECG device methods."""
def connect_to_ecg(self, port, baud_rate):
"""
Method responsible for connecting to the ECG device and starting to read ECG measurements.
:param str port: port to which ECG device is connected
:param str baud_rate: baud rate of data received from ECG device
"""
try:
serial_port = serial.Serial(port, baud_rate)
self.log_data(self.log_path, "timestamp,ecg_measurement,qrs_detected\n")
print("Connected! Starting reading ECG measurements.")
except serial.SerialException:
print("Cannot connect to provided port!")
return
while True:
raw_measurement = serial_port.readline()
self.process_measurement(raw_measurement=raw_measurement)
if self.timestamp != 0:
self.log_data(self.log_path, "{:d},{:.10f},{:d}\n".format(int(self.timestamp),
self.measurement,
self.detected_qrs))
"""ECG measurements data processing methods."""
def process_measurement(self, raw_measurement):
"""
Method responsible for parsing and initial processing of ECG measured data sample.
:param str raw_measurement: ECG most recent raw measurement in "timestamp,measurement" format
"""
raw_measurement_split = raw_measurement.decode().rstrip().split(',')
# Parse raw ECG data - modify this part according to your device's data format.
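# Illustrative raw line (hypothetical device output): b"1623.0,0.42\r\n" decodes,
# strips the line ending and splits into timestamp 1623.0 and measurement 0.42.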
if len(raw_measurement_split) != 2:
return
try:
self.detected_qrs = 0
self.timestamp = float(raw_measurement_split[0])
self.measurement = float(raw_measurement_split[1])
except ValueError:
return
# Reject physiologically impossible ECG measurements - filters out device measurement errors.
if self.measurement > self.possible_measurement_upper_limit:
return
# Append the measurement to the deque that keeps a rotating window of the most recent samples for further analysis and detection.
self.most_recent_measurements.append(self.measurement)
self.detect_peaks(self.most_recent_measurements)
def detect_peaks(self, most_recent_measurements):
"""
Method responsible for extracting peaks from recently received ECG measurements through processing.
:param deque most_recent_measurements: most recent ECG measurements array
"""
# Measurements filtering - 0-15 Hz band pass filter.
filtered_ecg_measurements = self.bandpass_filter(most_recent_measurements, lowcut=self.filter_lowcut,
highcut=self.filter_highcut, signal_freq=self.signal_frequency,
filter_order=self.filter_order)
# Derivative - provides QRS slope information.
differentiated_ecg_measurements = np.ediff1d(filtered_ecg_measurements)
# Squaring - intensifies values received in derivative.
squared_ecg_measurements = differentiated_ecg_measurements ** 2
# Moving-window integration.
integrated_ecg_measurements = np.convolve(squared_ecg_measurements, np.ones(self.integration_window))
# Fiducial mark - peak detection on integrated measurements.
detected_peaks_indices = self.findpeaks(data=integrated_ecg_measurements,
limit=self.findpeaks_limit,
spacing=self.findpeaks_spacing)
detected_peaks_indices = detected_peaks_indices[
detected_peaks_indices > self.number_of_samples_stored - self.detection_window]
detected_peaks_values = integrated_ecg_measurements[detected_peaks_indices]
self.detect_qrs(detected_peaks_values=detected_peaks_values)
"""QRS detection methods."""
def detect_qrs(self, detected_peaks_values):
"""
Method responsible for classifying detected ECG measurements peaks either as noise or as QRS complex (heart beat).
:param array detected_peaks_values: detected peaks values array
"""
self.samples_since_last_detected_qrs += 1
# After a valid QRS complex detection, there is a 200 ms refractory period before the next one can be detected.
if self.samples_since_last_detected_qrs > self.refractory_period:
# Check whether any peak was detected in analysed samples window.
if len(detected_peaks_values) > 0:
# Take the last one detected in analysed samples window as the most recent.
most_recent_peak_value = detected_peaks_values[-1]
# Peak must be classified either as a noise peak or a QRS peak.
# To be classified as a QRS peak it must exceed dynamically set threshold value.
if most_recent_peak_value > self.threshold_value:
self.handle_detection()
self.samples_since_last_detected_qrs = 0
# We mark QRS detection with '1' flag in 'qrs_detected' log column ('0' otherwise).
self.detected_qrs = 1
# Adjust QRS peak value used later for setting QRS-noise threshold.
self.qrs_peak_value = self.qrs_peak_filtering_factor * most_recent_peak_value + \
(1 - self.qrs_peak_filtering_factor) * self.qrs_peak_value
else:
# Adjust noise peak value used later for setting QRS-noise threshold.
self.noise_peak_value = self.noise_peak_filtering_factor * most_recent_peak_value + \
(1 - self.noise_peak_filtering_factor) * self.noise_peak_value
# Adjust QRS-noise threshold value based on previously detected QRS or noise peaks value.
self.threshold_value = self.noise_peak_value + \
self.qrs_noise_diff_weight * (self.qrs_peak_value - self.noise_peak_value)
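# Illustrative numbers (hypothetical, not from a real recording): with
# qrs_peak_value = 1.0, noise_peak_value = 0.2 and qrs_noise_diff_weight = 0.25,
# the threshold becomes 0.2 + 0.25 * (1.0 - 0.2) = 0.4; a subsequent peak of 0.9
# exceeds it, is counted as a QRS complex, and moves the running QRS peak
# estimate to 0.125 * 0.9 + 0.875 * 1.0 = 0.9875.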
def handle_detection(self):
"""
Method responsible for generating any kind of response for detected QRS complex.
"""
print("Pulse")
"""Tools methods."""
def log_data(self, path, data):
"""
Method responsible for logging measured ECG and detection results to a log file.
:param str path: path to a log file
:param str data: data line to log
"""
with open(path, "a") as fin:
fin.write(data)
def bandpass_filter(self, data, lowcut, highcut, signal_freq, filter_order):
"""
Method responsible for creating and applying Butterworth filter.
:param deque data: raw data
:param float lowcut: filter lowcut frequency value
:param float highcut: filter highcut frequency value
:param int signal_freq: signal frequency in samples per second (Hz)
:param int filter_order: filter order
:return array: filtered data
"""
"""Constructs the signal filter and applies it to the given data set."""
nyquist_freq = 0.5 * signal_freq
low = lowcut / nyquist_freq
high = highcut / nyquist_freq
b, a = butter(filter_order, [low, high], btype="band")
y = lfilter(b, a, data)
return y
def findpeaks(self, data, spacing=1, limit=None):
"""
Janko Slavic peak detection algorithm and implementation.
https://github.com/jankoslavic/py-tools/tree/master/findpeaks
Finds peaks in `data` which are of `spacing` width and >=`limit`.
:param ndarray data: data
:param float spacing: minimum spacing to the next peak (should be 1 or more)
:param float limit: peaks should have a value greater than or equal to this limit
:return array: detected peaks indexes array
"""
len = data.size
x = np.zeros(len + 2 * spacing)
x[:spacing] = data[0] - 1.e-6
x[-spacing:] = data[-1] - 1.e-6
x[spacing:spacing + len] = data
peak_candidate = np.zeros(len)
peak_candidate[:] = True
for s in range(spacing):
start = spacing - s - 1
h_b = x[start: start + len] # before
start = spacing
h_c = x[start: start + len] # central
start = spacing + s + 1
h_a = x[start: start + len] # after
peak_candidate = np.logical_and(peak_candidate, np.logical_and(h_c > h_b, h_c > h_a))
ind = np.argwhere(peak_candidate)
ind = ind.reshape(ind.size)
if limit is not None:
ind = ind[data[ind] > limit]
return ind
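# Illustrative call (hypothetical data): calling findpeaks on
# np.array([0., 2., 0., 3., 0.]) with spacing=1 and limit=1.0 returns
# array([1, 3]) - the indices of the two local maxima above the limit.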
if __name__ == "__main__":
qrs_detector = QRSDetectorOnline(port="/dev/cu.usbmodem14311", baud_rate="115200")
| 47.459184
| 472
| 0.662582
|
3d648ffab67768efa1c2f05e13890f00651d48d6
| 3,256
|
py
|
Python
|
tests/mime/message/headers/encoding_test.py
|
plq/flanker
|
d71c36eca84ff102a32d8d9c6188900828409018
|
[
"Apache-2.0"
] | null | null | null |
tests/mime/message/headers/encoding_test.py
|
plq/flanker
|
d71c36eca84ff102a32d8d9c6188900828409018
|
[
"Apache-2.0"
] | null | null | null |
tests/mime/message/headers/encoding_test.py
|
plq/flanker
|
d71c36eca84ff102a32d8d9c6188900828409018
|
[
"Apache-2.0"
] | null | null | null |
# coding:utf-8
from email.header import Header
from nose.tools import eq_, ok_
from mock import patch, Mock
from flanker.mime.message import headers
from flanker.mime.message.headers.encoding import (_encode_unstructured,
encode_string)
from flanker.mime.message import part
from flanker.mime import create
from tests import LONG_HEADER, ENCODED_HEADER
def encodings_test():
s = (u"Это сообщение с длинным сабжектом "
u"специально чтобы проверить кодировки")
eq_(s, headers.mime_to_unicode(headers.to_mime('Subject', s)))
s = "this is sample ascii string"
eq_(s, headers.to_mime('Subject',s))
eq_(s, headers.mime_to_unicode(s))
s = ("This is a long subject with commas, bob, Jay, suzy, tom, over"
" 75,250,234 times!")
folded_s = ("This is a long subject with commas, bob, Jay, suzy, tom, over"
"\n 75,250,234 times!")
eq_(folded_s, headers.to_mime('Subject', s))
def encode_address_test():
eq_('john.smith@example.com', headers.to_mime('To', 'john.smith@example.com'))
eq_('"John Smith" <john.smith@example.com>', headers.to_mime('To', '"John Smith" <john.smith@example.com>'))
eq_('Федот <стрелец@письмо.рф>', headers.to_mime('To', 'Федот <стрелец@письмо.рф>'))
eq_('=?utf-8?b?0KTQtdC00L7Rgg==?= <foo@xn--h1aigbl0e.xn--p1ai>', headers.to_mime('To', 'Федот <foo@письмо.рф>'))
def string_maxlinelen_test():
"""
If the encoded string is longer than the maximum line length (76 by default),
it is broken down into lines. A different maximum line length can be
provided via the `maxlinelen` parameter.
"""
eq_("very\n loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong",
encode_string(None, "very loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong"))
eq_("very loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong",
encode_string(None, "very loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong", maxlinelen=78))
@patch.object(part.MimePart, 'was_changed', Mock(return_value=True))
def max_header_length_test():
message = create.from_string(LONG_HEADER)
# this used to fail because it exceeded the maximum recursion depth
message.to_string()
ascii_subject = "This is simple ascii subject"
eq_(Header(ascii_subject.encode("ascii"), "ascii", header_name="Subject"),
_encode_unstructured("Subject", ascii_subject))
unicode_subject = (u"Это сообщение с длинным сабжектом "
u"специально чтобы проверить кодировки")
eq_(Header(unicode_subject.encode("utf-8"), "utf-8", header_name="Subject"),
_encode_unstructured("Subject", unicode_subject))
def add_header_preserve_original_encoding_test():
message = create.from_string(ENCODED_HEADER)
# save the original encoded From header
original_from = message.headers.getraw('from')
# check if the raw header was not decoded
ok_('=?UTF-8?B?Rm9vLCBCYXI=?=' in original_from)
# add a header
message.headers.add('foo', 'bar')
# check original encoded header is still in the mime string
ok_(original_from in message.to_string())
| 38.305882
| 125
| 0.708845
|
efe83e0dd7203eca1c9154458900e09417a2739d
| 472
|
py
|
Python
|
conan_build_helper/headeronly.py
|
blockspacer/conan_build_helper
|
55dfafa6d20079ff13dbc45ccee4f104f53c9632
|
[
"MIT"
] | 1
|
2022-02-08T20:15:01.000Z
|
2022-02-08T20:15:01.000Z
|
conan_build_helper/headeronly.py
|
blockspacer/conan_build_helper
|
55dfafa6d20079ff13dbc45ccee4f104f53c9632
|
[
"MIT"
] | null | null | null |
conan_build_helper/headeronly.py
|
blockspacer/conan_build_helper
|
55dfafa6d20079ff13dbc45ccee4f104f53c9632
|
[
"MIT"
] | 1
|
2020-05-19T01:03:20.000Z
|
2020-05-19T01:03:20.000Z
|
from conans import ConanFile
from conan_build_helper.require_scm import RequireScm
import os
def package_headers(conanfile):
include_dir = conanfile._repository_include_dir_required
for ext in ['*.h', '*.hpp', '*.hxx', '*.hcc']:
conanfile.copy(ext, dst='include', src=include_dir)
class HeaderOnlyPackage(ConanFile, RequireScm):
def package(self):
package_headers(self)
def package_id(self):
self.info.header_only()
| 27.764706
| 64
| 0.697034
|
6998193cfd5da0fc5412702d9f4125e021f01004
| 1,080
|
py
|
Python
|
KalmanFilter.py
|
davidliyutong/VideoAnalyzer
|
5cf3d589facab559269394fb65f162972b977d65
|
[
"MIT"
] | null | null | null |
KalmanFilter.py
|
davidliyutong/VideoAnalyzer
|
5cf3d589facab559269394fb65f162972b977d65
|
[
"MIT"
] | null | null | null |
KalmanFilter.py
|
davidliyutong/VideoAnalyzer
|
5cf3d589facab559269394fb65f162972b977d65
|
[
"MIT"
] | null | null | null |
class KalmanFilter:
def __init__(self, Q=0.001, R=0.01):
self.Q = Q
self.R = R
self.P_k_k1 = 1
self.Kg = 0
self.P_k1_k1 = 1
self.x_k_k1 = 0
self.ADC_OLD_Value = 0
self.Z_k = 0
self.kalman_adc_old = 0
def kalman(self, ADC_Value):
self.Z_k = ADC_Value
if (abs(self.kalman_adc_old - ADC_Value) >= 60):
self.x_k1_k1 = ADC_Value * 0.382 + self.kalman_adc_old * 0.618
else:
self.x_k1_k1 = self.kalman_adc_old
self.x_k_k1 = self.x_k1_k1
self.P_k_k1 = self.P_k1_k1 + self.Q
self.Kg = self.P_k_k1 / (self.P_k_k1 + self.R)
kalman_adc = self.x_k_k1 + self.Kg * (self.Z_k - self.kalman_adc_old)
self.P_k1_k1 = (1 - self.Kg) * self.P_k_k1
self.P_k_k1 = self.P_k1_k1
self.kalman_adc_old = kalman_adc
return kalman_adc
def filter(self, ADC_Value_list):
adc = []
for i in range(len(ADC_Value_list)):
adc.append(self.kalman(ADC_Value_list[i]))
return adc
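# Illustrative usage sketch (not part of the original module); the ADC readings
# below are made-up values, purely to show the call pattern.
if __name__ == "__main__":
    kf = KalmanFilter(Q=0.001, R=0.01)
    noisy_readings = [512, 515, 510, 640, 508, 511]  # hypothetical raw ADC samples
    print(kf.filter(noisy_readings))  # smoothed sequence, same length as the input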
| 27
| 77
| 0.567593
|
2346e04d04da6ed47b6bb8995ea3ed2b9d389c20
| 504
|
py
|
Python
|
normalizer.py
|
somiljain7/unsupervised-learning
|
2d698e5ba25ede8e43e47dfe3352db1f2d978278
|
[
"Apache-2.0"
] | null | null | null |
normalizer.py
|
somiljain7/unsupervised-learning
|
2d698e5ba25ede8e43e47dfe3352db1f2d978278
|
[
"Apache-2.0"
] | null | null | null |
normalizer.py
|
somiljain7/unsupervised-learning
|
2d698e5ba25ede8e43e47dfe3352db1f2d978278
|
[
"Apache-2.0"
] | null | null | null |
# Import Normalizer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.preprocessing import Normalizer
# Create a normalizer: normalizer
normalizer = Normalizer()
# Create a KMeans model with 10 clusters: kmeans
kmeans = KMeans(n_clusters=10)
# Make a pipeline chaining normalizer and kmeans: pipeline
pipeline = make_pipeline(normalizer, kmeans)
# Fit pipeline to the daily price movements (movements is assumed to be defined elsewhere, e.g. a NumPy array with one row of price movements per company)
pipeline.fit(movements)
| 36
| 58
| 0.827381
|
5e7e5d051fdcf60d3ab8dc486a4f29b58b181513
| 1,001
|
py
|
Python
|
pyopenproject/business/services/command/version/find_projects.py
|
webu/pyopenproject
|
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
|
[
"MIT"
] | 5
|
2021-02-25T15:54:28.000Z
|
2021-04-22T15:43:36.000Z
|
pyopenproject/business/services/command/version/find_projects.py
|
webu/pyopenproject
|
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
|
[
"MIT"
] | 7
|
2021-03-15T16:26:23.000Z
|
2022-03-16T13:45:18.000Z
|
pyopenproject/business/services/command/version/find_projects.py
|
webu/pyopenproject
|
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
|
[
"MIT"
] | 6
|
2021-06-18T18:59:11.000Z
|
2022-03-27T04:58:52.000Z
|
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.get_request import GetRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.find_list_command import FindListCommand
from pyopenproject.business.services.command.version.version_command import VersionCommand
from pyopenproject.model.project import Project
class FindProjects(VersionCommand):
def __init__(self, connection):
super().__init__(connection)
def execute(self):
try:
request = GetRequest(self.connection, f"{self.CONTEXT}/available_projects")
return FindListCommand(self.connection, request, Project).execute()
# for tEntry in json_obj["_embedded"]["elements"]:
# yield p.Project(tEntry)
except RequestError as re:
raise BusinessError("Error finding projects available for versions") from re
| 45.5
| 90
| 0.764236
|
ee680b9c4a7b300f726473268b415ef16807ef4a
| 13,460
|
py
|
Python
|
mmdet/models/detectors/base.py
|
JulioZhao97/mmdetection-LOGODet
|
4e06357c1e88c74e0f3aafc30f9a503e2833f1a8
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/detectors/base.py
|
JulioZhao97/mmdetection-LOGODet
|
4e06357c1e88c74e0f3aafc30f9a503e2833f1a8
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/detectors/base.py
|
JulioZhao97/mmdetection-LOGODet
|
4e06357c1e88c74e0f3aafc30f9a503e2833f1a8
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import mmcv
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from mmcv.runner import auto_fp16
from mmcv.utils import print_log
from mmdet.utils import get_root_logger
class BaseDetector(nn.Module, metaclass=ABCMeta):
"""Base class for detectors."""
def __init__(self):
super(BaseDetector, self).__init__()
self.fp16_enabled = False
@property
def with_neck(self):
"""bool: whether the detector has a neck"""
return hasattr(self, 'neck') and self.neck is not None
# TODO: these properties need to be carefully handled
# for both single stage & two stage detectors
@property
def with_shared_head(self):
"""bool: whether the detector has a shared head in the RoI Head"""
return hasattr(self, 'roi_head') and self.roi_head.with_shared_head
@property
def with_bbox(self):
"""bool: whether the detector has a bbox head"""
return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox)
or (hasattr(self, 'bbox_head') and self.bbox_head is not None))
@property
def with_mask(self):
"""bool: whether the detector has a mask head"""
return ((hasattr(self, 'roi_head') and self.roi_head.with_mask)
or (hasattr(self, 'mask_head') and self.mask_head is not None))
@abstractmethod
def extract_feat(self, imgs):
"""Extract features from images."""
pass
def extract_feats(self, imgs):
"""Extract features from multiple images.
Args:
imgs (list[torch.Tensor]): A list of images. The images are
augmented from the same image but in different ways.
Returns:
list[torch.Tensor]: Features of different images
"""
assert isinstance(imgs, list)
return [self.extract_feat(img) for img in imgs]
@abstractmethod
def forward_train(self, imgs, img_metas, **kwargs):
"""
Args:
img (list[Tensor]): List of tensors of shape (1, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys, see
:class:`mmdet.datasets.pipelines.Collect`.
kwargs (keyword arguments): Specific to concrete implementation.
"""
pass
async def async_simple_test(self, img, img_metas, **kwargs):
raise NotImplementedError
@abstractmethod
def simple_test(self, img, img_metas, **kwargs):
pass
@abstractmethod
def aug_test(self, imgs, img_metas, **kwargs):
"""Test function with test time augmentation."""
pass
def init_weights(self, pretrained=None):
"""Initialize the weights in detector.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
if pretrained is not None:
logger = get_root_logger()
print_log(f'load model from: {pretrained}', logger=logger)
async def aforward_test(self, *, img, img_metas, **kwargs):
for var, name in [(img, 'img'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
num_augs = len(img)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(img)}) '
f'!= num of image metas ({len(img_metas)})')
# TODO: remove the restriction of samples_per_gpu == 1 when prepared
samples_per_gpu = img[0].size(0)
assert samples_per_gpu == 1
if num_augs == 1:
return await self.async_simple_test(img[0], img_metas[0], **kwargs)
else:
raise NotImplementedError
# cpp
def forward_test(self, imgs, img_metas, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch.
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(imgs)}) '
f'!= num of image meta ({len(img_metas)})')
if num_augs == 1:
# proposals (List[List[Tensor]]): the outer list indicates
# test-time augs (multiscale, flip, etc.) and the inner list
# indicates images in a batch.
# The Tensor should have a shape Px4, where P is the number of
# proposals.
if 'proposals' in kwargs:
kwargs['proposals'] = kwargs['proposals'][0]
return self.simple_test(imgs[0], img_metas[0], **kwargs)
else:
assert imgs[0].size(0) == 1, 'aug test does not support ' \
'inference with batch size ' \
f'{imgs[0].size(0)}'
# TODO: support test augmentation for predefined proposals
assert 'proposals' not in kwargs
return self.aug_test(imgs, img_metas, **kwargs)
'''
def forward_test(self, imgs):
return self.simple_test(imgs)
'''
@auto_fp16(apply_to=('img', ))
def forward(self, img, img_metas, return_loss=True, **kwargs):
"""Calls either :func:`forward_train` or :func:`forward_test` depending
on whether ``return_loss`` is ``True``.
Note this setting will change the expected inputs. When
``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
and List[dict]), and when ``return_loss=False``, img and img_meta
should be double nested (i.e. List[Tensor], List[List[dict]]), with
the outer list indicating test time augmentations.
"""
if return_loss:
return self.forward_train(img, img_metas, **kwargs)
else:
return self.forward_test(img, img_metas, **kwargs)
'''
def forward(self, img):
return self.forward_test(img)
'''
def _parse_losses(self, losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \
which may be a weighted sum of all losses, log_vars contains \
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
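# Illustrative example (hypothetical values): for
#   losses = {'loss_cls': tensor(0.5), 'loss_bbox': [tensor(0.3), tensor(0.1)], 'acc': tensor(0.9)}
# log_vars ends up as {'loss_cls': 0.5, 'loss_bbox': 0.4, 'acc': 0.9, 'loss': 0.9}
# (only keys containing 'loss' are summed into the total) and the returned
# `loss` tensor is 0.5 + 0.4 = 0.9, which is what gets backpropagated.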
def train_step(self, data, optimizer):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \
``num_samples``.
- ``loss`` is a tensor for back propagation, which can be a \
weighted sum of multiple losses.
- ``log_vars`` contains all the variables to be sent to the
logger.
- ``num_samples`` indicates the batch size (when the model is \
DDP, it means the batch size on each GPU), which is used for \
averaging the logs.
"""
losses = self(**data)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
return outputs
def val_step(self, data, optimizer):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but is used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
losses = self(**data)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
return outputs
def show_result(self,
img,
result,
score_thr=0.3,
bbox_color='green',
text_color='green',
thickness=1,
font_scale=0.5,
win_name='',
show=False,
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
text_color (str or tuple or :obj:`Color`): Color of texts.
thickness (int): Thickness of lines.
font_scale (float): Font scales of texts.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`
"""
img = mmcv.imread(img)
img = img.copy()
if isinstance(result, tuple):
bbox_result, segm_result = result
if isinstance(segm_result, tuple):
segm_result = segm_result[0] # ms rcnn
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
# draw segmentation masks
if segm_result is not None and len(labels) > 0: # non empty
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
np.random.seed(42)
color_masks = [
np.random.randint(0, 256, (1, 3), dtype=np.uint8)
for _ in range(max(labels) + 1)
]
for i in inds:
i = int(i)
color_mask = color_masks[labels[i]]
mask = segms[i].astype(bool)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
# if out_file specified, do not show image in window
if out_file is not None:
show = False
# draw bounding boxes
mmcv.imshow_det_bboxes(
img,
bboxes,
labels,
class_names=self.CLASSES,
score_thr=score_thr,
bbox_color=bbox_color,
text_color=text_color,
thickness=thickness,
font_scale=font_scale,
win_name=win_name,
show=show,
wait_time=wait_time,
out_file=out_file)
if not (show or out_file):
return img
| 38.130312
| 79
| 0.572214
|
0d0f1077d4ad6a93998a2d687b0f777a78e7f649
| 775
|
py
|
Python
|
marker/tests/test_filter.py
|
wang-ye/marker
|
3e2d2005c2ae355084a7ad44a55cb59b7e151039
|
[
"MIT"
] | null | null | null |
marker/tests/test_filter.py
|
wang-ye/marker
|
3e2d2005c2ae355084a7ad44a55cb59b7e151039
|
[
"MIT"
] | null | null | null |
marker/tests/test_filter.py
|
wang-ye/marker
|
3e2d2005c2ae355084a7ad44a55cb59b7e151039
|
[
"MIT"
] | null | null | null |
from marker.filter import filter_commands
from marker.command import Command
import unittest
class FilterTest(unittest.TestCase):
def setUp(self):
cmd1 = Command('marker_cmd', alias='marker', description='test mark commands')
cmd2 = Command('painter_cmd', alias='painter', description='mark commands')
cmd3 = Command('drawer_cmd', alias='drawer', description='markdrawer commands')
self.cmds = [cmd1, cmd2, cmd3]
def test_filter_commands_alias(self):
assert filter_commands(self.cmds, 'paint') == [self.cmds[1]]
def test_filter_with_desp(self):
assert filter_commands(self.cmds, 'markdrawer') == [self.cmds[2]]
def test_filter_empty_result(self):
assert filter_commands(self.cmds, 'print') == []
| 33.695652
| 87
| 0.694194
|
34d2dfc116d395a36989d9cc3fba9f72ac7996a5
| 5,594
|
py
|
Python
|
mars/oscar/backends/ray/utils.py
|
wjsi/mars
|
a69fb19edfe748d4393b90ff2c4941a76c084596
|
[
"Apache-2.0"
] | 1
|
2022-02-02T03:03:48.000Z
|
2022-02-02T03:03:48.000Z
|
mars/oscar/backends/ray/utils.py
|
wjsi/mars
|
a69fb19edfe748d4393b90ff2c4941a76c084596
|
[
"Apache-2.0"
] | null | null | null |
mars/oscar/backends/ray/utils.py
|
wjsi/mars
|
a69fb19edfe748d4393b90ff2c4941a76c084596
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import asyncio
import logging
import posixpath
from urllib.parse import urlparse, unquote
from ....utils import lazy_import
ray = lazy_import("ray")
logger = logging.getLogger(__name__)
def get_placement_group(pg_name): # pragma: no cover
if hasattr(ray.util, "get_placement_group"):
return ray.util.get_placement_group(pg_name)
else:
logger.warning(
"The currently installed ray version doesn't support named placement groups. "
"The actor will be created on an arbitrary node."
)
return None
def process_address_to_placement(address):
"""
Parameters
----------
address: str
The address of an actor pool which is running in a ray actor. It is also
the name of the ray actor. Address example: ray://${pg_name}/${bundle_index}/${process_index}
Returns
-------
tuple
A tuple consisting of placement group name, bundle index, process index.
"""
name, parts = _address_to_placement(address)
if not parts or len(parts) != 2:
raise ValueError(
f"Only bundle index and process index path are allowed in ray "
f"address {address} but got {parts}."
)
bundle_index, process_index = parts
return name, int(bundle_index), int(process_index)
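# Illustrative call (hypothetical placement group name):
# process_address_to_placement("ray://mars_pg/0/1") -> ("mars_pg", 0, 1)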
def node_address_to_placement(address):
"""
Parameters
----------
address : str
The address of a node. ex: ray://${pg_name}/${bundle_index}
Returns
-------
tuple
A tuple consisting of placement group name, bundle index.
"""
name, parts = _address_to_placement(address)
if not parts or len(parts) != 1:
raise ValueError(
f"Only bundle index path is allowed in ray address {address} but got {parts}"
)
bundle_index = parts[0]
return name, int(bundle_index)
def _address_to_placement(address):
"""
Parameters
----------
address : str
The address of a node or of an actor pool which is running in a ray actor.
Returns
-------
tuple
A tuple consisting of the placement group name and a list of the remaining path parts.
"""
parsed_url = urlparse(unquote(address))
if parsed_url.scheme != "ray":
raise ValueError(f"The address scheme is not ray: {address}")
# os.path.split will not handle backslashes (\) correctly,
# so we use the posixpath.
parts = []
if parsed_url.netloc:
tmp = parsed_url.path
while tmp and tmp != "/":
tmp2, item = posixpath.split(tmp)
parts.append(item)
if tmp2 != tmp:
tmp = tmp2
else:
parts.append(tmp2)
break
parts = list(reversed(parts))
return parsed_url.netloc, parts
def process_placement_to_address(
pg_name: str, bundle_index: int, process_index: int = 0
):
return f"ray://{pg_name}/{bundle_index}/{process_index}"
def node_placement_to_address(pg_name, bundle_index):
return f"ray://{pg_name}/{bundle_index}"
def addresses_to_placement_group_info(address_to_resources):
bundles = {}
pg_name = None
for address, bundle_resources in address_to_resources.items():
name, bundle_index = node_address_to_placement(address)
if pg_name is None:
pg_name = name
else:
if name != pg_name:
raise ValueError(
"All addresses should have consistent placement group names."
)
bundles[bundle_index] = bundle_resources
sorted_bundle_keys = sorted(bundles.keys())
if sorted_bundle_keys != list(range(len(address_to_resources))):
raise ValueError("The addresses contain an invalid bundle.")
bundles = [bundles[k] for k in sorted_bundle_keys]
if not pg_name:
raise ValueError("Can't find a valid placement group name.")
return pg_name, bundles
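# Illustrative call (hypothetical resources):
# addresses_to_placement_group_info({"ray://mars_pg/0": {"CPU": 2}, "ray://mars_pg/1": {"CPU": 2}})
# -> ("mars_pg", [{"CPU": 2}, {"CPU": 2}])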
def placement_group_info_to_addresses(pg_name, bundles):
addresses = {}
for bundle_index, bundle_resources in enumerate(bundles):
address = node_placement_to_address(pg_name, bundle_index)
addresses[address] = bundle_resources
return addresses
async def kill_and_wait(
actor_handle: "ray.actor.ActorHandle", no_restart=False, timeout: float = 30
):
if "COV_CORE_SOURCE" in os.environ: # pragma: no cover
try:
# must clean up first, or coverage info lost
await actor_handle.cleanup.remote()
except: # noqa: E722 # nosec # pylint: disable=bare-except
pass
r = actor_handle.wait.remote(timeout)
ray.kill(actor_handle, no_restart=no_restart)
ready, _ = await asyncio.wait([r], timeout=timeout)
if ready:
try:
await r
except ray.exceptions.RayActorError:
return # We expect a RayActorError; it indicates that the actor has died.
raise Exception(
f"The actor {actor_handle} did not die within {timeout} seconds after ray.kill."
)
| 31.784091
| 96
| 0.653736
|
58b1fa775dab39aa03f96ee23701e32ce6c55ac9
| 51,868
|
py
|
Python
|
djstripe/models/billing.py
|
fdintino/dj-stripe
|
6f1cdd4fc1a40cad82163b8fbc5de1f566d8d117
|
[
"MIT"
] | null | null | null |
djstripe/models/billing.py
|
fdintino/dj-stripe
|
6f1cdd4fc1a40cad82163b8fbc5de1f566d8d117
|
[
"MIT"
] | null | null | null |
djstripe/models/billing.py
|
fdintino/dj-stripe
|
6f1cdd4fc1a40cad82163b8fbc5de1f566d8d117
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
import stripe
from django.db import models
from django.utils import timezone
from django.utils.text import format_lazy
from django.utils.translation import gettext_lazy as _
from stripe.error import InvalidRequestError
from .. import enums
from .. import settings as djstripe_settings
from ..fields import (
JSONField,
StripeCurrencyCodeField,
StripeDateTimeField,
StripeDecimalCurrencyAmountField,
StripeEnumField,
StripeIdField,
StripePercentField,
StripeQuantumCurrencyAmountField,
)
from ..managers import SubscriptionManager
from ..utils import QuerySetMock, get_friendly_currency_amount
from .base import StripeModel
class Coupon(StripeModel):
id = StripeIdField(max_length=500)
amount_off = StripeDecimalCurrencyAmountField(
null=True,
blank=True,
help_text="Amount (as decimal) that will be taken off the subtotal of any "
"invoices for this customer.",
)
currency = StripeCurrencyCodeField(null=True, blank=True)
duration = StripeEnumField(
enum=enums.CouponDuration,
help_text=(
"Describes how long a customer who applies this coupon "
"will get the discount."
),
)
duration_in_months = models.PositiveIntegerField(
null=True,
blank=True,
help_text="If `duration` is `repeating`, the number of months "
"the coupon applies.",
)
max_redemptions = models.PositiveIntegerField(
null=True,
blank=True,
help_text="Maximum number of times this coupon can be redeemed, in total, "
"before it is no longer valid.",
)
name = models.TextField(
max_length=5000,
default="",
blank=True,
help_text=(
"Name of the coupon displayed to customers on for instance invoices "
"or receipts."
),
)
percent_off = StripePercentField(
null=True,
blank=True,
help_text=(
"Percent that will be taken off the subtotal of any invoices for "
"this customer for the duration of the coupon. "
"For example, a coupon with percent_off of 50 will make a "
"$100 invoice $50 instead."
),
)
redeem_by = StripeDateTimeField(
null=True,
blank=True,
help_text="Date after which the coupon can no longer be redeemed. "
"Max 5 years in the future.",
)
times_redeemed = models.PositiveIntegerField(
editable=False,
default=0,
help_text="Number of times this coupon has been applied to a customer.",
)
# valid = models.BooleanField(editable=False)
# XXX
DURATION_FOREVER = "forever"
DURATION_ONCE = "once"
DURATION_REPEATING = "repeating"
class Meta:
unique_together = ("id", "livemode")
stripe_class = stripe.Coupon
stripe_dashboard_item_name = "coupons"
def __str__(self):
if self.name:
return self.name
return self.human_readable
@property
def human_readable_amount(self):
if self.percent_off:
amount = "{percent_off}%".format(percent_off=self.percent_off)
else:
amount = get_friendly_currency_amount(self.amount_off or 0, self.currency)
return "{amount} off".format(amount=amount)
@property
def human_readable(self):
if self.duration == self.DURATION_REPEATING:
if self.duration_in_months == 1:
duration = "for {duration_in_months} month"
else:
duration = "for {duration_in_months} months"
duration = duration.format(duration_in_months=self.duration_in_months)
else:
duration = self.duration
return "{amount} {duration}".format(
amount=self.human_readable_amount, duration=duration
)
class Invoice(StripeModel):
"""
Invoices are statements of what a customer owes for a particular billing
period, including subscriptions, invoice items, and any automatic proration
adjustments if necessary.
Once an invoice is created, payment is automatically attempted. Note that
the payment, while automatic, does not happen exactly at the time of invoice
creation. If you have configured webhooks, the invoice will wait until one
hour after the last webhook is successfully sent (or the last webhook times
out after failing).
Any customer credit on the account is applied before determining how much is
due for that invoice (the amount that will be actually charged).
If the amount due for the invoice is less than 50 cents (the minimum for a
charge), we add the amount to the customer's running account balance to be
added to the next invoice. If this amount is negative, it will act as a
credit to offset the next invoice. Note that the customer account balance
does not include unpaid invoices; it only includes balances that need to be
taken into account when calculating the amount due for the next invoice.
Stripe documentation: https://stripe.com/docs/api/python#invoices
"""
stripe_class = stripe.Invoice
stripe_dashboard_item_name = "invoices"
amount_due = StripeDecimalCurrencyAmountField(
help_text="Final amount due (as decimal) at this time for this invoice. "
"If the invoice's total is smaller than the minimum charge amount, "
"for example, or if there is account credit that can be applied to the "
"invoice, the amount_due may be 0. If there is a positive starting_balance "
"for the invoice (the customer owes money), the amount_due will also take that "
"into account. The charge that gets generated for the invoice will be for "
"the amount specified in amount_due."
)
amount_paid = StripeDecimalCurrencyAmountField(
null=True, # XXX: This is not nullable, but it's a new field
help_text="The amount, (as decimal), that was paid.",
)
amount_remaining = StripeDecimalCurrencyAmountField(
null=True, # XXX: This is not nullable, but it's a new field
help_text="The amount remaining, (as decimal), that is due.",
)
auto_advance = models.NullBooleanField(
help_text="Controls whether Stripe will perform automatic collection of the "
"invoice. When false, the invoice’s state will not automatically "
"advance without an explicit action."
)
application_fee_amount = StripeDecimalCurrencyAmountField(
null=True,
help_text="The fee (as decimal) that will be applied to the invoice and "
"transferred to the application owner's "
"Stripe account when the invoice is paid.",
)
attempt_count = models.IntegerField(
help_text="Number of payment attempts made for this invoice, "
"from the perspective of the payment retry schedule. "
"Any payment attempt counts as the first attempt, and subsequently "
"only automatic retries increment the attempt count. "
"In other words, manual payment attempts after the first attempt do not affect "
"the retry schedule."
)
attempted = models.BooleanField(
default=False,
help_text="Whether or not an attempt has been made to pay the invoice. "
"An invoice is not attempted until 1 hour after the ``invoice.created`` "
"webhook, for example, so you might not want to display that invoice as "
"unpaid to your users.",
)
billing = StripeEnumField(
enum=enums.InvoiceBilling,
null=True,
help_text=(
"When charging automatically, Stripe will attempt to pay this invoice "
"using the default source attached to the customer. "
"When sending an invoice, Stripe will email this invoice to the customer "
"with payment instructions."
),
)
charge = models.OneToOneField(
"Charge",
on_delete=models.CASCADE,
null=True,
related_name="latest_invoice",
help_text="The latest charge generated for this invoice, if any.",
)
# deprecated, will be removed in 2.2
closed = models.NullBooleanField(
default=False,
help_text="Whether or not the invoice is still trying to collect payment."
" An invoice is closed if it's either paid or it has been marked closed. "
"A closed invoice will no longer attempt to collect payment.",
)
currency = StripeCurrencyCodeField()
customer = models.ForeignKey(
"Customer",
on_delete=models.CASCADE,
related_name="invoices",
help_text="The customer associated with this invoice.",
)
# TODO: discount
due_date = StripeDateTimeField(
null=True,
help_text=(
"The date on which payment for this invoice is due. "
"This value will be null for invoices where billing=charge_automatically."
),
)
ending_balance = StripeQuantumCurrencyAmountField(
null=True,
help_text="Ending customer balance (in cents) after attempting to pay invoice. "
"If the invoice has not been attempted yet, this will be null.",
)
# deprecated, will be removed in 2.2
forgiven = models.NullBooleanField(
default=False,
help_text="Whether or not the invoice has been forgiven. "
"Forgiving an invoice instructs us to update the subscription status as "
"if the invoice were successfully paid. Once an invoice has been forgiven, "
"it cannot be unforgiven or reopened.",
)
hosted_invoice_url = models.TextField(
max_length=799,
default="",
blank=True,
help_text="The URL for the hosted invoice page, which allows customers to view "
"and pay an invoice. If the invoice has not been frozen yet, "
"this will be null.",
)
invoice_pdf = models.TextField(
max_length=799,
default="",
blank=True,
help_text=(
"The link to download the PDF for the invoice. "
"If the invoice has not been frozen yet, this will be null."
),
)
next_payment_attempt = StripeDateTimeField(
null=True, help_text="The time at which payment will next be attempted."
)
number = models.CharField(
max_length=64,
default="",
blank=True,
help_text=(
"A unique, identifying string that appears on emails sent to the customer "
"for this invoice. "
"This starts with the customer’s unique invoice_prefix if it is specified."
),
)
paid = models.BooleanField(
default=False, help_text="Whether the invoice has been paid."
)
payment_intent = models.OneToOneField(
"PaymentIntent",
on_delete=models.CASCADE,
null=True,
help_text=(
"The PaymentIntent associated with this invoice. "
"The PaymentIntent is generated when the invoice is finalized, "
"and can then be used to pay the invoice. "
"Note that voiding an invoice will cancel the PaymentIntent."
),
)
period_end = StripeDateTimeField(
help_text="End of the usage period during which invoice items were "
"added to this invoice."
)
period_start = StripeDateTimeField(
help_text="Start of the usage period during which invoice items were "
"added to this invoice."
)
receipt_number = models.CharField(
max_length=64,
null=True,
help_text=(
"This is the transaction number that appears on email receipts "
"sent for this invoice."
),
)
starting_balance = StripeQuantumCurrencyAmountField(
help_text="Starting customer balance (in cents) before attempting to pay "
"invoice. If the invoice has not been attempted yet, this will be the "
"current customer balance."
)
statement_descriptor = models.CharField(
max_length=22,
default="",
blank=True,
help_text="An arbitrary string to be displayed on your customer's "
"credit card statement. The statement description may not include <>\"' "
"characters, and will appear on your customer's statement in capital letters. "
"Non-ASCII characters are automatically stripped. "
"While most banks display this information consistently, "
"some may display it incorrectly or not at all.",
)
status_transitions = JSONField(null=True, blank=True)
subscription = models.ForeignKey(
"Subscription",
null=True,
related_name="invoices",
on_delete=models.SET_NULL,
help_text="The subscription that this invoice was prepared for, if any.",
)
subscription_proration_date = StripeDateTimeField(
null=True,
blank=True,
help_text="Only set for upcoming invoices that preview prorations. "
"The time used to calculate prorations.",
)
subtotal = StripeDecimalCurrencyAmountField(
help_text="Total (as decimal) of all subscriptions, invoice items, "
"and prorations on the invoice before any discount or tax is applied."
)
tax = StripeDecimalCurrencyAmountField(
null=True,
blank=True,
help_text="The amount (as decimal) of tax included in the total, calculated "
"from ``tax_percent`` and the subtotal. If no "
"``tax_percent`` is defined, this value will be null.",
)
tax_percent = StripePercentField(
null=True,
help_text="This percentage of the subtotal has been added to the total amount "
"of the invoice, including invoice line items and discounts. "
"This field is inherited from the subscription's ``tax_percent`` field, "
"but can be changed before the invoice is paid. This field defaults to null.",
)
total = StripeDecimalCurrencyAmountField("Total (as decimal) after discount.")
webhooks_delivered_at = StripeDateTimeField(
null=True,
help_text=(
"The time at which webhooks for this invoice were successfully delivered "
"(if the invoice had no webhooks to deliver, this will match `date`). "
"Invoice payment is delayed until webhooks are delivered, or until all "
"webhook delivery attempts have been exhausted."
),
)
class Meta(object):
ordering = ["-created"]
def __str__(self):
return "Invoice #{number}".format(
number=self.number or self.receipt_number or self.id
)
@classmethod
def _manipulate_stripe_object_hook(cls, data):
data = super()._manipulate_stripe_object_hook(data)
# Invoice.closed and .forgiven deprecated in API 2018-11-08 -
# see https://stripe.com/docs/upgrades#2018-11-08
if "closed" not in data:
# TODO - drop this in 2.2, use auto_advance instead
# https://stripe.com/docs/billing/invoices/migrating-new-invoice-states#autoadvance
if "auto_advance" in data:
data["closed"] = not data["auto_advance"]
else:
data["closed"] = False
if "forgiven" not in data:
# TODO - drop this in 2.2, use status == "uncollectible" instead
if "status" in data:
data["forgiven"] = data["status"] == "uncollectible"
else:
data["forgiven"] = False
return data
@classmethod
def upcoming(
cls,
api_key=djstripe_settings.STRIPE_SECRET_KEY,
customer=None,
coupon=None,
subscription=None,
subscription_plan=None,
subscription_prorate=None,
subscription_proration_date=None,
subscription_quantity=None,
subscription_trial_end=None,
**kwargs
):
"""
Gets the upcoming preview invoice (singular) for a customer.
At any time, you can preview the upcoming
invoice for a customer. This will show you all the charges that are
pending, including subscription renewal charges, invoice item charges,
etc. It will also show you any discount that is applicable to the
customer. (Source: https://stripe.com/docs/api#upcoming_invoice)
.. important:: Note that when you are viewing an upcoming invoice,
you are simply viewing a preview.
:param customer: The identifier of the customer whose upcoming invoice \
you'd like to retrieve.
:type customer: Customer or string (customer ID)
:param coupon: The code of the coupon to apply.
:type coupon: str
:param subscription: The identifier of the subscription to retrieve an \
invoice for.
:type subscription: Subscription or string (subscription ID)
:param subscription_plan: If set, the invoice returned will preview \
updating the subscription given to this plan, or creating a new \
subscription to this plan if no subscription is given.
:type subscription_plan: Plan or string (plan ID)
:param subscription_prorate: If previewing an update to a subscription, \
this decides whether the preview will show the result of applying \
prorations or not.
:type subscription_prorate: bool
:param subscription_proration_date: If previewing an update to a \
subscription, and doing proration, subscription_proration_date forces \
the proration to be calculated as though the update was done at the \
specified time.
:type subscription_proration_date: datetime
:param subscription_quantity: If provided, the invoice returned will \
preview updating or creating a subscription with that quantity.
:type subscription_quantity: int
:param subscription_trial_end: If provided, the invoice returned will \
preview updating or creating a subscription with that trial end.
:type subscription_trial_end: datetime
:returns: The upcoming preview invoice.
:rtype: UpcomingInvoice
"""
# Convert Customer to id
if customer is not None and isinstance(customer, StripeModel):
customer = customer.id
# Convert Subscription to id
if subscription is not None and isinstance(subscription, StripeModel):
subscription = subscription.id
# Convert Plan to id
if subscription_plan is not None and isinstance(subscription_plan, StripeModel):
subscription_plan = subscription_plan.id
try:
upcoming_stripe_invoice = cls.stripe_class.upcoming(
api_key=api_key,
customer=customer,
coupon=coupon,
subscription=subscription,
subscription_plan=subscription_plan,
subscription_prorate=subscription_prorate,
subscription_proration_date=subscription_proration_date,
subscription_quantity=subscription_quantity,
subscription_trial_end=subscription_trial_end,
**kwargs
)
except InvalidRequestError as exc:
if str(exc) != "Nothing to invoice for customer":
raise
return
# Workaround for "id" being missing (upcoming invoices don't persist).
upcoming_stripe_invoice["id"] = "upcoming"
return UpcomingInvoice._create_from_stripe_object(
upcoming_stripe_invoice, save=False
)
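# Illustrative usage sketch (the IDs below are hypothetical placeholders):
# upcoming = Invoice.upcoming(customer="cus_123", subscription="sub_456")
# if upcoming:
#     print(upcoming.amount_due, upcoming.period_end)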
def retry(self):
""" Retry payment on this invoice if it isn't paid, closed, or forgiven."""
if not self.paid and not self.forgiven and not self.closed:
stripe_invoice = self.api_retrieve()
updated_stripe_invoice = (
stripe_invoice.pay()
) # pay() throws an exception if the charge is not successful.
type(self).sync_from_stripe_data(updated_stripe_invoice)
return True
return False
STATUS_PAID = "Paid"
STATUS_FORGIVEN = "Forgiven"
STATUS_CLOSED = "Closed"
STATUS_OPEN = "Open"
@property
def status(self):
"""
Attempts to label this invoice with a status.
Note that an invoice can be more than one of the choices.
We just set a priority on which status appears.
"""
if self.paid:
return self.STATUS_PAID
if self.forgiven:
return self.STATUS_FORGIVEN
if self.closed:
return self.STATUS_CLOSED
return self.STATUS_OPEN
def get_stripe_dashboard_url(self):
return self.customer.get_stripe_dashboard_url()
def _attach_objects_post_save_hook(self, cls, data, pending_relations=None):
super()._attach_objects_post_save_hook(
cls, data, pending_relations=pending_relations
)
# InvoiceItems need a saved invoice because they're associated via a
# RelatedManager, so this must be done as part of the post save hook.
cls._stripe_object_to_invoice_items(
target_cls=InvoiceItem, data=data, invoice=self
)
@property
def plan(self):
""" Gets the associated plan for this invoice.
In order to provide a consistent view of invoices, the plan object
should be taken from the first invoice item that has one, rather than
using the plan associated with the subscription.
Subscriptions (and their associated plan) are updated by the customer
and represent what is current, but invoice items are immutable within
the invoice and stay static/unchanged.
In other words, a plan retrieved from an invoice item will represent
the plan as it was at the time an invoice was issued. The plan
retrieved from the subscription will be the currently active plan.
:returns: The associated plan for the invoice.
:rtype: ``djstripe.Plan``
"""
for invoiceitem in self.invoiceitems.all():
if invoiceitem.plan:
return invoiceitem.plan
if self.subscription:
return self.subscription.plan
class UpcomingInvoice(Invoice):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._invoiceitems = []
def get_stripe_dashboard_url(self):
return ""
def _attach_objects_hook(self, cls, data):
super()._attach_objects_hook(cls, data)
self._invoiceitems = cls._stripe_object_to_invoice_items(
target_cls=InvoiceItem, data=data, invoice=self
)
@property
def invoiceitems(self):
"""
Gets the invoice items associated with this upcoming invoice.
This differs from normal (non-upcoming) invoices, in that upcoming
invoices are in-memory and do not persist to the database. Therefore,
all of the data comes from the Stripe API itself.
Instead of returning a normal queryset for the invoiceitems, this will
return a mock of a queryset, but with the data fetched from Stripe - it
will act like a normal queryset, but mutation will silently fail.
"""
return QuerySetMock.from_iterable(InvoiceItem, self._invoiceitems)
@property
def id(self):
return None
@id.setter
def id(self, value):
return # noop
def save(self, *args, **kwargs):
return # noop
class InvoiceItem(StripeModel):
"""
Sometimes you want to add a charge or credit to a customer but only actually
charge the customer's card at the end of a regular billing cycle.
This is useful for combining several charges to minimize per-transaction fees
or having Stripe tabulate your usage-based billing totals.
Stripe documentation: https://stripe.com/docs/api/python#invoiceitems
"""
stripe_class = stripe.InvoiceItem
amount = StripeDecimalCurrencyAmountField(help_text="Amount invoiced (as decimal).")
currency = StripeCurrencyCodeField()
customer = models.ForeignKey(
"Customer",
on_delete=models.CASCADE,
related_name="invoiceitems",
help_text="The customer associated with this invoiceitem.",
)
date = StripeDateTimeField(help_text="The date on the invoiceitem.")
discountable = models.BooleanField(
default=False,
help_text="If True, discounts will apply to this invoice item. "
"Always False for prorations.",
)
invoice = models.ForeignKey(
"Invoice",
on_delete=models.CASCADE,
null=True,
related_name="invoiceitems",
help_text="The invoice to which this invoiceitem is attached.",
)
period = JSONField()
period_end = StripeDateTimeField(
help_text="Might be the date when this invoiceitem's invoice was sent."
)
period_start = StripeDateTimeField(
help_text="Might be the date when this invoiceitem was added to the invoice"
)
plan = models.ForeignKey(
"Plan",
null=True,
related_name="invoiceitems",
on_delete=models.SET_NULL,
help_text="If the invoice item is a proration, the plan of the subscription "
"for which the proration was computed.",
)
proration = models.BooleanField(
default=False,
help_text="Whether or not the invoice item was created automatically as a "
"proration adjustment when the customer switched plans.",
)
quantity = models.IntegerField(
null=True,
blank=True,
help_text="If the invoice item is a proration, the quantity of the "
"subscription for which the proration was computed.",
)
subscription = models.ForeignKey(
"Subscription",
null=True,
related_name="invoiceitems",
on_delete=models.SET_NULL,
help_text="The subscription that this invoice item has been created for, "
"if any.",
)
# XXX: subscription_item
@classmethod
def _manipulate_stripe_object_hook(cls, data):
data["period_start"] = data["period"]["start"]
data["period_end"] = data["period"]["end"]
return data
@classmethod
def sync_from_stripe_data(cls, data):
invoice_data = data.get("invoice")
if invoice_data:
# sync the Invoice first if it doesn't yet exist in our DB
# to avoid recursive Charge/Invoice loop
invoice_id = cls._id_from_data(invoice_data)
if not Invoice.objects.filter(id=invoice_id).exists():
if invoice_id == invoice_data:
# we only have the id, fetch the full data
invoice_data = Invoice(id=invoice_id).api_retrieve()
Invoice.sync_from_stripe_data(data=invoice_data)
return super().sync_from_stripe_data(data)
def __str__(self):
if self.plan and self.plan.product:
return self.plan.product.name or str(self.plan)
return super().__str__()
@classmethod
def is_valid_object(cls, data):
return data["object"] in ("invoiceitem", "line_item")
def get_stripe_dashboard_url(self):
return self.invoice.get_stripe_dashboard_url()
def str_parts(self):
return [
"amount={amount}".format(amount=self.amount),
"date={date}".format(date=self.date),
] + super().str_parts()
class Plan(StripeModel):
"""
A subscription plan contains the pricing information for different
products and feature levels on your site.
Stripe documentation: https://stripe.com/docs/api/python#plans
"""
stripe_class = stripe.Plan
stripe_dashboard_item_name = "plans"
active = models.BooleanField(
help_text="Whether the plan is currently available for new subscriptions."
)
aggregate_usage = StripeEnumField(
enum=enums.PlanAggregateUsage,
default="",
blank=True,
help_text=(
"Specifies a usage aggregation strategy for plans of usage_type=metered. "
"Allowed values are `sum` for summing up all usage during a period, "
"`last_during_period` for picking the last usage record reported within a "
"period, `last_ever` for picking the last usage record ever (across period "
"bounds) or max which picks the usage record with the maximum reported "
"usage during a period. Defaults to `sum`."
),
)
amount = StripeDecimalCurrencyAmountField(
null=True,
blank=True,
help_text="Amount (as decimal) to be charged on the interval specified.",
)
billing_scheme = StripeEnumField(
enum=enums.PlanBillingScheme,
default="",
blank=True,
help_text=(
"Describes how to compute the price per period. "
"Either `per_unit` or `tiered`. "
"`per_unit` indicates that the fixed amount (specified in amount) "
"will be charged per unit in quantity "
"(for plans with `usage_type=licensed`), or per unit of total "
"usage (for plans with `usage_type=metered`). "
"`tiered` indicates that the unit pricing will be computed using "
"a tiering strategy as defined using the tiers and tiers_mode attributes."
),
)
currency = StripeCurrencyCodeField()
interval = StripeEnumField(
enum=enums.PlanInterval,
help_text="The frequency with which a subscription should be billed.",
)
interval_count = models.IntegerField(
null=True,
help_text=(
"The number of intervals (specified in the interval property) "
"between each subscription billing."
),
)
nickname = models.TextField(
max_length=5000,
default="",
blank=True,
help_text="A brief description of the plan, hidden from customers.",
)
product = models.ForeignKey(
"Product",
on_delete=models.SET_NULL,
null=True,
help_text="The product whose pricing this plan determines.",
)
tiers = JSONField(
null=True,
blank=True,
help_text=(
"Each element represents a pricing tier. "
"This parameter requires `billing_scheme` to be set to `tiered`."
),
)
tiers_mode = StripeEnumField(
enum=enums.PlanTiersMode,
null=True,
blank=True,
help_text=(
"Defines if the tiering price should be `graduated` or `volume` based. "
"In `volume`-based tiering, the maximum quantity within a period "
"determines the per unit price, in `graduated` tiering pricing can "
"successively change as the quantity grows."
),
)
transform_usage = JSONField(
null=True,
blank=True,
help_text=(
"Apply a transformation to the reported usage or set quantity "
"before computing the billed price. Cannot be combined with `tiers`."
),
)
trial_period_days = models.IntegerField(
null=True,
help_text=(
"Number of trial period days granted when subscribing a customer "
"to this plan. Null if the plan has no trial period."
),
)
usage_type = StripeEnumField(
enum=enums.PlanUsageType,
default=enums.PlanUsageType.licensed,
help_text=(
"Configures how the quantity per period should be determined, "
"can be either `metered` or `licensed`. `licensed` will automatically "
"bill the `quantity` set for a plan when adding it to a subscription, "
"`metered` will aggregate the total usage based on usage records. "
"Defaults to `licensed`."
),
)
# Legacy fields (pre 2017-08-15)
name = models.TextField(
null=True,
blank=True,
help_text="Name of the plan, to be displayed on invoices and in "
"the web interface.",
)
statement_descriptor = models.CharField(
max_length=22,
null=True,
blank=True,
help_text="An arbitrary string to be displayed on your customer's credit card "
"statement. The statement description may not include <>\"' characters, "
"and will appear on your customer's statement in capital letters. "
"Non-ASCII characters are automatically stripped. "
"While most banks display this information consistently, some may display it "
"incorrectly or not at all.",
)
class Meta(object):
ordering = ["amount"]
@classmethod
def get_or_create(cls, **kwargs):
""" Get or create a Plan."""
try:
return Plan.objects.get(id=kwargs["id"]), False
except Plan.DoesNotExist:
return cls.create(**kwargs), True
@classmethod
def create(cls, **kwargs):
# A few minor things are changed in the api-version of the create call
api_kwargs = dict(kwargs)
api_kwargs["amount"] = int(api_kwargs["amount"] * 100)
if isinstance(api_kwargs.get("product"), StripeModel):
api_kwargs["product"] = api_kwargs["product"].id
stripe_plan = cls._api_create(**api_kwargs)
plan = cls.sync_from_stripe_data(stripe_plan)
return plan
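    # Illustrative usage sketch (hypothetical identifiers and values): calling
    # Plan.create(id="gold", amount=Decimal("9.99"), currency="usd",
    # interval="month", product=product) would send amount=999 (cents) to the
    # Stripe API and then sync the returned plan back into a local Plan row.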
def __str__(self):
return self.name or self.nickname or self.id
@property
def amount_in_cents(self):
return int(self.amount * 100)
@property
def human_readable_price(self):
amount = get_friendly_currency_amount(self.amount, self.currency)
interval_count = self.interval_count
if interval_count == 1:
interval = {
"day": _("day"),
"week": _("week"),
"month": _("month"),
"year": _("year"),
}[self.interval]
template = _("{amount}/{interval}")
else:
interval = {
"day": _("days"),
"week": _("weeks"),
"month": _("months"),
"year": _("years"),
}[self.interval]
template = _("{amount} every {interval_count} {interval}")
return format_lazy(
template, amount=amount, interval=interval, interval_count=interval_count
)
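    # Illustrative examples (hypothetical values): with amount=10,
    # currency="usd", interval="month" and interval_count=1 this renders
    # roughly "$10.00 USD/month"; with interval_count=3 it renders roughly
    # "$10.00 USD every 3 months". The exact amount string depends on
    # get_friendly_currency_amount.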
# TODO: Move this type of update to the model's save() method
# so it happens automatically
# Also, block other fields from being saved.
def update_name(self):
"""
Update the name of the Plan in Stripe and in the db.
Assumes the object being called has the name attribute already
reset, but has not been saved.
Stripe does not allow for update of any other Plan attributes besides name.
"""
p = self.api_retrieve()
p.name = self.name
p.save()
self.save()
class Subscription(StripeModel):
"""
Subscriptions allow you to charge a customer's card on a recurring basis.
A subscription ties a customer to a particular plan you've created.
A subscription still in its trial period is ``trialing`` and moves to ``active``
when the trial period is over.
When payment to renew the subscription fails, the subscription becomes ``past_due``.
After Stripe has exhausted all payment retry attempts, the subscription ends up
with a status of either ``canceled`` or ``unpaid`` depending on your retry settings.
Note that when a subscription has a status of ``unpaid``, no subsequent invoices
will be attempted (invoices will be created, but then immediately
automatically closed.
Additionally, updating customer card details will not lead to Stripe retrying the
latest invoice.).
After receiving updated card details from a customer, you may choose to reopen and
pay their closed invoices.
Stripe documentation: https://stripe.com/docs/api/python#subscriptions
"""
stripe_class = stripe.Subscription
stripe_dashboard_item_name = "subscriptions"
application_fee_percent = StripePercentField(
null=True,
blank=True,
help_text="A positive decimal that represents the fee percentage of the "
"subscription invoice amount that will be transferred to the application "
"owner's Stripe account each billing period.",
)
billing = StripeEnumField(
enum=enums.InvoiceBilling,
help_text="Either `charge_automatically`, or `send_invoice`. When charging "
"automatically, Stripe will attempt to pay this subscription at the end of the "
"cycle using the default source attached to the customer. "
"When sending an invoice, Stripe will email your customer an invoice with "
"payment instructions.",
)
billing_cycle_anchor = StripeDateTimeField(
null=True,
blank=True,
help_text=(
"Determines the date of the first full invoice, and, for plans "
"with `month` or `year` intervals, the day of the month for subsequent "
"invoices."
),
)
cancel_at_period_end = models.BooleanField(
default=False,
help_text="If the subscription has been canceled with the ``at_period_end`` "
"flag set to true, ``cancel_at_period_end`` on the subscription will be true. "
"You can use this attribute to determine whether a subscription that has a "
"status of active is scheduled to be canceled at the end of the "
"current period.",
)
canceled_at = StripeDateTimeField(
null=True,
blank=True,
help_text="If the subscription has been canceled, the date of that "
"cancellation. If the subscription was canceled with ``cancel_at_period_end``, "
"canceled_at will still reflect the date of the initial cancellation request, "
"not the end of the subscription period when the subscription is automatically "
"moved to a canceled state.",
)
current_period_end = StripeDateTimeField(
help_text="End of the current period for which the subscription has been "
"invoiced. At the end of this period, a new invoice will be created."
)
current_period_start = StripeDateTimeField(
help_text="Start of the current period for which the subscription has "
"been invoiced."
)
customer = models.ForeignKey(
"Customer",
on_delete=models.CASCADE,
related_name="subscriptions",
help_text="The customer associated with this subscription.",
)
days_until_due = models.IntegerField(
null=True,
blank=True,
help_text="Number of days a customer has to pay invoices generated by this "
"subscription. This value will be `null` for subscriptions where "
"`billing=charge_automatically`.",
)
# TODO: discount
ended_at = StripeDateTimeField(
null=True,
blank=True,
help_text="If the subscription has ended (either because it was canceled or "
"because the customer was switched to a subscription to a new plan), "
"the date the subscription ended.",
)
pending_setup_intent = models.ForeignKey(
"SetupIntent",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="setup_intents",
help_text="We can use this SetupIntent to collect user authentication "
"when creating a subscription without immediate payment or updating a "
"subscription’s payment method, allowing you to "
"optimize for off-session payments.",
)
plan = models.ForeignKey(
"Plan",
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="subscriptions",
help_text="The plan associated with this subscription. This value will be "
"`null` for multi-plan subscriptions",
)
quantity = models.IntegerField(
null=True,
blank=True,
help_text="The quantity applied to this subscription. This value will be "
"`null` for multi-plan subscriptions",
)
start = StripeDateTimeField(
help_text="Date of the last substantial change to "
"this subscription. For example, a change to the items array, or a change "
"of status, will reset this timestamp."
)
status = StripeEnumField(
enum=enums.SubscriptionStatus, help_text="The status of this subscription."
)
tax_percent = StripePercentField(
null=True,
blank=True,
help_text="A positive decimal (with at most two decimal places) "
"between 1 and 100. This represents the percentage of the subscription "
"invoice subtotal that will be calculated and added as tax to the final "
"amount each billing period.",
)
trial_end = StripeDateTimeField(
null=True,
blank=True,
help_text="If the subscription has a trial, the end of that trial.",
)
trial_start = StripeDateTimeField(
null=True,
blank=True,
help_text="If the subscription has a trial, the beginning of that trial.",
)
objects = SubscriptionManager()
def __str__(self):
return "{customer} on {plan}".format(
customer=str(self.customer), plan=str(self.plan)
)
def update(
self,
plan=None,
application_fee_percent=None,
billing_cycle_anchor=None,
coupon=None,
prorate=djstripe_settings.PRORATION_POLICY,
proration_date=None,
metadata=None,
quantity=None,
tax_percent=None,
trial_end=None,
):
"""
See `Customer.subscribe() <#djstripe.models.Customer.subscribe>`__
:param plan: The plan to which to subscribe the customer.
:type plan: Plan or string (plan ID)
:param application_fee_percent:
:type application_fee_percent:
:param billing_cycle_anchor:
:type billing_cycle_anchor:
:param coupon:
:type coupon:
:param prorate: Whether or not to prorate when switching plans. Default is True.
:type prorate: boolean
:param proration_date:
If set, the proration will be calculated as though the subscription was
updated at the given time. This can be used to apply exactly the same
proration that was previewed with upcoming invoice endpoint.
It can also be used to implement custom proration logic, such as prorating
by day instead of by second, by providing the time that you
wish to use for proration calculations.
:type proration_date: datetime
:param metadata:
:type metadata:
:param quantity:
:type quantity:
:param tax_percent:
:type tax_percent:
:param trial_end:
:type trial_end:
.. note:: The default value for ``prorate`` is the DJSTRIPE_PRORATION_POLICY \
setting.
.. important:: Updating a subscription by changing the plan or quantity \
creates a new ``Subscription`` in \
Stripe (and dj-stripe).
"""
# Convert Plan to id
if plan is not None and isinstance(plan, StripeModel):
plan = plan.id
kwargs = deepcopy(locals())
del kwargs["self"]
stripe_subscription = self.api_retrieve()
for kwarg, value in kwargs.items():
if value is not None:
setattr(stripe_subscription, kwarg, value)
return Subscription.sync_from_stripe_data(stripe_subscription.save())
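    # Illustrative usage sketch (hypothetical plan id): changing the plan and
    # quantity without proration could look like
    # subscription.update(plan="plan_gold", quantity=2, prorate=False); each
    # non-None keyword is set on the retrieved Stripe subscription before it
    # is saved and synced back.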
def extend(self, delta):
"""
Extends this subscription by the provided delta.
:param delta: The timedelta by which to extend this subscription.
:type delta: timedelta
"""
if delta.total_seconds() < 0:
raise ValueError("delta must be a positive timedelta.")
if self.trial_end is not None and self.trial_end > timezone.now():
period_end = self.trial_end
else:
period_end = self.current_period_end
period_end += delta
return self.update(prorate=False, trial_end=period_end)
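    # Illustrative usage sketch: subscription.extend(datetime.timedelta(days=7))
    # pushes the trial end (or, failing that, the current period end) out by a
    # week and applies it as the new trial_end without proration.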
def cancel(self, at_period_end=djstripe_settings.CANCELLATION_AT_PERIOD_END):
"""
Cancels this subscription. If you set the at_period_end parameter to true,
the subscription will remain active until the end of the period, at which point
it will be canceled and not renewed. By default, the subscription is terminated
immediately. In either case, the customer will not be charged again for
the subscription. Note, however, that any pending invoice items that you've
created will still be charged for at the end of the period unless manually
deleted. If you've set the subscription to cancel at period end,
any pending prorations will also be left in place and collected at the end of
the period, but if the subscription is set to cancel immediately,
pending prorations will be removed.
By default, all unpaid invoices for the customer will be closed upon
subscription cancellation. We do this in order to prevent unexpected payment
retries once the customer has canceled a subscription. However, you can
reopen the invoices manually after subscription cancellation to have us proceed
with automatic retries, or you could even re-attempt payment yourself on all
unpaid invoices before allowing the customer to cancel the
subscription at all.
:param at_period_end: A flag that if set to true will delay the cancellation \
of the subscription until the end of the current period. Default is False.
:type at_period_end: boolean
.. important:: If a subscription is canceled during a trial period, \
the ``at_period_end`` flag will be overridden to False so that the trial ends \
immediately and the customer's card isn't charged.
"""
# If plan has trial days and customer cancels before
# trial period ends, then end subscription now,
# i.e. at_period_end=False
if self.trial_end and self.trial_end > timezone.now():
at_period_end = False
if at_period_end:
stripe_subscription = self.api_retrieve()
stripe_subscription.cancel_at_period_end = True
stripe_subscription.save()
else:
try:
stripe_subscription = self._api_delete()
except InvalidRequestError as exc:
if "No such subscription:" in str(exc):
# cancel() works by deleting the subscription. The object still
# exists in Stripe however, and can still be retrieved.
# If the subscription was already canceled (status=canceled),
# that api_retrieve() call will fail with "No such subscription".
# However, this may also happen if the subscription legitimately
# does not exist, in which case the following line will re-raise.
stripe_subscription = self.api_retrieve()
else:
raise
return Subscription.sync_from_stripe_data(stripe_subscription)
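    # Illustrative usage sketch: subscription.cancel(at_period_end=True) keeps
    # the subscription active until current_period_end and only flags
    # cancel_at_period_end in Stripe, whereas cancel(at_period_end=False)
    # deletes it immediately; a subscription still in its trial is always
    # ended immediately regardless of the flag.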
def reactivate(self):
"""
Reactivates this subscription.
If a customer's subscription is canceled with ``at_period_end`` set to True and
it has not yet reached the end of the billing period, it can be reactivated.
Subscriptions canceled immediately cannot be reactivated.
(Source: https://stripe.com/docs/subscriptions/canceling-pausing)
.. warning:: Reactivating a fully canceled Subscription will fail silently. \
Be sure to check the returned Subscription's status.
"""
stripe_subscription = self.api_retrieve()
stripe_subscription.plan = self.plan.id
stripe_subscription.cancel_at_period_end = False
return Subscription.sync_from_stripe_data(stripe_subscription.save())
def is_period_current(self):
"""
Returns True if this subscription's period is current, false otherwise.
"""
return self.current_period_end > timezone.now() or (
self.trial_end and self.trial_end > timezone.now()
)
def is_status_current(self):
"""
Returns True if this subscription's status is current (active or trialing),
false otherwise.
"""
return self.status in ["trialing", "active"]
def is_status_temporarily_current(self):
"""
A status is temporarily current when the subscription is canceled with the
``at_period_end`` flag.
The subscription is still active, but is technically canceled and we're just
waiting for it to run out.
You could use this method to give customers limited service after they've
canceled. For example, a video on demand service could only allow customers
to download their libraries and do nothing else when their
subscription is temporarily current.
"""
return (
self.canceled_at
and self.cancel_at_period_end
and timezone.now() < self.current_period_end
)
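    # Illustrative scenario: a subscription canceled with at_period_end=True
    # on March 1st for a period ending March 31st stays "temporarily current"
    # until March 31st, so a site could keep limited, read-only access open
    # during that window.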
def is_valid(self):
"""
Returns True if this subscription's status and period are current,
false otherwise.
"""
if not self.is_status_current():
return False
if not self.is_period_current():
return False
return True
def _attach_objects_post_save_hook(self, cls, data, pending_relations=None):
super()._attach_objects_post_save_hook(
cls, data, pending_relations=pending_relations
)
cls._stripe_object_to_subscription_items(
target_cls=SubscriptionItem, data=data, subscription=self
)
class SubscriptionItem(StripeModel):
"""
Subscription items allow you to create customer subscriptions
with more than one plan, making it easy to represent complex billing relationships.
Stripe documentation: https://stripe.com/docs/api#subscription_items
"""
stripe_class = stripe.SubscriptionItem
plan = models.ForeignKey(
"Plan",
on_delete=models.CASCADE,
related_name="subscription_items",
help_text="The plan the customer is subscribed to.",
)
quantity = models.PositiveIntegerField(
null=True,
blank=True,
help_text=(
"The quantity of the plan to which the customer should be subscribed."
),
)
subscription = models.ForeignKey(
"Subscription",
on_delete=models.CASCADE,
related_name="items",
help_text="The subscription this subscription item belongs to.",
)
class UsageRecord(StripeModel):
"""
Usage records allow you to continually report usage and metrics to
Stripe for metered billing of plans.
Stripe documentation: https://stripe.com/docs/api#usage_records
"""
quantity = models.PositiveIntegerField(
help_text=(
"The quantity of the plan to which the customer should be subscribed."
)
)
subscription_item = models.ForeignKey(
"SubscriptionItem",
on_delete=models.CASCADE,
related_name="usage_records",
help_text="The subscription item this usage record contains data for.",
)
| 38.054292
| 95
| 0.646391
|
efffd234c1325e35e7b472a8ec566f9eb5b4c784
| 702
|
py
|
Python
|
FunStuff/fun_draw_tree.py
|
XuhuaHuang/LearnPython
|
eb39f11147716193971dd5a8894e675daa1b9d01
|
[
"MIT"
] | null | null | null |
FunStuff/fun_draw_tree.py
|
XuhuaHuang/LearnPython
|
eb39f11147716193971dd5a8894e675daa1b9d01
|
[
"MIT"
] | null | null | null |
FunStuff/fun_draw_tree.py
|
XuhuaHuang/LearnPython
|
eb39f11147716193971dd5a8894e675daa1b9d01
|
[
"MIT"
] | null | null | null |
from turtle import *
def tree(plist, l, a, f):
"""plist is a list of pens
l is length of the branch
    a is half of the angle between 2 branches
    f is the factor by which the branch is shortened
"""
if l > 5:
lst = []
for p in plist:
            p.forward(l)
q = p.clone()
p.left(a)
q.right(a)
lst.append(p)
lst.append(q)
tree(lst, l * f, a, f)
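# Note on the recursion: every call clones each pen once, so the number of
# turtles doubles per level (1, 2, 4, 8, ...) while the branch length shrinks
# by the factor f until it drops to 5 or below, which terminates the recursion.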
def main():
t = Turtle()
t.color("green")
t.pensize(15)
t.hideturtle()
# FINISHED INITIALIZING
t.getscreen().tracer(30, 0)
t.left(90)
t.penup()
t.goto(60, 60)
t.pendown()
p = tree([t], 110, 65, 0.6375)
main()
| 18.972973
| 47
| 0.497151
|
8fc7bb90299e2dcfc22b5e384f1b5968f4b83bb9
| 8,271
|
py
|
Python
|
docs/conf.py
|
jayaram24/Termineter-Modified
|
2cab514ff1640809337c6fe17f24433bcdec2260
|
[
"MIT"
] | 185
|
2015-03-20T02:16:32.000Z
|
2020-04-07T23:58:46.000Z
|
docs/conf.py
|
pascal1989/termineter
|
d657d25d97c7739e650b951c396404e857e56625
|
[
"BSD-3-Clause"
] | 7
|
2015-06-18T15:27:12.000Z
|
2018-03-18T20:30:06.000Z
|
docs/conf.py
|
pascal1989/termineter
|
d657d25d97c7739e650b951c396404e857e56625
|
[
"BSD-3-Clause"
] | 80
|
2015-03-10T18:37:19.000Z
|
2020-04-01T00:24:26.000Z
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
GITHUB_BRANCH = 'dev'
GITHUB_REPO = 'securestate/termineter'
import sys
import os
_prj_root = os.path.dirname(__file__)
_prj_root = os.path.relpath(os.path.join('..', '..'), _prj_root)
_prj_root = os.path.abspath(_prj_root)
sys.path.insert(1, _prj_root)
del _prj_root, _pkg
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration ------------------------------------------------
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
]
extlinks = {
'release': ("https://github.com/{0}/releases/tag/v%s".format(GITHUB_REPO), 'v')
}
def linkcode_resolve(domain, info):
if domain != 'py':
return None
if not info['module']:
return None
file_name = info['module'].replace('.', '/') + '.py'
return "https://github.com/{0}/blob/{1}/{2}".format(GITHUB_REPO, GITHUB_BRANCH, file_name)
intersphinx_mapping = {'smokezephyr': ('https://smoke-zephyr.readthedocs.org/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Termineter'
copyright = '2011-2015, SecureState LLC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = version.version.split('-')[0]
# The full version, including alpha/beta/rc tags.
release = version.distutils_version
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'termineter_doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Termineter.tex', u'Termineter Documentation', u'Spencer McIntyre', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'termineter', u'Termineter Documentation', [u'Spencer McIntyre'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Termineter', u'Termineter Documentation', u'Spencer McIntyre', 'Termineter', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.093985
| 142
| 0.714182
|
c3c3570c0f8217b3455bb02a08c438ea16eda9e8
| 20,249
|
py
|
Python
|
tensorflow/python/training/saving/saveable_object_util.py
|
yage99/tensorflow
|
c7fa71b32a3635eb25596ae80d007b41007769c4
|
[
"Apache-2.0"
] | 4
|
2020-06-28T08:25:36.000Z
|
2021-08-12T12:41:34.000Z
|
tensorflow/python/training/saving/saveable_object_util.py
|
yage99/tensorflow
|
c7fa71b32a3635eb25596ae80d007b41007769c4
|
[
"Apache-2.0"
] | 10
|
2021-08-03T08:42:38.000Z
|
2022-01-03T03:29:12.000Z
|
tensorflow/python/training/saving/saveable_object_util.py
|
yage99/tensorflow
|
c7fa71b32a3635eb25596ae80d007b41007769c4
|
[
"Apache-2.0"
] | 28
|
2020-02-10T07:03:06.000Z
|
2022-01-12T11:19:20.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for working with and creating SaveableObjects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import six
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
# Op names which identify variable reads which should be saved.
_VARIABLE_OPS = set(["Variable",
"VariableV2",
"AutoReloadVariable",
"VarHandleOp",
"ReadVariableOp"])
def set_cpu0(device_string):
"""Creates a new device string based on `device_string` but using /CPU:0.
If the device is already on /CPU:0, this is a no-op.
Args:
device_string: A device string.
Returns:
A device string.
"""
parsed_device = pydev.DeviceSpec.from_string(device_string)
parsed_device = parsed_device.replace(device_type="CPU", device_index=0)
return parsed_device.to_string()
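# Illustrative example (hypothetical device string):
# set_cpu0("/job:worker/replica:0/task:1/device:GPU:0") would return roughly
# "/job:worker/replica:0/task:1/device:CPU:0", i.e. the same placement with
# the device swapped to CPU:0.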
class ReferenceVariableSaveable(saveable_object.SaveableObject):
"""SaveableObject implementation that handles reference variables."""
def __init__(self, var, slice_spec, name):
spec = saveable_object.SaveSpec(var, slice_spec, name, dtype=var.dtype)
super(ReferenceVariableSaveable, self).__init__(var, [spec], name)
def restore(self, restored_tensors, restored_shapes):
restored_tensor = restored_tensors[0]
if restored_shapes is not None:
restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
return state_ops.assign(
self.op,
restored_tensor,
validate_shape=restored_shapes is None and
self.op.get_shape().is_fully_defined())
class ResourceVariableSaveable(saveable_object.SaveableObject):
"""SaveableObject implementation that handles ResourceVariables."""
def __init__(self, var, slice_spec, name):
self._var_device = var.device
self._var_shape = var.shape
if isinstance(var, ops.Tensor):
self.handle_op = var.op.inputs[0]
tensor = var
elif resource_variable_ops.is_resource_variable(var):
def _read_variable_closure(v):
def f():
with ops.device(v.device):
x = v.read_value()
# To allow variables placed on non-CPU devices to be checkpointed,
# we copy them to CPU on the same machine first.
with ops.device("/device:CPU:0"):
return array_ops.identity(x)
return f
self.handle_op = var.handle
tensor = _read_variable_closure(var)
else:
raise ValueError(
"Saveable is neither a resource variable nor a read operation."
" Got: %s" % repr(var))
spec = saveable_object.SaveSpec(tensor, slice_spec, name,
dtype=var.dtype, device=var.device)
super(ResourceVariableSaveable, self).__init__(var, [spec], name)
def restore(self, restored_tensors, restored_shapes):
restored_tensor = restored_tensors[0]
if restored_shapes is not None:
restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
# Copy the restored tensor to the variable's device.
with ops.device(self._var_device):
restored_tensor = array_ops.identity(restored_tensor)
return resource_variable_ops.shape_safe_assign_variable_handle(
self.handle_op, self._var_shape, restored_tensor)
def _tensor_comes_from_variable(v):
return isinstance(v, ops.Tensor) and v.op.type in _VARIABLE_OPS
def saveable_objects_for_op(op, name):
"""Create `SaveableObject`s from an operation.
Args:
op: A variable, operation, or SaveableObject to coerce into a
SaveableObject.
name: A string name for the SaveableObject.
Yields:
`SaveableObject`s which together save/restore `op`.
Raises:
TypeError: If `name` is not a string.
ValueError: For operations with no known conversion to SaveableObject.
"""
if not isinstance(name, six.string_types):
raise TypeError(
"names_to_saveables must be a dict mapping string names to "
"trackable operations. Name is not a string: %s" % name)
if isinstance(op, saveable_object.SaveableObject):
yield op
elif isinstance(op, (list, tuple, variables.PartitionedVariable)):
if isinstance(op, variables.PartitionedVariable):
op = list(op)
# A set of slices.
slice_name = None
# pylint: disable=protected-access
for variable in op:
if isinstance(variable, saveable_object.SaveableObject):
yield variable
continue
if not isinstance(variable, variables.Variable):
raise ValueError("Slices must all be Variables: %s" % variable)
if not variable._save_slice_info:
raise ValueError("Slices must all be slices: %s" % variable)
if slice_name is None:
slice_name = variable._save_slice_info.full_name
elif slice_name != variable._save_slice_info.full_name:
raise ValueError(
"Slices must all be from the same tensor: %s != %s" %
(slice_name, variable._save_slice_info.full_name))
if variable.op.type in ["Variable", "VariableV2",
"AutoReloadVariable"]:
yield ReferenceVariableSaveable(
variable, variable._save_slice_info.spec, name)
else:
yield ResourceVariableSaveable(
variable, variable._save_slice_info.spec, name)
# pylint: enable=protected-access
elif isinstance(op, trackable.Trackable) and not isinstance(
op, variables.Variable):
# pylint: disable=protected-access
for attr, factory in op._gather_saveables_for_checkpoint().items():
if attr == trackable.VARIABLE_VALUE_KEY:
# Keep original name for classes masquerading as variables.
full_name = name
else:
full_name = name + "_" + attr
op = (factory(full_name) if callable(factory) else factory)
for op in saveable_objects_for_op(op, op.name):
yield op
# pylint: enable=protected-access
else:
# A variable or tensor.
if isinstance(op, resource_variable_ops.BaseResourceVariable):
# pylint: disable=protected-access
if op._in_graph_mode:
variable = op._graph_element
else:
variable = op
# pylint: enable=protected-access
yield ResourceVariableSaveable(variable, "", name)
else:
if context.executing_eagerly():
raise ValueError("Can only save/restore ResourceVariables when "
"executing eagerly, got type: %s." % type(op))
variable = ops.convert_to_tensor(op, as_ref=True)
if not _tensor_comes_from_variable(variable):
raise TypeError("names_to_saveables must be a dict mapping string "
"names to Tensors/Variables. Not a variable: %s" %
variable)
if variable.op.type in ["Variable", "VariableV2",
"AutoReloadVariable"]:
yield ReferenceVariableSaveable(variable, "", name)
else:
yield ResourceVariableSaveable(
variable, "", name)
def op_list_to_dict(op_list, convert_variable_to_tensor=True):
"""Create a dictionary of names to operation lists.
Args:
op_list: A (nested) list, tuple, or set of Variables or SaveableObjects.
convert_variable_to_tensor: Whether or not to convert single Variables
with no slice info into Tensors.
Returns:
A dictionary of names to the operations that must be saved under
that name. Variables with save_slice_info are grouped together under the
same key in no particular order.
Raises:
TypeError: If the type of op_list or its elements is not supported.
ValueError: If at least two saveables share the same name.
"""
if not isinstance(op_list, (list, tuple, set)):
raise TypeError("Variables to save should be passed in a dict or a "
"list: %s" % op_list)
# List casting is necessary to support sets.
op_list = nest.flatten(list(op_list))
# When ResourceVariables are converted to Tensors, read ops are added to the
# graph. Sorting the op_list ensures that the resulting graph is always
# constructed in a deterministic way:
op_list = sorted(op_list, key=lambda x: x.name)
names_to_saveables = {}
# pylint: disable=protected-access
for var in op_list:
resource_or_ref_variable = (
isinstance(var, resource_variable_ops.BaseResourceVariable) or
isinstance(var, variables.RefVariable))
if isinstance(var, saveable_object.SaveableObject):
names_to_saveables[var.name] = var
elif isinstance(var, variables.PartitionedVariable):
if var.name in names_to_saveables:
raise ValueError("At least two variables have the same name: %s" %
var.name)
names_to_saveables[var.name] = var
elif isinstance(var, variables.Variable) and var._save_slice_info:
name = var._save_slice_info.full_name
if name in names_to_saveables:
if not isinstance(names_to_saveables[name], list):
raise ValueError("Mixing slices and non-slices with the same name: "
"%s" % name)
names_to_saveables[name].append(var)
else:
names_to_saveables[name] = [var]
elif isinstance(var, trackable.Trackable) and not resource_or_ref_variable:
trackable_saveables = [
(factory() if callable(factory) else factory)
for factory in var._gather_saveables_for_checkpoint().values()]
names_to_saveables.update(
op_list_to_dict(trackable_saveables))
else:
# Variables (reference and resource) have an _in_graph_mode property
# indicating whether they were created in a graph building context. We
# also get Tensors when graph building, which do not have this property.
if not getattr(var, "_in_graph_mode", True):
if not isinstance(var, resource_variable_ops.BaseResourceVariable):
raise ValueError(
"Can only save/restore ResourceVariables when eager execution "
"is enabled, type: %s." % type(var))
set_var = names_to_saveables.setdefault(var._shared_name, var)
if set_var is not var:
raise ValueError(
("Two different ResourceVariable objects with the same "
"shared_name '%s' were passed to the Saver. This likely means "
"that they were created in different Graphs or isoWlation "
"contexts, and may not be checkpointed together.") %
(var._shared_name,))
else:
if convert_variable_to_tensor:
if isinstance(var, resource_variable_ops.BaseResourceVariable):
var = var._graph_element # pylint: disable=protected-access
else:
var = ops.convert_to_tensor(var, as_ref=True)
if not _tensor_comes_from_variable(var):
raise TypeError("Variable to save is not a Variable: %s" % var)
if var.op.type == "ReadVariableOp":
name = var.op.inputs[0].op.name
else:
name = var.op.name
if name in names_to_saveables:
raise ValueError("At least two variables have the same name: %s" %
name)
names_to_saveables[name] = var
# pylint: enable=protected-access
return names_to_saveables
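# Illustrative sketch (hypothetical variables): given two resource variables
# named "dense/kernel" and "dense/bias", op_list_to_dict([v1, v2]) returns a
# dict keyed by those names, mapping to the tensors or variables to save;
# partitioned slices that share a full_name are grouped into a list under a
# single key.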
def _add_saveable(saveables, seen_ops, saveable):
"""Adds the saveable to the saveables list.
Args:
saveables: List to append the SaveableObject to.
seen_ops: Set of the ops of the saveables already processed. Used to
check that each saveable is only saved once.
saveable: The saveable.
Raises:
ValueError: If the saveable has already been processed.
"""
if saveable.op in seen_ops:
raise ValueError("The same saveable will be restored with two names: %s" %
saveable.name)
saveables.append(saveable)
seen_ops.add(saveable.op)
def validate_and_slice_inputs(names_to_saveables):
"""Returns the variables and names that will be used for a Saver.
Args:
names_to_saveables: A dict (k, v) where k is the name of an operation and
v is an operation to save or a BaseSaverBuilder.Saver.
Returns:
A list of SaveableObjects.
Raises:
TypeError: If any of the keys are not strings or any of the
values are not one of Tensor or Variable or a trackable operation.
ValueError: If the same operation is given in more than one value
(this also applies to slices of SlicedVariables).
"""
if not isinstance(names_to_saveables, dict):
names_to_saveables = op_list_to_dict(names_to_saveables)
saveables = []
seen_ops = object_identity.ObjectIdentitySet()
for name, op in sorted(names_to_saveables.items(),
# Avoid comparing ops, sort only by name.
key=lambda x: x[0]):
for converted_saveable_object in saveable_objects_for_op(op, name):
_add_saveable(saveables, seen_ops, converted_saveable_object)
return saveables
def trace_save_restore_functions(object_to_save):
"""Gathers all SaveableObjects and traces the save and restore ops."""
saveable_map = {} # Maps name -> (save function, restore function)
for name, saveable_factory in (
object_to_save._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access
if not callable(saveable_factory):
if isinstance(saveable_factory, saveable_object.SaveableObject):
logging.debug(
"Trackable {} should return callable factories, not SaveableObjects"
" in `_gather_saveables_for_checkpoint`. This could lead to "
"problems loading the SavedModel back into Python."
.format(object_to_save))
continue
if is_factory_for_restored_saveable_object(saveable_factory):
saveable_map[name] = (saveable_factory.keywords["save_function"],
saveable_factory.keywords["restore_function"])
else:
concrete_save_fn, concrete_restore_fn = _trace_save_and_restore_function(
saveable_factory, object_to_save)
if concrete_save_fn is not None:
saveable_map[name] = (concrete_save_fn, concrete_restore_fn)
return saveable_map
def _trace_save_and_restore_function(saveable_factory, object_to_save):
"""Traces the save and restore concrete functions."""
saveables = []
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def save_fn(checkpoint_key):
maybe_saveable = saveable_factory(name=checkpoint_key)
if isinstance(maybe_saveable, saveable_object.SaveableObject):
maybe_saveable = [maybe_saveable]
saveables[:] = maybe_saveable
# Return list of all SaveSpecs created by the factory.
ret = []
for saveable in saveables:
for spec in saveable.specs:
ret.append({"name": spec.name, "tensor": spec.tensor,
"slice_spec": spec.slice_spec})
return ret
concrete_save_fn = save_fn.get_concrete_function()
if any(isinstance(saveable, trackable.PythonStateSaveable)
for saveable in saveables):
logging.warn(
"Note that object {} stores python values into the checkpoint. "
"These values will not be restored when loading the SavedModel "
"into python.".format(object_to_save))
return None, None
if any(isinstance(saveable, trackable.NoRestoreSaveable)
for saveable in saveables):
return None, None
restored_type_specs = []
tensor_structure = []
for saveable in saveables:
saveable_tensor_structure = []
tensor_structure.append(saveable_tensor_structure)
for spec in saveable.specs:
restored_type_specs.append(type_spec.type_spec_from_value(spec.tensor))
saveable_tensor_structure.append(spec.name)
@def_function.function(input_signature=restored_type_specs)
def restore_fn(*restored_tensors):
structured_restored_tensors = nest.pack_sequence_as(
tensor_structure, restored_tensors)
for saveable, restored_tensors in zip(saveables,
structured_restored_tensors):
saveable.restore(restored_tensors, restored_shapes=None)
return 1
concrete_restore_fn = restore_fn.get_concrete_function()
return concrete_save_fn, concrete_restore_fn
class RestoredSaveableObject(saveable_object.SaveableObject):
"""SaveableObject restored from SavedModel using the traced save/restore."""
def __init__(self, save_function, restore_function, name):
self.save_function = save_function
self.restore_function = restore_function
if tensor_util.is_tensor(name):
name_tensor = name
else:
with ops.init_scope():
name_tensor = constant_op.constant(name)
tensors = save_function(name_tensor)
specs = [saveable_object.SaveSpec(x["tensor"], x["slice_spec"], x["name"])
for x in tensors]
super(RestoredSaveableObject, self).__init__(None, specs, name)
def restore(self, restored_tensors, restored_shapes):
del restored_shapes # unused
return self.restore_function(
*[restored_tensors[i] for i in range(len(self.specs))])
def restored_saved_object_factory(save_function, restore_function):
return functools.partial(RestoredSaveableObject,
save_function=save_function,
restore_function=restore_function)
def create_saveable_object(factory, name, call_with_mapped_captures):
"""Creates a SaveableObject while potentially in a different graph.
When creating the frozen saver for SavedModel, the save and restore ops are
placed in a separate graph. Since RestoredSaveableObject uses tf.functions to
save and restore, the function captures must be mapped to the new graph.
Args:
factory: Factory method for creating the SaveableObject.
name: Checkpoint key of this SaveableObject.
call_with_mapped_captures: Helper that calls a tf.function while remapping
the captures.
Returns:
a SaveableObject.
"""
if (call_with_mapped_captures is None or
not is_factory_for_restored_saveable_object(factory)):
return factory(name=name)
concrete_save_fn = factory.keywords["save_function"]
def save_fn(name):
return call_with_mapped_captures(concrete_save_fn, [name])
concrete_restore_fn = factory.keywords["restore_function"]
def restore_fn(*restored_tensors):
return call_with_mapped_captures(concrete_restore_fn, restored_tensors)
return factory(save_function=save_fn, restore_function=restore_fn, name=name)
def is_factory_for_restored_saveable_object(factory):
return (isinstance(factory, functools.partial) and
factory.func is RestoredSaveableObject)
| 39.938856
| 101
| 0.704183
|
e4ad1a8990a53ccba36daabfd2907f006f69d8ad
| 13,651
|
py
|
Python
|
dfa/agent/topo_disc/topo_disc.py
|
CiscoSystems/fabric_enabler
|
d5318624dd15692197a7212ecd4b0ceea42dc73e
|
[
"Apache-2.0"
] | 1
|
2015-03-05T02:48:15.000Z
|
2015-03-05T02:48:15.000Z
|
dfa/agent/topo_disc/topo_disc.py
|
CiscoSystems/fabric_enabler
|
d5318624dd15692197a7212ecd4b0ceea42dc73e
|
[
"Apache-2.0"
] | 1
|
2016-05-25T22:13:43.000Z
|
2016-07-21T20:49:48.000Z
|
dfa/agent/topo_disc/topo_disc.py
|
CiscoSystems/fabric_enabler
|
d5318624dd15692197a7212ecd4b0ceea42dc73e
|
[
"Apache-2.0"
] | 2
|
2017-05-02T21:32:46.000Z
|
2018-08-22T16:52:40.000Z
|
# Copyright 2014 Cisco Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Padmanabhan Krishnan, Cisco Systems, Inc.
"""This file contains the implementation of Topology Discovery of servers and
their associated leaf switches using Open source implementation of LLDP.
www.open-lldp.org
"""
from dfa.agent.topo_disc import pub_lldp_api as pub_lldp
from dfa.agent.topo_disc import topo_disc_constants as constants
from dfa.common import utils
from dfa.common import dfa_logger as logging
from dfa.common import dfa_sys_lib as sys_utils
LOG = logging.getLogger(__name__)
class TopoIntfAttr(object):
"""Class that stores the interface attributes. """
def __init__(self, protocol_interface, phy_interface):
"""Class Init. """
self.init_params(protocol_interface, phy_interface)
def init_params(self, protocol_interface, phy_interface):
"""Initializing parameters. """
self.lldp_cfgd = False
self.local_intf = protocol_interface
self.phy_interface = phy_interface
self.remote_evb_cfgd = False
self.remote_evb_mode = None
self.remote_mgmt_addr = None
self.remote_system_desc = None
self.remote_system_name = None
self.remote_port = None
self.remote_chassis_id_mac = None
self.remote_port_id_mac = None
self.local_evb_cfgd = False
self.local_evb_mode = None
self.local_mgmt_address = None
self.local_system_desc = None
self.local_system_name = None
self.local_port = None
self.local_chassis_id_mac = None
self.local_port_id_mac = None
self.db_retry_status = False
self.topo_send_cnt = 0
self.bond_interface = None
self.bond_member_ports = None
def update_lldp_status(self, status):
"""Update the LLDP cfg status. """
self.lldp_cfgd = status
def cmp_update_bond_intf(self, bond_interface):
"""Update the bond interface and its members.
Update the bond interface, if this interface is a part of bond
Return True if there's a change.
"""
if bond_interface != self.bond_interface:
self.bond_interface = bond_interface
self.bond_member_ports = sys_utils.get_member_ports(bond_interface)
return True
return False
def get_lldp_status(self):
"""Retrieve the LLDP cfg status. """
return self.lldp_cfgd
def get_db_retry_status(self):
"""Retrieve the RPC retru status.
This retrieves the number of times RPC was retried with the server.
"""
return self.db_retry_status
def get_phy_interface(self):
"""Retrieves the physical interface. """
return self.phy_interface
def store_db_retry_status(self, status):
"""This stores the number of times RPC was retried with the server. """
self.db_retry_status = status
def get_topo_disc_send_cnt(self):
"""Retrieve the topology status send count for this interface. """
return self.topo_send_cnt
def incr_topo_disc_send_cnt(self):
"""Increment the topology status send count for this interface. """
self.topo_send_cnt += 1
def reset_topo_disc_send_cnt(self):
"""Reset the topology status send count for this interface. """
self.topo_send_cnt = 0
def remote_evb_mode_uneq_store(self, remote_evb_mode):
"""Saves the EVB mode, if it is not the same as stored. """
if remote_evb_mode != self.remote_evb_mode:
self.remote_evb_mode = remote_evb_mode
return True
return False
def remote_evb_cfgd_uneq_store(self, remote_evb_cfgd):
"""This saves the EVB cfg, if it is not the same as stored. """
if remote_evb_cfgd != self.remote_evb_cfgd:
self.remote_evb_cfgd = remote_evb_cfgd
return True
return False
def remote_mgmt_addr_uneq_store(self, remote_mgmt_addr):
"""This function saves the MGMT address, if different from stored. """
if remote_mgmt_addr != self.remote_mgmt_addr:
self.remote_mgmt_addr = remote_mgmt_addr
return True
return False
def remote_sys_desc_uneq_store(self, remote_system_desc):
"""This function saves the system desc, if different from stored. """
if remote_system_desc != self.remote_system_desc:
self.remote_system_desc = remote_system_desc
return True
return False
def remote_sys_name_uneq_store(self, remote_system_name):
"""This function saves the system name, if different from stored. """
if remote_system_name != self.remote_system_name:
self.remote_system_name = remote_system_name
return True
return False
def remote_port_uneq_store(self, remote_port):
"""This function saves the port, if different from stored. """
if remote_port != self.remote_port:
self.remote_port = remote_port
return True
return False
def remote_chassis_id_mac_uneq_store(self, remote_chassis_id_mac):
"""This function saves the Chassis MAC, if different from stored. """
if remote_chassis_id_mac != self.remote_chassis_id_mac:
self.remote_chassis_id_mac = remote_chassis_id_mac
return True
return False
def remote_port_id_mac_uneq_store(self, remote_port_id_mac):
"""This function saves the port MAC, if different from stored. """
if remote_port_id_mac != self.remote_port_id_mac:
self.remote_port_id_mac = remote_port_id_mac
return True
return False
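    # Illustrative behaviour of the *_uneq_store helpers (hypothetical value):
    # remote_port_uneq_store("Ethernet1/1") returns True and stores the value
    # the first time, then returns False on a repeat call with the same value,
    # which is how the periodic task detects TLV changes worth resending.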
class TopoDiscPubApi(object):
topo_intf_obj_dict = {}
@classmethod
def store_obj(cls, intf, obj):
"""Stores the topo object. """
cls.topo_intf_obj_dict[intf] = obj
@classmethod
def get_lldp_status(cls, intf):
"""Retrieves the LLDP status. """
if intf not in cls.topo_intf_obj_dict:
LOG.error("Interface %s not configured at all", intf)
return False
intf_obj = cls.topo_intf_obj_dict.get(intf)
return intf_obj.get_lldp_status()
class TopoDisc(TopoDiscPubApi):
"""Topology Discovery Top level class once. """
def __init__(self, cb, root_helper, intf_list=None, all_intf=True):
"""Constructor.
cb => Callback in case any of the interface TLV changes.
intf_list => List of interfaces to be LLDP enabled and monitored.
all_intf => Boolean that signifies if all physical interfaces are to
be monitored. intf_list will be None, if this variable is True.
"""
self.pub_lldp = pub_lldp.LldpApi(root_helper)
if not all_intf:
self.intf_list = intf_list
else:
self.intf_list = sys_utils.get_all_run_phy_intf()
self.cb = cb
self.intf_attr = {}
self.cfg_lldp_interface_list(self.intf_list)
per_task = utils.PeriodicTask(constants.PERIODIC_TASK_INTERVAL,
self.period_disc_task)
per_task.run()
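    # Illustrative construction sketch (hypothetical callback): TopoDisc(cb=my_cb,
    # root_helper="sudo") enables LLDP on every running physical interface and
    # then polls their TLVs every PERIODIC_TASK_INTERVAL seconds, invoking
    # my_cb(intf, attr_obj) whenever an attribute of interest changes.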
def cfg_intf(self, protocol_interface, phy_interface=None):
"""Called by application to add an interface to the list. """
self.intf_list.append(protocol_interface)
self.cfg_lldp_intf(protocol_interface, phy_interface)
def uncfg_intf(self, intf):
"""Called by application to remove an interface to the list. """
pass
# self.intf_list.remove(intf)
# Can't remove interface since DB in server may appear stale
# I can just remove the interface DB, but need to retry that till
# it succeeds, so it has to be in periodic loop
# So, currently leaving it as is, since LLDP frames won't be obtained
# over the bridge, the periodic handler will automatically remove the
# DB for this interface from server
# Do i need to uncfg LLDP, object need not be removed
def create_attr_obj(self, protocol_interface, phy_interface):
"""Creates the local interface attribute object and stores it. """
self.intf_attr[protocol_interface] = TopoIntfAttr(
protocol_interface, phy_interface)
self.store_obj(protocol_interface, self.intf_attr[protocol_interface])
def get_attr_obj(self, intf):
"""Retrieve the interface object. """
return self.intf_attr[intf]
def cmp_store_tlv_params(self, intf, tlv_data):
"""Compare and store the received TLV.
Compares the received TLV with stored TLV. Store the new TLV if it is
different. """
flag = False
attr_obj = self.get_attr_obj(intf)
remote_evb_mode = self.pub_lldp.get_remote_evb_mode(tlv_data)
if attr_obj.remote_evb_mode_uneq_store(remote_evb_mode):
flag = True
remote_evb_cfgd = self.pub_lldp.get_remote_evb_cfgd(tlv_data)
if attr_obj.remote_evb_cfgd_uneq_store(remote_evb_cfgd):
flag = True
remote_mgmt_addr = self.pub_lldp.get_remote_mgmt_addr(tlv_data)
if attr_obj.remote_mgmt_addr_uneq_store(remote_mgmt_addr):
flag = True
remote_sys_desc = self.pub_lldp.get_remote_sys_desc(tlv_data)
if attr_obj.remote_sys_desc_uneq_store(remote_sys_desc):
flag = True
remote_sys_name = self.pub_lldp.get_remote_sys_name(tlv_data)
if attr_obj.remote_sys_name_uneq_store(remote_sys_name):
flag = True
remote_port = self.pub_lldp.get_remote_port(tlv_data)
if attr_obj.remote_port_uneq_store(remote_port):
flag = True
remote_chassis_id_mac = self.pub_lldp.\
get_remote_chassis_id_mac(tlv_data)
if attr_obj.remote_chassis_id_mac_uneq_store(remote_chassis_id_mac):
flag = True
remote_port_id_mac = self.pub_lldp.get_remote_port_id_mac(tlv_data)
if attr_obj.remote_port_id_mac_uneq_store(remote_port_id_mac):
flag = True
return flag
def cfg_lldp_intf(self, protocol_interface, phy_interface=None):
"""Cfg LLDP on interface and create object. """
if phy_interface is None:
phy_interface = protocol_interface
self.create_attr_obj(protocol_interface, phy_interface)
ret = self.pub_lldp.enable_lldp(protocol_interface)
attr_obj = self.get_attr_obj(protocol_interface)
attr_obj.update_lldp_status(ret)
def cfg_lldp_interface_list(self, intf_list):
"""This routine configures LLDP on the given interfaces list. """
for intf in intf_list:
self.cfg_lldp_intf(intf)
def period_disc_task(self):
"""Periodic task that checks the interface TLV attributes. """
try:
self._periodic_task_int()
except Exception as exc:
LOG.error("Exception caught in periodic task %s", str(exc))
def _check_bond_interface_change(self, phy_interface, attr_obj):
"""Check if there's any change in bond interface.
First check if the interface passed itself is a bond-interface and then
retrieve the member list and compare.
Next, check if the interface passed is a part of the bond interface and
then retrieve the member list and compare.
"""
bond_phy = sys_utils.get_bond_intf(phy_interface)
if sys_utils.is_intf_bond(phy_interface):
bond_intf = phy_interface
else:
bond_intf = bond_phy
# This can be an addition or removal of the interface to a bond.
bond_intf_change = attr_obj.cmp_update_bond_intf(bond_intf)
return bond_intf_change
def _periodic_task_int(self):
"""Internal periodic task routine.
This routine retrieves the LLDP TLVs on all of its configured interfaces.
If a retrieved TLV differs from the stored TLV, it invokes the
callback.
"""
for intf in self.intf_list:
attr_obj = self.get_attr_obj(intf)
status = attr_obj.get_lldp_status()
if not status:
ret = self.pub_lldp.enable_lldp(intf)
attr_obj.update_lldp_status(ret)
continue
bond_intf_change = self._check_bond_interface_change(
attr_obj.get_phy_interface(), attr_obj)
tlv_data = self.pub_lldp.get_lldp_tlv(intf)
# This should take care of storing the information of interest
if self.cmp_store_tlv_params(intf, tlv_data) or (
attr_obj.get_db_retry_status() or bond_intf_change or (
attr_obj.get_topo_disc_send_cnt() > (
constants.TOPO_DISC_SEND_THRESHOLD))):
# Passing the interface attribute object to CB
ret = self.cb(intf, attr_obj)
status = not ret
attr_obj.store_db_retry_status(status)
attr_obj.reset_topo_disc_send_cnt()
else:
attr_obj.incr_topo_disc_send_cnt()
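# Usage sketch (illustrative, not part of the original module): the callback
# passed to TopoDisc is invoked as cb(interface_name, intf_attr_obj) whenever a
# monitored interface's LLDP TLVs change; a truthy return value marks the
# update as successfully persisted.
#
#   def on_topo_change(intf, attr_obj):
#       return push_neighbor_info(intf, attr_obj)  # hypothetical helper
#
#   disc = TopoDisc(on_topo_change, root_helper="sudo", all_intf=True)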
| 40.15
| 79
| 0.668889
|
996f5f849d2c7cf3b7b45e81d15ce1face95458e
| 7,373
|
py
|
Python
|
ansible/modules/cloud/rackspace/rax_mon_alarm.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2022-01-25T22:52:58.000Z
|
2022-01-25T22:52:58.000Z
|
ansible/modules/cloud/rackspace/rax_mon_alarm.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
ansible/modules/cloud/rackspace/rax_mon_alarm.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_mon_alarm
short_description: Create or delete a Rackspace Cloud Monitoring alarm.
description:
- Create or delete a Rackspace Cloud Monitoring alarm that associates an
existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
criteria that specify what conditions will trigger which levels of
notifications. Rackspace monitoring module flow | rax_mon_entity ->
rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
*rax_mon_alarm*
version_added: "2.0"
options:
state:
description:
- Ensure that the alarm with this C(label) exists or does not exist.
choices: [ "present", "absent" ]
required: false
default: present
label:
description:
- Friendly name for this alarm, used to achieve idempotence. Must be a String
between 1 and 255 characters long.
required: true
entity_id:
description:
- ID of the entity this alarm is attached to. May be acquired by registering
the value of a rax_mon_entity task.
required: true
check_id:
description:
- ID of the check that should be alerted on. May be acquired by registering
the value of a rax_mon_check task.
required: true
notification_plan_id:
description:
- ID of the notification plan to trigger if this alarm fires. May be acquired
by registering the value of a rax_mon_notification_plan task.
required: true
criteria:
description:
- Alarm DSL that describes alerting conditions and their output states. Must
be between 1 and 16384 characters long. See
http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html
for a reference on the alerting language.
disabled:
description:
- If yes, create this alarm, but leave it in an inactive state. Defaults to
no.
choices: [ "yes", "no" ]
metadata:
description:
- Arbitrary key/value pairs to accompany the alarm. Must be a hash of String
keys and values between 1 and 255 characters long.
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Alarm example
gather_facts: False
hosts: local
connection: local
tasks:
- name: Ensure that a specific alarm exists.
rax_mon_alarm:
credentials: ~/.rax_pub
state: present
label: uhoh
entity_id: "{{ the_entity['entity']['id'] }}"
check_id: "{{ the_check['check']['id'] }}"
notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"
criteria: >
if (rate(metric['average']) > 10) {
return new AlarmStatus(WARNING);
}
return new AlarmStatus(OK);
register: the_alarm
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria,
disabled, metadata):
if len(label) < 1 or len(label) > 255:
module.fail_json(msg='label must be between 1 and 255 characters long')
if criteria and (len(criteria) < 1 or len(criteria) > 16384):
module.fail_json(msg='criteria must be between 1 and 16384 characters long')
# Coerce attributes.
changed = False
alarm = None
cm = pyrax.cloud_monitoring
if not cm:
module.fail_json(msg='Failed to instantiate client. This typically '
'indicates an invalid region or an incorrectly '
'capitalized region name.')
existing = [a for a in cm.list_alarms(entity_id) if a.label == label]
if existing:
alarm = existing[0]
if state == 'present':
should_create = False
should_update = False
should_delete = False
if len(existing) > 1:
module.fail_json(msg='%s existing alarms have the label %s.' %
(len(existing), label))
if alarm:
if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id:
should_delete = should_create = True
should_update = (disabled and disabled != alarm.disabled) or \
(metadata and metadata != alarm.metadata) or \
(criteria and criteria != alarm.criteria)
if should_update and not should_delete:
cm.update_alarm(entity=entity_id, alarm=alarm,
criteria=criteria, disabled=disabled,
label=label, metadata=metadata)
changed = True
if should_delete:
alarm.delete()
changed = True
else:
should_create = True
if should_create:
alarm = cm.create_alarm(entity=entity_id, check=check_id,
notification_plan=notification_plan_id,
criteria=criteria, disabled=disabled, label=label,
metadata=metadata)
changed = True
else:
for a in existing:
a.delete()
changed = True
if alarm:
alarm_dict = {
"id": alarm.id,
"label": alarm.label,
"check_id": alarm.check_id,
"notification_plan_id": alarm.notification_plan_id,
"criteria": alarm.criteria,
"disabled": alarm.disabled,
"metadata": alarm.metadata
}
module.exit_json(changed=changed, alarm=alarm_dict)
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
label=dict(required=True),
entity_id=dict(required=True),
check_id=dict(required=True),
notification_plan_id=dict(required=True),
criteria=dict(),
disabled=dict(type='bool', default=False),
metadata=dict(type='dict')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
entity_id = module.params.get('entity_id')
check_id = module.params.get('check_id')
notification_plan_id = module.params.get('notification_plan_id')
criteria = module.params.get('criteria')
disabled = module.boolean(module.params.get('disabled'))
metadata = module.params.get('metadata')
setup_rax_module(module, pyrax)
alarm(module, state, label, entity_id, check_id, notification_plan_id,
criteria, disabled, metadata)
if __name__ == '__main__':
main()
| 33.06278
| 96
| 0.637732
|
92d617e93d45792cb99e11516f4b4931f3b768c4
| 4,039
|
py
|
Python
|
oaff/app/oaff/app/data/sources/postgresql/stac_hybrid/postgresql_feature_set_provider.py
|
JBurkinshaw/ogc-api-fast-features
|
4fc6ba3cc4df1600450fe4c9f35320b00c69f158
|
[
"MIT"
] | 19
|
2021-07-06T16:35:27.000Z
|
2022-02-08T04:59:21.000Z
|
oaff/app/oaff/app/data/sources/postgresql/stac_hybrid/postgresql_feature_set_provider.py
|
JBurkinshaw/ogc-api-fast-features
|
4fc6ba3cc4df1600450fe4c9f35320b00c69f158
|
[
"MIT"
] | 30
|
2021-07-14T04:13:11.000Z
|
2021-11-22T20:45:15.000Z
|
oaff/app/oaff/app/data/sources/postgresql/stac_hybrid/postgresql_feature_set_provider.py
|
JBurkinshaw/ogc-api-fast-features
|
4fc6ba3cc4df1600450fe4c9f35320b00c69f158
|
[
"MIT"
] | 6
|
2021-07-06T16:35:28.000Z
|
2021-09-17T19:24:49.000Z
|
from json import dumps
from typing import Callable, Dict, Final, List
from uuid import uuid4
import sqlalchemy as sa
from databases.core import Database
from oaff.app.data.retrieval.feature_set_provider import FeatureSetProvider
from oaff.app.data.sources.postgresql.stac_hybrid.postgresql_layer import PostgresqlLayer
from oaff.app.responses.models.collection_items_html import CollectionItemsHtml
from oaff.app.responses.models.link import Link, PageLinkRel
from oaff.app.util import now_as_rfc3339
class PostgresqlFeatureSetProvider(FeatureSetProvider):
FEATURES_PLACEHOLDER: Final = str(uuid4())
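# A random placeholder stands in for the feature array so that as_geojson() can
# build the envelope with json.dumps() and then splice in the row JSON (already
# serialized by PostgreSQL) verbatim via str.replace(), instead of letting
# dumps() re-encode and escape it.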
def __init__(
self,
db: Database,
id_set: sa.sql.expression.Select,
layer: PostgresqlLayer,
total_count: int,
):
self.db = db
self.id_set = id_set
self.layer = layer
self.total_count = total_count
async def as_geojson(
self,
links: List[Link],
page_links_provider: Callable[[int, int], Dict[PageLinkRel, Link]],
) -> str:
rows = [
row[0]
for row in await self.db.fetch_all(
# fmt: off
sa.select([
sa.text(f"""
JSON_BUILD_OBJECT(
'type', 'Feature',
'id', source."{self.layer.unique_field_name}",
'geometry', ST_AsGeoJSON(
source."{self.layer.geometry_field_name}"
)::JSONB,
'properties', TO_JSONB(source) - '{
self.layer.unique_field_name
}' - '{
self.layer.geometry_field_name
}'
)
""")
])
.select_from(
self.layer.model.alias("source").join(
self.id_set,
self.layer.model.alias("source").c[self.layer.unique_field_name]
== self.id_set.c["id"],
)
)
# fmt: on
)
]
return dumps(
{
"type": "FeatureCollection",
"features": self.FEATURES_PLACEHOLDER,
"links": [
dict(link)
for link in links
+ list(page_links_provider(self.total_count, len(rows)).values())
],
"numberMatched": self.total_count,
"numberReturned": len(rows),
"timeStamp": now_as_rfc3339(),
}
).replace(f'"{self.FEATURES_PLACEHOLDER}"', f'[{",".join(rows)}]')
async def as_html_compatible(
self, links: List[Link], page_links_provider: Callable[[int, int], List[Link]]
) -> CollectionItemsHtml:
rows = [
dict(row)
for row in await self.db.fetch_all(
sa.select(
[
col
for col in self.layer.model.c
if col.name != self.layer.geometry_field_name
]
).select_from(
self.layer.model.join(
self.id_set,
self.layer.model.primary_key.columns[self.layer.unique_field_name]
== self.id_set.c["id"],
)
)
)
]
page_links = page_links_provider(self.total_count, len(rows))
return CollectionItemsHtml(
format_links=links,
next_link=page_links[PageLinkRel.NEXT]
if PageLinkRel.NEXT in page_links
else None,
prev_link=page_links[PageLinkRel.PREV]
if PageLinkRel.PREV in page_links
else None,
features=rows,
collection_id=self.layer.id,
unique_field_name=self.layer.unique_field_name,
)
| 35.121739
| 90
| 0.501114
|
2372e4419d469adb34ada0cc77dd3dd7d2ab7374
| 1,734
|
py
|
Python
|
standardlib.py
|
Syk326/PythonTest
|
ee17c134009f690c2c8045fce0e56699d1a81775
|
[
"Unlicense"
] | null | null | null |
standardlib.py
|
Syk326/PythonTest
|
ee17c134009f690c2c8045fce0e56699d1a81775
|
[
"Unlicense"
] | null | null | null |
standardlib.py
|
Syk326/PythonTest
|
ee17c134009f690c2c8045fce0e56699d1a81775
|
[
"Unlicense"
] | null | null | null |
# Python Standard Library: import modules @ top
import math # math module
# from module_name import object_name <- import individual function
from random import randrange
from datetime import datetime
from collections import defaultdict, namedtuple # import multiple objects
from csv import reader as csv # rename the module
import os.path # submodule
# DON'T: from module_name import * <- may overwrite your code accidentally, just do: import module_name
# DON'T: import os.path.isdir <- won't work to import function
# third-party packages: large programs may depend on dozens
import pytz # 3rd party library
# FIRST: pip3 install pytz <- pip comes with python3 (must be sudo)
"""
share large programs with requirements.txt:
Lists out a project's dependencies, each line has package name & optional ver. #
pip3 install -r requirements.txt <- install ALL dependencies
utc = pytz.utc # coordinated universal time
ist = pytz.timezone('Asia/Kolkata') # Indian Standard Time
now = datetime.datetime.now(tz=utc) # time in UTC
ist_now = now.astimezone(ist) # time in IST
"""
# use "random" package from standard library to complete
word_file = "words.txt"
word_list = []
#fill up the word_list
with open(word_file,'r') as words:
for line in words:
# remove white space and make everything lowercase
word = line.strip().lower()
# don't include words that are too long or too short
if 3 < len(word) < 8:
word_list.append(word)
def generate_password():
# Return a string consisting of three random words concatenated together without spaces
rand_string = ""
for i in range(3):
rand_num = randrange(len(word_list))
rand_string += word_list[rand_num]
return rand_string
print(generate_password())
| 38.533333
| 103
| 0.749135
|
45f096891383e10b64b3cecf4de511fc60e5e163
| 162
|
py
|
Python
|
example_snippets/multimenus_snippets/Snippets/SciPy/Integration and ODE solvers/Integrate given function object/General-purpose integration.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/SciPy/Integration and ODE solvers/Integrate given function object/General-purpose integration.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/SciPy/Integration and ODE solvers/Integrate given function object/General-purpose integration.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | 1
|
2021-02-04T04:51:48.000Z
|
2021-02-04T04:51:48.000Z
|
from scipy import integrate
def f(x, a, b):
return a * x + b
integral,error = integrate.quad(f, 0, 4.5, args=(2,1)) # integrates 2*x+1
print(integral, error)
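# Sanity check of the example above (worked by hand, not part of the original
# snippet): the integral of 2*x + 1 from 0 to 4.5 is x**2 + x evaluated at 4.5,
# i.e. 20.25 + 4.5 = 24.75, so `integral` should be ~24.75 with a tiny `error`.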
| 32.4
| 74
| 0.660494
|
0882c859f188398fe00e106472ea2d98668ad8c7
| 4,648
|
py
|
Python
|
setupext/build_thunk.py
|
baztian/jpype
|
034d44e6c719995c25e9cd61348ebc1860030a9b
|
[
"Apache-2.0"
] | null | null | null |
setupext/build_thunk.py
|
baztian/jpype
|
034d44e6c719995c25e9cd61348ebc1860030a9b
|
[
"Apache-2.0"
] | null | null | null |
setupext/build_thunk.py
|
baztian/jpype
|
034d44e6c719995c25e9cd61348ebc1860030a9b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import errno
import fnmatch
import os
import distutils.cmd
import distutils.log
import array
import sys
if (sys.version_info < (3, 0)):
import string
def translate(s, cfrom, cto):
return s.translate(string.maketrans(cfrom, cto))
else:
def translate(s, cfrom, cto):
return s.translate(str.maketrans(cfrom, cto))
# Python2/3 don't agree on how glob should work
def _glob(directory, pattern):
out = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, pattern):
out.append(os.path.join(root, filename))
return out
def output(fout, l):
print(" ", file=fout, end="")
line = []
buffer = array.array("B")
buffer.frombytes(l)
for i in buffer:
line.append("(jbyte)0x%02X" % i)
print(",".join(line), file=fout, end="")
def outputClass(srcfname, cname, fout):
f = open(srcfname, "rb")
chunk = 16
print("jbyte %s[] = {" % cname, file=fout)
sz = 0
while True:
l = f.read(chunk)
if len(l) == 0:
break
if sz > 0:
print(",", file=fout)
output(fout, l)
sz += len(l)
print(file=fout)
print("};", file=fout)
print("int %s_size = %d;" % (cname, sz), file=fout)
print(file=fout)
f.close()
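# Illustrative output of outputClass() for a compiled .class file (values are
# examples only; real bytes follow the class file's 0xCAFEBABE magic number):
# jbyte org_jpype_Example[] = {
#   (jbyte)0xCA,(jbyte)0xFE,(jbyte)0xBA,(jbyte)0xBE, ...
# };
# int org_jpype_Example_size = 1234;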
def mkFileDir(filename):
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def createThunks(input_dir, output_src, output_header, namespace="Thunk"):
mkFileDir(output_src)
mkFileDir(output_header)
# Write the header
with open(output_header, "w+") as fheader:
sz = len(input_dir)
guard = translate(output_header.upper(), '/\\.', '___')
print("#ifndef %s" % guard, file=fheader)
print("#define %s" % guard, file=fheader)
print("#include <jpype.h>", file=fheader)
print("namespace %s {" % namespace, file=fheader)
for filename in _glob(input_dir, "*.class"):
name = translate(filename, '/\\.', '___')[sz:-6]
print("extern jbyte %s[];" % name, file=fheader)
print("extern int %s_size;" % name, file=fheader)
for filename in _glob(input_dir, "*.jar"):
name = translate(filename, '/\\.', '___')[sz:-4]
print("extern jbyte %s[];" % name, file=fheader)
print("extern int %s_size;" % name, file=fheader)
print("}", file=fheader)
print("#endif", file=fheader)
# Write the body
with open(output_src, "w+") as fimpl:
sz = len(input_dir)
print("#include <jp_thunk.h>", file=fimpl)
print("namespace %s {" % namespace, file=fimpl)
for filename in _glob(input_dir, "*.class"):
print(" including thunk %s" % filename)
name = translate(filename, '/\\.', '___')[sz:-6]
outputClass(filename, name, fimpl)
for filename in _glob(input_dir, "*.jar"):
print(" including thunk %s" % filename)
name = translate(filename, '/\\.', '___')[sz:-4]
outputClass(filename, name, fimpl)
print("}", file=fimpl)
class BuildThunkCommand(distutils.cmd.Command):
"""A custom command to create thunk file."""
description = 'build dynamic code thunks'
user_options = [
]
def initialize_options(self):
"""Set default values for options."""
pass
def finalize_options(self):
"""Post-process options."""
pass
def run(self):
"""Run command."""
self.run_command("build_java")
self.announce(
'Building thunks',
level=distutils.log.INFO)
# run short circuit logic here
srcDir = os.path.join("build", "lib")
destBody = os.path.join("build", "src", "jp_thunk.cpp")
destHeader = os.path.join("build", "src", "jp_thunk.h")
if os.path.isfile(destBody):
t1 = os.path.getctime(destBody)
update = False
for filename in _glob(srcDir, "*.class"):
if t1 < os.path.getctime(filename):
update = True
if not update:
self.announce(
'Skip build thunks',
level=distutils.log.INFO)
return
# do the build
createThunks(
srcDir,
destBody,
destHeader,
namespace="JPThunk")
| 30.181818
| 74
| 0.560671
|
6e92d3ad27831eac62990bc82dd6ce3dd043d5b9
| 378
|
py
|
Python
|
src/UQpy/surrogates/polynomial_chaos/regressions/__init__.py
|
SURGroup/UncertaintyQuantification
|
a94c8db47d07134ea2b3b0a3ca53ca818532c3e6
|
[
"MIT"
] | null | null | null |
src/UQpy/surrogates/polynomial_chaos/regressions/__init__.py
|
SURGroup/UncertaintyQuantification
|
a94c8db47d07134ea2b3b0a3ca53ca818532c3e6
|
[
"MIT"
] | null | null | null |
src/UQpy/surrogates/polynomial_chaos/regressions/__init__.py
|
SURGroup/UncertaintyQuantification
|
a94c8db47d07134ea2b3b0a3ca53ca818532c3e6
|
[
"MIT"
] | null | null | null |
from UQpy.surrogates.polynomial_chaos.regressions.LassoRegression import LassoRegression
from UQpy.surrogates.polynomial_chaos.regressions.LeastSquareRegression import LeastSquareRegression
from UQpy.surrogates.polynomial_chaos.regressions.RidgeRegression import RidgeRegression
from UQpy.surrogates.polynomial_chaos.regressions.LeastAngleRegression import LeastAngleRegression
| 75.6
| 100
| 0.915344
|
558052a89c3e90cf679bdd77194598ec39e9bf6f
| 1,896
|
py
|
Python
|
framework/UI/__init__.py
|
milljm/raven
|
5f29fe81b75e2ffbeb54a55aa63647e7b2f6457b
|
[
"Apache-2.0"
] | 2
|
2019-10-11T15:59:10.000Z
|
2021-04-08T18:23:57.000Z
|
framework/UI/__init__.py
|
milljm/raven
|
5f29fe81b75e2ffbeb54a55aa63647e7b2f6457b
|
[
"Apache-2.0"
] | 1
|
2018-03-27T13:06:00.000Z
|
2018-03-27T13:06:00.000Z
|
framework/UI/__init__.py
|
milljm/raven
|
5f29fe81b75e2ffbeb54a55aa63647e7b2f6457b
|
[
"Apache-2.0"
] | 1
|
2017-08-29T16:09:13.000Z
|
2017-08-29T16:09:13.000Z
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The UI module includes the different user interfaces available within RAVEN.
Created on November 30, 2016
@author: maljdp
"""
from __future__ import absolute_import
## These lines ensure that we do not have to do something like:
## 'from UI.Window import Window' outside
## of this submodule
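## (i.e. callers can write, for example, `from UI import DendrogramView`,
## assuming the package is importable as `UI`)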
from .ZoomableGraphicsView import ZoomableGraphicsView
from .BaseHierarchicalView import BaseHierarchicalView
from .DendrogramView import DendrogramView
from . import colors
from .FitnessView import FitnessView
from .ScatterView2D import ScatterView2D
from .ScatterView3D import ScatterView3D
from .SensitivityView import SensitivityView
from .TopologyMapView import TopologyMapView
from .HierarchyWindow import HierarchyWindow
from .TopologyWindow import TopologyWindow
## As these are not exposed to the user, we do not need a factory to dynamically
## allocate them. They will be explicitly called when needed everywhere in the
## code.
# from .Factory import knownTypes
# from .Factory import returnInstance
# from .Factory import returnClass
# We should not really need this as we do not use wildcard imports
__all__ = ['colors', 'HierarchyWindow', 'DendrogramView',
'TopologyWindow', 'FitnessView', 'ScatterView2D',
'ScatterView3D', 'SensitivityView', 'TopologyMapView']
| 38.693878
| 80
| 0.785338
|
5c7ca060b06358bce823fc0f6b1e1f2f3704b0f8
| 9,534
|
py
|
Python
|
docs/conf.py
|
cfpb/regulations-xml-parser
|
e3bcbd9025f6fb6fa9ef2671fb8ed061c8de3e88
|
[
"CC0-1.0"
] | 4
|
2016-01-02T21:04:42.000Z
|
2019-08-17T06:30:36.000Z
|
docs/conf.py
|
DalavanCloud/regulations-xml-parser
|
e3bcbd9025f6fb6fa9ef2671fb8ed061c8de3e88
|
[
"CC0-1.0"
] | 49
|
2016-01-25T15:19:04.000Z
|
2017-12-06T20:02:09.000Z
|
docs/conf.py
|
DalavanCloud/regulations-xml-parser
|
e3bcbd9025f6fb6fa9ef2671fb8ed061c8de3e88
|
[
"CC0-1.0"
] | 9
|
2016-01-21T19:25:30.000Z
|
2021-02-20T10:53:47.000Z
|
# -*- coding: utf-8 -*-
#
# regulations-xml-parser documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 22 23:05:44 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'regulations-xml-parser'
copyright = u'2016, Will Barton, Adam Scott, Hillary Jeffrey, Jerry Vinokurov'
author = u'Will Barton, Adam Scott, Hillary Jeffrey, Jerry Vinokurov'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'regulations-xml-parserdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'regulations-xml-parser.tex', u'regulations-xml-parser Documentation',
u'Will Barton, Adam Scott, Hillary Jeffrey, Jerry Vinokurov', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'regulations-xml-parser', u'regulations-xml-parser Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'regulations-xml-parser', u'regulations-xml-parser Documentation',
author, 'regulations-xml-parser', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 33.335664
| 87
| 0.722152
|
e43fdcf4ecdc4000f4b5c366fbffdd292c3120fc
| 10,005
|
py
|
Python
|
pele_platform/Utilities/Helpers/metal_constraints.py
|
TheKipiDragon/pele_platform
|
bb33fb69741685d423bdccda4ed104fd0b70ed5b
|
[
"Apache-2.0"
] | null | null | null |
pele_platform/Utilities/Helpers/metal_constraints.py
|
TheKipiDragon/pele_platform
|
bb33fb69741685d423bdccda4ed104fd0b70ed5b
|
[
"Apache-2.0"
] | null | null | null |
pele_platform/Utilities/Helpers/metal_constraints.py
|
TheKipiDragon/pele_platform
|
bb33fb69741685d423bdccda4ed104fd0b70ed5b
|
[
"Apache-2.0"
] | null | null | null |
import pele_platform.constants.constants as cs
import pele_platform.Errors.custom_errors as ce
from Bio.PDB import PDBParser, NeighborSearch, Selection, Vector, vectors
import itertools
import numpy as np
def find_metals(protein_file):
# read in the protein file
parser = PDBParser()
structure = parser.get_structure("protein", protein_file)
# find metals
metals = []
for chain in structure.get_chains():
for residue in chain.get_residues():
for atom in residue.get_atoms():
if atom.element in cs.metals:
metals.append([atom, residue, chain])
return metals, structure
def map_constraints(protein_file, original_input, original_constraints):
atoms = []
new_atoms = []
# get lines from actual input
with open(protein_file, "r") as input_file:
input_lines = input_file.readlines()
# get constraints coords from original input file
with open(original_input, "r") as file:
lines = file.readlines()
for orig in original_constraints:
try:
k, dist, atom1, atom2 = orig.split("-")
except ValueError: #If more than one atom
continue
atoms.extend([atom1, atom2])
for atom in atoms:
chain, resnum, atom_name = atom.split(":")
for line in lines:
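# Fixed-width PDB columns (0-based slices): [21] chain ID, [22:26] residue
# number, [12:16] atom name, [30:54] x/y/z coordinates.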
if (line.startswith("HETATM") or line.startswith("ATOM")) and line[21].strip() == chain.strip() and line[22:26].strip() == resnum.strip() and line[12:16].strip() == atom_name.strip():
coords = line[30:54].split()
for l in input_lines:
if l[30:54].split() == coords:
new_atom_name = l[12:16].strip()
new_resnum = l[22:26].strip()
new_chain = l[21].strip()
new_atoms.append([chain, new_chain, resnum, new_resnum, atom_name, new_atom_name])
before = ["{}:{}:{}".format(i[0],i[2],i[4]) for i in new_atoms]
after = ["{}:{}:{}".format(i[1], i[3], i[5]) for i in new_atoms]
for j in range(len(original_constraints)):
for b, a in zip(before, after):
original_constraints[j] = original_constraints[j].replace(b, a)
return original_constraints
def angle_classification(combinations, permissive):
# angle classification
ang_180 = []
ang_90 = []
ang_109 = []
coordinated_atoms = []
if permissive:
lower = 0.65
upper = 1.35
else:
lower = 0.8
upper = 1.2
for c in combinations:
a = c[2]
if 180 * lower <= a <= 180 * upper:
ang_180.append(c)
if 90 * lower <= a <= 90 * upper:
ang_90.append(c)
if 109.5 * lower <= a <= 109.5 * upper:
ang_109.append(c)
# check geometries
if len(ang_180) == 3 and len(ang_90) == 12:
geo = "octahedral"
coordinated_atoms.extend(ang_180)
coordinated_atoms.extend(ang_90)
elif len(ang_180) == 2 and len(ang_90) == 4:
geo = "square planar"
coordinated_atoms.extend(ang_180)
coordinated_atoms.extend(ang_90)
elif len(ang_109) == 6:
geo = "tetrahedral"
coordinated_atoms.extend(ang_109)
else:
geo = None
return geo, coordinated_atoms
def find_geometry(metals, structure, permissive=False, all_metals=False, external=None):
# check metal contacts
output = []
checked_metals = []
structure_list = Selection.unfold_entities(structure, "A")
for metal in metals:
# search distance based on metal type
if metal[0].element == "YB":
dist = 3.5
elif metal[0].element == "K":
dist = 3.3
else:
dist = 2.9
metal_str = "{}:{}:{}".format(metal[2].id, metal[1].get_id()[1], metal[0].name)
in_ext = False
for i in external or []:
if metal_str in i:
in_ext = True
if not in_ext and list(metal[0].coord) not in checked_metals:
coords = metal[0].coord
contacts = []
for chain in structure.get_chains():
for residue in chain.get_residues():
contacts_atoms = NeighborSearch(structure_list).search(coords, dist, "A")
# exclude self-contacts, carbons and hydrogens
excluded_contacts = cs.metals + ['C', 'H']
contacts_atoms = [c for c in contacts_atoms if c.element not in excluded_contacts]
for atom in contacts_atoms:
if residue in chain.get_residues() and atom in residue.get_atoms():
contacts.append([atom, residue, chain])
combinations = list(itertools.combinations(contacts, 2))
combinations = [list(c) for c in combinations]
# get all atom - metal - atom angles
for c in combinations:
vi = Vector(c[0][0].coord)
vj = Vector(c[1][0].coord)
angle = vectors.calc_angle(vi, coords, vj) * 180 / np.pi
c.append(angle)
geo, coordinated_atoms = angle_classification(combinations, False)
if geo is None and permissive:
geo, coordinated_atoms = angle_classification(combinations, True)
if geo is None and all_metals and combinations:
geo, coordinated_atoms = angle_classification(combinations, True)
if geo:
print("Found {} geometry around {} (residue {}). Adding constraints.".format(geo, metal[0].name, metal[1].get_id()[1]))
checked_metals.append(list(metal[0].coord))
else:
coordinated_atoms = combinations
checked_metals.append(list(metal[0].coord))
geo = "no"
print("Found {} geometry around {} (residue {}). Adding constraints to all atoms within {}A of the metal.".format(geo, metal[0].name, metal[1].get_id()[1], dist))
elif geo is None and not all_metals:
raise ce.NoGeometryAroundMetal("Failed to determine geometry around {} (residue {}). Add constraints manually or set 'constrain_all_metals: true' to constrain all atoms within {}A of the metal.".format(metal[0].name, metal[1].get_id()[1], dist))
elif geo is None and all_metals and not combinations:
print("No atoms coordinated to {} (residue {}).".format(metal[0].name, metal[1].get_id()[1]))
elif geo:
checked_metals.append(list(metal[0].coord))
print("Found {} geometry around {} (residue {}). Adding constraints.".format(geo, metal[0].name, metal[1].get_id()[1]))
elif geo is None and all_metals and combinations:
geo, coordinated_atoms = angle_classification(combinations, True)
if geo is None:
geo = "no"
coordinated_atoms = combinations
checked_metals.append(list(metal[0].coord))
print("Found {} geometry around {} (residue {}). Adding constraints to all atoms within {}A of the metal.".format(geo, metal[0].name, metal[1].get_id()[1], dist))
else:
print("Found {} geometry around {} (residue {}). Adding constraints.".format(geo, metal[0].name, metal[1].get_id()[1]))
elif geo is None and all_metals and not combinations:
print("No atoms coordinated to {} (residue {}).".format(metal[0].name, metal[1].get_id()[1]))
elif geo is None and not all_metals and not permissive:
raise ce.NoGeometryAroundMetal("Failed to determine geometry around {} (residue {}). Add constraints manually or set 'constrain_all_metals: true' to constrain all atoms within {}A of the metal.".format(metal[0].name, metal[1].get_id()[1], dist))
else:
checked_metals.append(list(metal[0].coord))
print("Found {} geometry around {} (residue {}). Adding constraints.".format(geo, metal[0].name,
metal[1].get_id()[1]))
# format string
yaml_string = "{}-{}-{}:{}:{}-{}:{}:{}"
spring_const = 50
string_atoms = []
for c in coordinated_atoms:
atom1, atom2, angle = c
if atom1 not in string_atoms:
string_atoms.append(atom1)
if atom2 not in string_atoms:
string_atoms.append(atom2)
for atom in string_atoms:
atomname1 = atom[0].name
resnum1 = atom[1].get_id()[1]
chain1 = atom[2].get_id()
atomname2 = metal[0].name
resnum2 = metal[1].get_id()[1]
chain2 = metal[2].get_id()
atom_dist = atom[0] - metal[0]
out = yaml_string.format(spring_const, atom_dist, chain1, resnum1, atomname1, chain2, resnum2,
atomname2)
output.append(out)
output = list(set(output))
if output:
output = ['{}'.format(o) for o in output]
return output
def main(original_constraints, protein_file, original_input, permissive=False, all_metals=False, external=None):
metals, structure = find_metals(protein_file)
if external:
external = map_constraints(protein_file, original_input, original_constraints)
output = find_geometry(metals, structure, permissive, all_metals, external)
return output, external
| 39.702381
| 265
| 0.554023
|
e9b2f6fca7ea1a256f468c2d4fe8c7fdf2a326ae
| 67,547
|
py
|
Python
|
snakemake/workflow.py
|
LvKvA/snakemake
|
5d4e7d8c4d7901c41bfb8f01c4b2c6551add59f7
|
[
"MIT"
] | null | null | null |
snakemake/workflow.py
|
LvKvA/snakemake
|
5d4e7d8c4d7901c41bfb8f01c4b2c6551add59f7
|
[
"MIT"
] | null | null | null |
snakemake/workflow.py
|
LvKvA/snakemake
|
5d4e7d8c4d7901c41bfb8f01c4b2c6551add59f7
|
[
"MIT"
] | null | null | null |
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2022, Johannes Köster"
__email__ = "johannes.koester@uni-due.de"
__license__ = "MIT"
import re
import os
import sys
import signal
import json
from tokenize import maybe
import urllib
from collections import OrderedDict, namedtuple
from itertools import filterfalse, chain
from functools import partial
from operator import attrgetter
import copy
import subprocess
from pathlib import Path
from urllib.request import pathname2url, url2pathname
from snakemake.logging import logger, format_resources, format_resource_names
from snakemake.rules import Rule, Ruleorder, RuleProxy
from snakemake.exceptions import (
CreateCondaEnvironmentException,
RuleException,
CreateRuleException,
UnknownRuleException,
NoRulesException,
print_exception,
WorkflowError,
)
from snakemake.shell import shell
from snakemake.dag import DAG
from snakemake.scheduler import JobScheduler
from snakemake.parser import parse
import snakemake.io
from snakemake.io import (
protected,
temp,
temporary,
ancient,
directory,
expand,
dynamic,
glob_wildcards,
flag,
not_iterable,
touch,
unpack,
local,
pipe,
service,
repeat,
report,
multiext,
IOFile,
)
from snakemake.persistence import Persistence
from snakemake.utils import update_config
from snakemake.script import script
from snakemake.notebook import notebook
from snakemake.wrapper import wrapper
from snakemake.cwl import cwl
from snakemake.template_rendering import render_template
import snakemake.wrapper
from snakemake.common import (
Mode,
bytesto,
ON_WINDOWS,
is_local_file,
parse_uri,
Rules,
Scatter,
Gather,
smart_join,
NOTHING_TO_BE_DONE_MSG,
)
from snakemake.utils import simplify_path
from snakemake.checkpoints import Checkpoint, Checkpoints
from snakemake.resources import DefaultResources
from snakemake.caching.local import OutputFileCache as LocalOutputFileCache
from snakemake.caching.remote import OutputFileCache as RemoteOutputFileCache
from snakemake.modules import ModuleInfo, WorkflowModifier, get_name_modifier_func
from snakemake.ruleinfo import RuleInfo
from snakemake.sourcecache import (
GenericSourceFile,
LocalSourceFile,
SourceCache,
SourceFile,
infer_source_file,
)
from snakemake.deployment.conda import Conda, is_conda_env_file
from snakemake import sourcecache
class Workflow:
def __init__(
self,
snakefile=None,
jobscript=None,
overwrite_shellcmd=None,
overwrite_config=None,
overwrite_workdir=None,
overwrite_configfiles=None,
overwrite_clusterconfig=None,
overwrite_threads=None,
overwrite_scatter=None,
overwrite_groups=None,
overwrite_resources=None,
group_components=None,
config_args=None,
debug=False,
verbose=False,
use_conda=False,
conda_frontend=None,
conda_prefix=None,
use_singularity=False,
use_env_modules=False,
singularity_prefix=None,
singularity_args="",
shadow_prefix=None,
scheduler_type="ilp",
scheduler_ilp_solver=None,
mode=Mode.default,
wrapper_prefix=None,
printshellcmds=False,
restart_times=None,
attempt=1,
default_remote_provider=None,
default_remote_prefix="",
run_local=True,
default_resources=None,
cache=None,
nodes=1,
cores=1,
resources=None,
conda_cleanup_pkgs=None,
edit_notebook=False,
envvars=None,
max_inventory_wait_time=20,
conda_not_block_search_path_envvars=False,
execute_subworkflows=True,
scheduler_solver_path=None,
conda_base_path=None,
check_envvars=True,
max_threads=None,
all_temp=False,
local_groupid="local",
):
"""
Create the controller.
"""
self.global_resources = dict() if resources is None else resources
self.global_resources["_cores"] = cores
self.global_resources["_nodes"] = nodes
self._rules = OrderedDict()
self.default_target = None
self._workdir = None
self.overwrite_workdir = overwrite_workdir
self.workdir_init = os.path.abspath(os.curdir)
self._ruleorder = Ruleorder()
self._localrules = set()
self.linemaps = dict()
self.rule_count = 0
self.basedir = os.path.dirname(snakefile)
self.main_snakefile = os.path.abspath(snakefile)
self.included = []
self.included_stack = []
self.jobscript = jobscript
self.persistence = None
self._subworkflows = dict()
self.overwrite_shellcmd = overwrite_shellcmd
self.overwrite_config = overwrite_config or dict()
self.overwrite_configfiles = overwrite_configfiles
self.overwrite_clusterconfig = overwrite_clusterconfig or dict()
self.overwrite_threads = overwrite_threads or dict()
self.overwrite_resources = overwrite_resources or dict()
self.config_args = config_args
self.immediate_submit = None
self._onsuccess = lambda log: None
self._onerror = lambda log: None
self._onstart = lambda log: None
self._wildcard_constraints = dict()
self.debug = debug
self.verbose = verbose
self._rulecount = 0
self.use_conda = use_conda
self.conda_frontend = conda_frontend
self.conda_prefix = conda_prefix
self.use_singularity = use_singularity
self.use_env_modules = use_env_modules
self.singularity_prefix = singularity_prefix
self.singularity_args = singularity_args
self.shadow_prefix = shadow_prefix
self.scheduler_type = scheduler_type
self.scheduler_ilp_solver = scheduler_ilp_solver
self.global_container_img = None
self.global_is_containerized = False
self.mode = mode
self.wrapper_prefix = wrapper_prefix
self.printshellcmds = printshellcmds
self.restart_times = restart_times
self.attempt = attempt
self.default_remote_provider = default_remote_provider
self.default_remote_prefix = default_remote_prefix
self.configfiles = (
[] if overwrite_configfiles is None else list(overwrite_configfiles)
)
self.run_local = run_local
self.report_text = None
self.conda_cleanup_pkgs = conda_cleanup_pkgs
self.edit_notebook = edit_notebook
# environment variables to pass to jobs
# These are defined via the "envvars:" syntax in the Snakefile itself
self.envvars = set()
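# For reference, these are declared in a Snakefile as, e.g.:
#   envvars:
#       "SOME_VARIABLE"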
self.overwrite_groups = overwrite_groups or dict()
self.group_components = group_components or dict()
self._scatter = dict(overwrite_scatter or dict())
self.overwrite_scatter = overwrite_scatter or dict()
self.conda_not_block_search_path_envvars = conda_not_block_search_path_envvars
self.execute_subworkflows = execute_subworkflows
self.modules = dict()
self.sourcecache = SourceCache()
self.scheduler_solver_path = scheduler_solver_path
self._conda_base_path = conda_base_path
self.check_envvars = check_envvars
self.max_threads = max_threads
self.all_temp = all_temp
self.scheduler = None
self.local_groupid = local_groupid
_globals = globals()
_globals["workflow"] = self
_globals["cluster_config"] = copy.deepcopy(self.overwrite_clusterconfig)
_globals["rules"] = Rules()
_globals["checkpoints"] = Checkpoints()
_globals["scatter"] = Scatter()
_globals["gather"] = Gather()
_globals["github"] = sourcecache.GithubFile
_globals["gitlab"] = sourcecache.GitlabFile
self.vanilla_globals = dict(_globals)
self.modifier_stack = [WorkflowModifier(self, globals=_globals)]
self.enable_cache = False
if cache is not None:
self.enable_cache = True
self.cache_rules = set(cache)
if self.default_remote_provider is not None:
self.output_file_cache = RemoteOutputFileCache(
self.default_remote_provider
)
else:
self.output_file_cache = LocalOutputFileCache()
else:
self.output_file_cache = None
self.cache_rules = set()
if default_resources is not None:
self.default_resources = default_resources
else:
# only _cores, _nodes, and _tmpdir
self.default_resources = DefaultResources(mode="bare")
self.iocache = snakemake.io.IOCache(max_inventory_wait_time)
self.globals["config"] = copy.deepcopy(self.overwrite_config)
if envvars is not None:
self.register_envvars(*envvars)
@property
def conda_base_path(self):
if self._conda_base_path:
return self._conda_base_path
if self.use_conda:
try:
return Conda().prefix_path
except CreateCondaEnvironmentException as e:
# Return no preset conda base path now and report error later in jobs.
return None
else:
return None
@property
def modifier(self):
return self.modifier_stack[-1]
@property
def globals(self):
return self.modifier.globals
def lint(self, json=False):
from snakemake.linting.rules import RuleLinter
from snakemake.linting.snakefiles import SnakefileLinter
json_snakefile_lints, snakefile_linted = SnakefileLinter(
self, self.included
).lint(json=json)
json_rule_lints, rules_linted = RuleLinter(self, self.rules).lint(json=json)
linted = snakefile_linted or rules_linted
if json:
import json
print(
json.dumps(
{"snakefiles": json_snakefile_lints, "rules": json_rule_lints},
indent=2,
)
)
else:
if not linted:
logger.info("Congratulations, your workflow is in a good condition!")
return linted
def is_cached_rule(self, rule: Rule):
return rule.name in self.cache_rules
def get_sources(self):
files = set()
def local_path(f):
if not isinstance(f, SourceFile) and is_local_file(f):
return f
if isinstance(f, LocalSourceFile):
return f.get_path_or_uri()
def norm_rule_relpath(f, rule):
if not os.path.isabs(f):
f = os.path.join(rule.basedir, f)
return os.path.relpath(f)
# get registered sources
for f in self.included:
f = local_path(f)
if f:
try:
f = os.path.relpath(f)
except ValueError:
if ON_WINDOWS:
pass # relpath doesn't work on win if files are on different drive
else:
raise
files.add(f)
for rule in self.rules:
script_path = rule.script or rule.notebook
if script_path:
script_path = norm_rule_relpath(script_path, rule)
files.add(script_path)
script_dir = os.path.dirname(script_path)
files.update(
os.path.join(dirpath, f)
for dirpath, _, files in os.walk(script_dir)
for f in files
)
if rule.conda_env and rule.conda_env.is_file:
f = local_path(rule.conda_env.file)
if f:
# url points to a local env file
env_path = norm_rule_relpath(f, rule)
files.add(env_path)
for f in self.configfiles:
files.add(f)
# get git-managed files
# TODO allow a manifest file as alternative
try:
out = subprocess.check_output(
["git", "ls-files", "--recurse-submodules", "."], stderr=subprocess.PIPE
)
for f in out.decode().split("\n"):
if f:
files.add(os.path.relpath(f))
except subprocess.CalledProcessError as e:
if "fatal: not a git repository" in e.stderr.decode().lower():
logger.warning(
"Unable to retrieve additional files from git. "
"This is not a git repository."
)
else:
raise WorkflowError(
"Error executing git:\n{}".format(e.stderr.decode())
)
return files
def check_source_sizes(self, filename, warning_size_gb=0.2):
"""A helper function to check the filesize, and return the file
to the calling function. Additionally, given that we encourage these
packages to be small, we set a warning at 200MB (0.2GB).
"""
gb = bytesto(os.stat(filename).st_size, "g")
if gb > warning_size_gb:
logger.warning(
"File {} (size {} GB) is greater than the {} GB suggested size "
"Consider uploading larger files to storage first.".format(
filename, gb, warning_size_gb
)
)
return filename
@property
def subworkflows(self):
return self._subworkflows.values()
@property
def rules(self):
return self._rules.values()
@property
def cores(self):
if self._cores is None:
raise WorkflowError(
"Workflow requires a total number of cores to be defined (e.g. because a "
"rule defines its number of threads as a fraction of a total number of cores). "
"Please set it with --cores N with N being the desired number of cores. "
"Consider to use this in combination with --max-threads to avoid "
"jobs with too many threads for your setup. Also make sure to perform "
"a dryrun first."
)
return self._cores
@property
def _cores(self):
return self.global_resources["_cores"]
@property
def nodes(self):
return self.global_resources["_nodes"]
@property
def concrete_files(self):
return (
file
for rule in self.rules
for file in chain(rule.input, rule.output)
if not callable(file) and not file.contains_wildcard()
)
def check(self):
for clause in self._ruleorder:
for rulename in clause:
if not self.is_rule(rulename):
raise UnknownRuleException(
rulename, prefix="Error in ruleorder definition."
)
def add_rule(
self,
name=None,
lineno=None,
snakefile=None,
checkpoint=False,
allow_overwrite=False,
):
"""
Add a rule.
"""
is_overwrite = self.is_rule(name)
if not allow_overwrite and is_overwrite:
raise CreateRuleException(
"The name {} is already used by another rule".format(name)
)
rule = Rule(name, self, lineno=lineno, snakefile=snakefile)
self._rules[rule.name] = rule
if not is_overwrite:
self.rule_count += 1
if not self.default_target:
self.default_target = rule.name
return name
def is_rule(self, name):
"""
Return True if name is the name of a rule.
Arguments
name -- a name
"""
return name in self._rules
def get_rule(self, name):
"""
Get rule by name.
Arguments
name -- the name of the rule
"""
if not self._rules:
raise NoRulesException()
if name not in self._rules:
raise UnknownRuleException(name)
return self._rules[name]
def list_rules(self, only_targets=False):
rules = self.rules
if only_targets:
rules = filterfalse(Rule.has_wildcards, rules)
for rule in rules:
logger.rule_info(name=rule.name, docstring=rule.docstring)
def list_resources(self):
for resource in set(
resource for rule in self.rules for resource in rule.resources
):
if resource not in "_cores _nodes".split():
logger.info(resource)
def is_local(self, rule):
return rule.group is None and (
rule.name in self._localrules or rule.norun or rule.is_template_engine
)
def check_localrules(self):
undefined = self._localrules - set(rule.name for rule in self.rules)
if undefined:
logger.warning(
"localrules directive specifies rules that are not "
"present in the Snakefile:\n{}\n".format(
"\n".join(map("\t{}".format, undefined))
)
)
def inputfile(self, path):
"""Mark file as being an input file of the workflow.
This also means that eventual --default-remote-provider/prefix settings
will be applied to this file. The file is returned as _IOFile object,
such that it can e.g. be transparently opened with _IOFile.open().
"""
if isinstance(path, Path):
path = str(path)
if self.default_remote_provider is not None:
path = self.modifier.modify_path(path)
return IOFile(path)
def execute(
self,
targets=None,
dryrun=False,
generate_unit_tests=None,
touch=False,
scheduler_type=None,
scheduler_ilp_solver=None,
local_cores=1,
forcetargets=False,
forceall=False,
forcerun=None,
until=[],
omit_from=[],
prioritytargets=None,
quiet=False,
keepgoing=False,
printshellcmds=False,
printreason=False,
printdag=False,
cluster=None,
cluster_sync=None,
jobname=None,
immediate_submit=False,
ignore_ambiguity=False,
printrulegraph=False,
printfilegraph=False,
printd3dag=False,
drmaa=None,
drmaa_log_dir=None,
kubernetes=None,
tibanna=None,
tibanna_sfn=None,
google_lifesciences=None,
google_lifesciences_regions=None,
google_lifesciences_location=None,
google_lifesciences_cache=False,
tes=None,
precommand="",
preemption_default=None,
preemptible_rules=None,
tibanna_config=False,
container_image=None,
stats=None,
force_incomplete=False,
ignore_incomplete=False,
list_version_changes=False,
list_code_changes=False,
list_input_changes=False,
list_params_changes=False,
list_untracked=False,
list_conda_envs=False,
summary=False,
archive=None,
delete_all_output=False,
delete_temp_output=False,
detailed_summary=False,
latency_wait=3,
wait_for_files=None,
nolock=False,
unlock=False,
notemp=False,
nodeps=False,
cleanup_metadata=None,
conda_cleanup_envs=False,
cleanup_shadow=False,
cleanup_scripts=True,
subsnakemake=None,
updated_files=None,
keep_target_files=False,
keep_shadow=False,
keep_remote_local=False,
allowed_rules=None,
max_jobs_per_second=None,
max_status_checks_per_second=None,
greediness=1.0,
no_hooks=False,
force_use_threads=False,
conda_create_envs_only=False,
assume_shared_fs=True,
cluster_status=None,
cluster_cancel=None,
cluster_cancel_nargs=None,
cluster_sidecar=None,
report=None,
report_stylesheet=None,
export_cwl=False,
batch=None,
keepincomplete=False,
keepmetadata=True,
):
self.check_localrules()
self.immediate_submit = immediate_submit
self.cleanup_scripts = cleanup_scripts
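        # Helper closures that split mixed target lists into rule objects and file paths.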
def rules(items):
return map(self._rules.__getitem__, filter(self.is_rule, items))
if keep_target_files:
def files(items):
return filterfalse(self.is_rule, items)
else:
def files(items):
relpath = (
lambda f: f
if os.path.isabs(f) or f.startswith("root://")
else os.path.relpath(f)
)
return map(relpath, filterfalse(self.is_rule, items))
if not targets:
targets = (
[self.default_target] if self.default_target is not None else list()
)
if prioritytargets is None:
prioritytargets = list()
if forcerun is None:
forcerun = list()
if until is None:
until = list()
if omit_from is None:
omit_from = list()
priorityrules = set(rules(prioritytargets))
priorityfiles = set(files(prioritytargets))
forcerules = set(rules(forcerun))
forcefiles = set(files(forcerun))
untilrules = set(rules(until))
untilfiles = set(files(until))
omitrules = set(rules(omit_from))
omitfiles = set(files(omit_from))
targetrules = set(
chain(
rules(targets),
filterfalse(Rule.has_wildcards, priorityrules),
filterfalse(Rule.has_wildcards, forcerules),
filterfalse(Rule.has_wildcards, untilrules),
)
)
targetfiles = set(chain(files(targets), priorityfiles, forcefiles, untilfiles))
if ON_WINDOWS:
targetfiles = set(tf.replace(os.sep, os.altsep) for tf in targetfiles)
if forcetargets:
forcefiles.update(targetfiles)
forcerules.update(targetrules)
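        # Restrict to --allowed-rules if given; otherwise consider all rules of the workflow.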
rules = self.rules
if allowed_rules:
allowed_rules = set(allowed_rules)
rules = [rule for rule in rules if rule.name in allowed_rules]
if wait_for_files is not None:
try:
snakemake.io.wait_for_files(wait_for_files, latency_wait=latency_wait)
except IOError as e:
logger.error(str(e))
return False
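        # Build the DAG of jobs from the collected target, force, until and omit sets.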
dag = DAG(
self,
rules,
dryrun=dryrun,
targetfiles=targetfiles,
targetrules=targetrules,
# when cleaning up conda, we should enforce all possible jobs
# since their envs shall not be deleted
forceall=forceall or conda_cleanup_envs,
forcefiles=forcefiles,
forcerules=forcerules,
priorityfiles=priorityfiles,
priorityrules=priorityrules,
untilfiles=untilfiles,
untilrules=untilrules,
omitfiles=omitfiles,
omitrules=omitrules,
ignore_ambiguity=ignore_ambiguity,
force_incomplete=force_incomplete,
ignore_incomplete=ignore_incomplete
or printdag
or printrulegraph
or printfilegraph,
notemp=notemp,
keep_remote_local=keep_remote_local,
batch=batch,
)
self.persistence = Persistence(
nolock=nolock,
dag=dag,
conda_prefix=self.conda_prefix,
singularity_prefix=self.singularity_prefix,
shadow_prefix=self.shadow_prefix,
warn_only=dryrun
or printrulegraph
or printfilegraph
or printdag
or summary
or archive
or list_version_changes
or list_code_changes
or list_input_changes
or list_params_changes
or list_untracked
or delete_all_output
or delete_temp_output,
)
if self.mode in [Mode.subprocess, Mode.cluster]:
self.persistence.deactivate_cache()
if cleanup_metadata:
for f in cleanup_metadata:
self.persistence.cleanup_metadata(f)
return True
if unlock:
try:
self.persistence.cleanup_locks()
logger.info("Unlocking working directory.")
return True
except IOError:
                logger.error(
                    "Error: Unlocking the directory {} failed. Maybe "
                    "you don't have the permissions?".format(os.getcwd())
                )
return False
logger.info("Building DAG of jobs...")
dag.init()
dag.update_checkpoint_dependencies()
dag.check_dynamic()
try:
self.persistence.lock()
except IOError:
logger.error(
"Error: Directory cannot be locked. Please make "
"sure that no other Snakemake process is trying to create "
"the same files in the following directory:\n{}\n"
"If you are sure that no other "
"instances of snakemake are running on this directory, "
"the remaining lock was likely caused by a kill signal or "
"a power loss. It can be removed with "
"the --unlock argument.".format(os.getcwd())
)
return False
if cleanup_shadow:
self.persistence.cleanup_shadow()
return True
if (
self.subworkflows
and self.execute_subworkflows
and not printdag
and not printrulegraph
and not printfilegraph
):
# backup globals
globals_backup = dict(self.globals)
# execute subworkflows
for subworkflow in self.subworkflows:
subworkflow_targets = subworkflow.targets(dag)
logger.debug(
"Files requested from subworkflow:\n {}".format(
"\n ".join(subworkflow_targets)
)
)
updated = list()
if subworkflow_targets:
logger.info("Executing subworkflow {}.".format(subworkflow.name))
if not subsnakemake(
subworkflow.snakefile,
workdir=subworkflow.workdir,
targets=subworkflow_targets,
cores=self._cores,
nodes=self.nodes,
configfiles=[subworkflow.configfile]
if subworkflow.configfile
else None,
updated_files=updated,
):
return False
dag.updated_subworkflow_files.update(
subworkflow.target(f) for f in updated
)
else:
logger.info(
"Subworkflow {}: {}".format(
subworkflow.name, NOTHING_TO_BE_DONE_MSG
)
)
if self.subworkflows:
logger.info("Executing main workflow.")
# rescue globals
self.globals.update(globals_backup)
dag.postprocess(update_needrun=False)
if not dryrun:
# deactivate IOCache such that from now on we always get updated
# size, existence and mtime information
# ATTENTION: this may never be removed without really good reason.
# Otherwise weird things may happen.
self.iocache.deactivate()
# clear and deactivate persistence cache, from now on we want to see updates
self.persistence.deactivate_cache()
if nodeps:
missing_input = [
f
for job in dag.targetjobs
for f in job.input
if dag.needrun(job) and not os.path.exists(f)
]
if missing_input:
logger.error(
"Dependency resolution disabled (--nodeps) "
"but missing input "
"files detected. If this happens on a cluster, please make sure "
"that you handle the dependencies yourself or turn off "
"--immediate-submit. Missing input files:\n{}".format(
"\n".join(missing_input)
)
)
return False
updated_files.extend(f for job in dag.needrun_jobs for f in job.output)
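        # Handle one-shot operational modes (unit test generation, CWL export, report, listings)
        # that return without scheduling any jobs.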
if generate_unit_tests:
from snakemake import unit_tests
path = generate_unit_tests
deploy = []
if self.use_conda:
deploy.append("conda")
if self.use_singularity:
deploy.append("singularity")
unit_tests.generate(
dag, path, deploy, configfiles=self.overwrite_configfiles
)
return True
elif export_cwl:
from snakemake.cwl import dag_to_cwl
import json
with open(export_cwl, "w") as cwl:
json.dump(dag_to_cwl(dag), cwl, indent=4)
return True
elif report:
from snakemake.report import auto_report
auto_report(dag, report, stylesheet=report_stylesheet)
return True
elif printd3dag:
dag.d3dag()
return True
elif printdag:
print(dag)
return True
elif printrulegraph:
print(dag.rule_dot())
return True
elif printfilegraph:
print(dag.filegraph_dot())
return True
elif summary:
print("\n".join(dag.summary(detailed=False)))
return True
elif detailed_summary:
print("\n".join(dag.summary(detailed=True)))
return True
elif archive:
dag.archive(archive)
return True
elif delete_all_output:
dag.clean(only_temp=False, dryrun=dryrun)
return True
elif delete_temp_output:
dag.clean(only_temp=True, dryrun=dryrun)
return True
elif list_version_changes:
items = dag.get_outputs_with_changes("version")
if items:
print(*items, sep="\n")
return True
elif list_code_changes:
items = dag.get_outputs_with_changes("code")
if items:
print(*items, sep="\n")
return True
elif list_input_changes:
items = dag.get_outputs_with_changes("input")
if items:
print(*items, sep="\n")
return True
elif list_params_changes:
items = dag.get_outputs_with_changes("params")
if items:
print(*items, sep="\n")
return True
elif list_untracked:
dag.list_untracked()
return True
if self.use_singularity:
if assume_shared_fs:
dag.pull_container_imgs(
dryrun=dryrun or list_conda_envs, quiet=list_conda_envs
)
if self.use_conda:
if assume_shared_fs:
dag.create_conda_envs(
dryrun=dryrun or list_conda_envs or conda_cleanup_envs,
quiet=list_conda_envs,
)
if conda_create_envs_only:
return True
if list_conda_envs:
print("environment", "container", "location", sep="\t")
for env in set(job.conda_env for job in dag.jobs):
if env and not env.is_named:
print(
env.file.simplify_path(),
env.container_img_url or "",
simplify_path(env.address),
sep="\t",
)
return True
if conda_cleanup_envs:
self.persistence.conda_cleanup_envs()
return True
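        # All special modes are handled above; create the scheduler that executes the jobs in the DAG.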
self.scheduler = JobScheduler(
self,
dag,
local_cores=local_cores,
dryrun=dryrun,
touch=touch,
cluster=cluster,
cluster_status=cluster_status,
cluster_cancel=cluster_cancel,
cluster_cancel_nargs=cluster_cancel_nargs,
cluster_sidecar=cluster_sidecar,
cluster_config=cluster_config,
cluster_sync=cluster_sync,
jobname=jobname,
max_jobs_per_second=max_jobs_per_second,
max_status_checks_per_second=max_status_checks_per_second,
quiet=quiet,
keepgoing=keepgoing,
drmaa=drmaa,
drmaa_log_dir=drmaa_log_dir,
kubernetes=kubernetes,
tibanna=tibanna,
tibanna_sfn=tibanna_sfn,
google_lifesciences=google_lifesciences,
google_lifesciences_regions=google_lifesciences_regions,
google_lifesciences_location=google_lifesciences_location,
google_lifesciences_cache=google_lifesciences_cache,
tes=tes,
preemption_default=preemption_default,
preemptible_rules=preemptible_rules,
precommand=precommand,
tibanna_config=tibanna_config,
container_image=container_image,
printreason=printreason,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
greediness=greediness,
force_use_threads=force_use_threads,
assume_shared_fs=assume_shared_fs,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
scheduler_type=scheduler_type,
scheduler_ilp_solver=scheduler_ilp_solver,
)
if not dryrun:
dag.warn_about_changes()
if len(dag):
shell_exec = shell.get_executable()
if shell_exec is not None:
logger.info("Using shell: {}".format(shell_exec))
if cluster or cluster_sync or drmaa:
logger.resources_info(
"Provided cluster nodes: {}".format(self.nodes)
)
elif kubernetes or tibanna or google_lifesciences:
logger.resources_info("Provided cloud nodes: {}".format(self.nodes))
else:
if self._cores is not None:
warning = (
""
if self._cores > 1
else " (use --cores to define parallelism)"
)
logger.resources_info(
"Provided cores: {}{}".format(self._cores, warning)
)
logger.resources_info(
"Rules claiming more threads " "will be scaled down."
)
provided_resources = format_resources(self.global_resources)
if provided_resources:
logger.resources_info("Provided resources: " + provided_resources)
if self.run_local and any(rule.group for rule in self.rules):
logger.info("Group jobs: inactive (local execution)")
if not self.use_conda and any(rule.conda_env for rule in self.rules):
logger.info("Conda environments: ignored")
if not self.use_singularity and any(
rule.container_img for rule in self.rules
):
logger.info("Singularity containers: ignored")
if self.mode == Mode.default:
logger.run_info("\n".join(dag.stats()))
else:
logger.info(NOTHING_TO_BE_DONE_MSG)
else:
# the dryrun case
dag.warn_about_changes()
if len(dag):
logger.run_info("\n".join(dag.stats()))
else:
logger.info(NOTHING_TO_BE_DONE_MSG)
return True
if quiet:
# in case of dryrun and quiet, just print above info and exit
return True
if not dryrun and not no_hooks:
self._onstart(logger.get_logfile())
success = self.scheduler.schedule()
if not immediate_submit and not dryrun:
dag.cleanup_workdir()
if success:
if dryrun:
if len(dag):
logger.run_info("\n".join(dag.stats()))
logger.info(
"This was a dry-run (flag -n). The order of jobs "
"does not reflect the order of execution."
)
dag.warn_about_changes()
logger.remove_logfile()
else:
if stats:
self.scheduler.stats.to_json(stats)
dag.warn_about_changes()
logger.logfile_hint()
if not dryrun and not no_hooks:
self._onsuccess(logger.get_logfile())
return True
else:
if not dryrun and not no_hooks:
self._onerror(logger.get_logfile())
dag.warn_about_changes()
logger.logfile_hint()
return False
@property
def current_basedir(self):
"""Basedir of currently parsed Snakefile."""
assert self.included_stack
snakefile = self.included_stack[-1]
basedir = snakefile.get_basedir()
if isinstance(basedir, LocalSourceFile):
return basedir.abspath()
else:
return basedir
def source_path(self, rel_path):
"""Return path to source file from work dir derived from given path relative to snakefile"""
# TODO download to disk (use source cache) in case of remote file
import inspect
frame = inspect.currentframe().f_back
calling_file = frame.f_code.co_filename
calling_dir = os.path.dirname(calling_file)
path = smart_join(calling_dir, rel_path)
return self.sourcecache.get_path(infer_source_file(path))
@property
def snakefile(self):
import inspect
frame = inspect.currentframe().f_back
return frame.f_code.co_filename
def register_envvars(self, *envvars):
"""
Register environment variables that shall be passed to jobs.
If used multiple times, union is taken.
"""
undefined = set(var for var in envvars if var not in os.environ)
if self.check_envvars and undefined:
raise WorkflowError(
"The following environment variables are requested by the workflow but undefined. "
"Please make sure that they are correctly defined before running Snakemake:\n"
"{}".format("\n".join(undefined))
)
self.envvars.update(envvars)
def containerize(self):
from snakemake.deployment.containerize import containerize
containerize(self)
def include(
self,
snakefile,
overwrite_default_target=False,
print_compilation=False,
overwrite_shellcmd=None,
):
"""
Include a snakefile.
"""
basedir = self.current_basedir if self.included_stack else None
snakefile = infer_source_file(snakefile, basedir)
if not self.modifier.allow_rule_overwrite and snakefile in self.included:
logger.info("Multiple includes of {} ignored".format(snakefile))
return
self.included.append(snakefile)
self.included_stack.append(snakefile)
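        # Remember the current default target so the include does not override it unless explicitly requested.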
default_target = self.default_target
code, linemap, rulecount = parse(
snakefile,
self,
overwrite_shellcmd=self.overwrite_shellcmd,
rulecount=self._rulecount,
)
self._rulecount = rulecount
if print_compilation:
print(code)
if isinstance(snakefile, LocalSourceFile):
# insert the current directory into sys.path
# this allows to import modules from the workflow directory
sys.path.insert(0, snakefile.get_basedir().get_path_or_uri())
self.linemaps[snakefile.get_path_or_uri()] = linemap
exec(compile(code, snakefile.get_path_or_uri(), "exec"), self.globals)
if not overwrite_default_target:
self.default_target = default_target
self.included_stack.pop()
def onstart(self, func):
"""Register onstart function."""
self._onstart = func
def onsuccess(self, func):
"""Register onsuccess function."""
self._onsuccess = func
def onerror(self, func):
"""Register onerror function."""
self._onerror = func
def global_wildcard_constraints(self, **content):
"""Register global wildcard constraints."""
self._wildcard_constraints.update(content)
# update all rules so far
for rule in self.rules:
rule.update_wildcard_constraints()
def scattergather(self, **content):
"""Register scattergather defaults."""
self._scatter.update(content)
self._scatter.update(self.overwrite_scatter)
# add corresponding wildcard constraint
        self.global_wildcard_constraints(scatteritem=r"\d+-of-\d+")
        def make_func(key):
            # Bind key per scattergather name; a plain closure over the loop
            # variable would make every entry use the last registered key.
            def func(*args, **wildcards):
                n = self._scatter[key]
                return expand(
                    *args,
                    scatteritem=map("{{}}-of-{}".format(n).format, range(1, n + 1)),
                    **wildcards
                )
            return func
        for key in content:
            setattr(self.globals["scatter"], key, make_func(key))
            setattr(self.globals["gather"], key, make_func(key))
def workdir(self, workdir):
"""Register workdir."""
if self.overwrite_workdir is None:
os.makedirs(workdir, exist_ok=True)
self._workdir = workdir
os.chdir(workdir)
def configfile(self, fp):
"""Update the global config with data from the given file."""
if not self.modifier.skip_configfile:
if os.path.exists(fp):
self.configfiles.append(fp)
c = snakemake.io.load_configfile(fp)
update_config(self.config, c)
if self.overwrite_config:
logger.info(
"Config file {} is extended by additional config specified via the command line.".format(
fp
)
)
update_config(self.config, self.overwrite_config)
elif not self.overwrite_configfiles:
raise WorkflowError(
"Workflow defines configfile {} but it is not present or accessible.".format(
fp
)
)
def set_pepfile(self, path):
try:
import peppy
except ImportError:
raise WorkflowError("For PEP support, please install peppy.")
self.pepfile = path
self.globals["pep"] = peppy.Project(self.pepfile)
def pepschema(self, schema):
try:
import eido
except ImportError:
raise WorkflowError("For PEP schema support, please install eido.")
if is_local_file(schema) and not os.path.isabs(schema):
# schema is relative to current Snakefile
schema = self.current_basedir.join(schema).get_path_or_uri()
if self.pepfile is None:
raise WorkflowError("Please specify a PEP with the pepfile directive.")
eido.validate_project(
project=self.globals["pep"], schema=schema, exclude_case=True
)
def report(self, path):
"""Define a global report description in .rst format."""
if not self.modifier.skip_global_report_caption:
self.report_text = self.current_basedir.join(path)
@property
def config(self):
return self.globals["config"]
def ruleorder(self, *rulenames):
self._ruleorder.add(*map(self.modifier.modify_rulename, rulenames))
def subworkflow(self, name, snakefile=None, workdir=None, configfile=None):
# Take absolute path of config file, because it is relative to current
# workdir, which could be changed for the subworkflow.
if configfile:
configfile = os.path.abspath(configfile)
sw = Subworkflow(self, name, snakefile, workdir, configfile)
self._subworkflows[name] = sw
self.globals[name] = sw.target
def localrules(self, *rulenames):
self._localrules.update(rulenames)
def rule(self, name=None, lineno=None, snakefile=None, checkpoint=False):
# choose a name for an unnamed rule
if name is None:
name = str(len(self._rules) + 1)
if self.modifier.skip_rule(name):
def decorate(ruleinfo):
# do nothing, ignore rule
return ruleinfo.func
return decorate
# Optionally let the modifier change the rulename.
orig_name = name
name = self.modifier.modify_rulename(name)
name = self.add_rule(
name,
lineno,
snakefile,
checkpoint,
allow_overwrite=self.modifier.allow_rule_overwrite,
)
rule = self.get_rule(name)
rule.is_checkpoint = checkpoint
def decorate(ruleinfo):
nonlocal name
# If requested, modify ruleinfo via the modifier.
ruleinfo.apply_modifier(self.modifier)
if ruleinfo.wildcard_constraints:
rule.set_wildcard_constraints(
*ruleinfo.wildcard_constraints[0],
**ruleinfo.wildcard_constraints[1]
)
if ruleinfo.name:
rule.name = ruleinfo.name
del self._rules[name]
self._rules[ruleinfo.name] = rule
name = rule.name
rule.path_modifier = ruleinfo.path_modifier
if ruleinfo.input:
rule.set_input(*ruleinfo.input[0], **ruleinfo.input[1])
if ruleinfo.output:
rule.set_output(*ruleinfo.output[0], **ruleinfo.output[1])
if ruleinfo.params:
rule.set_params(*ruleinfo.params[0], **ruleinfo.params[1])
# handle default resources
if self.default_resources is not None:
rule.resources = copy.deepcopy(self.default_resources.parsed)
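            # Determine the rule's thread count, honoring --set-threads overwrites.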
if ruleinfo.threads is not None:
if (
not isinstance(ruleinfo.threads, int)
and not isinstance(ruleinfo.threads, float)
and not callable(ruleinfo.threads)
):
raise RuleException(
"Threads value has to be an integer, float, or a callable.",
rule=rule,
)
if name in self.overwrite_threads:
rule.resources["_cores"] = self.overwrite_threads[name]
else:
if isinstance(ruleinfo.threads, float):
ruleinfo.threads = int(ruleinfo.threads)
rule.resources["_cores"] = ruleinfo.threads
if ruleinfo.shadow_depth:
if ruleinfo.shadow_depth not in (
True,
"shallow",
"full",
"minimal",
"copy-minimal",
):
raise RuleException(
"Shadow must either be 'minimal', 'copy-minimal', 'shallow', 'full', "
"or True (equivalent to 'full')",
rule=rule,
)
if ruleinfo.shadow_depth is True:
rule.shadow_depth = "full"
logger.warning(
"Shadow is set to True in rule {} (equivalent to 'full'). It's encouraged to use the more explicit options 'minimal|copy-minimal|shallow|full' instead.".format(
rule
)
)
else:
rule.shadow_depth = ruleinfo.shadow_depth
if ruleinfo.resources:
args, resources = ruleinfo.resources
if args:
raise RuleException("Resources have to be named.")
if not all(
map(
lambda r: isinstance(r, int)
or isinstance(r, str)
or callable(r),
resources.values(),
)
):
raise RuleException(
"Resources values have to be integers, strings, or callables (functions)",
rule=rule,
)
rule.resources.update(resources)
if name in self.overwrite_resources:
rule.resources.update(self.overwrite_resources[name])
if ruleinfo.priority:
if not isinstance(ruleinfo.priority, int) and not isinstance(
ruleinfo.priority, float
):
raise RuleException(
"Priority values have to be numeric.", rule=rule
)
rule.priority = ruleinfo.priority
if ruleinfo.version:
rule.version = ruleinfo.version
if ruleinfo.log:
rule.set_log(*ruleinfo.log[0], **ruleinfo.log[1])
if ruleinfo.message:
rule.message = ruleinfo.message
if ruleinfo.benchmark:
rule.benchmark = ruleinfo.benchmark
if not self.run_local:
group = self.overwrite_groups.get(name) or ruleinfo.group
if group is not None:
rule.group = group
if ruleinfo.wrapper:
rule.conda_env = snakemake.wrapper.get_conda_env(
ruleinfo.wrapper, prefix=self.wrapper_prefix
)
# TODO retrieve suitable singularity image
if ruleinfo.env_modules:
# If using environment modules and they are defined for the rule,
# ignore conda and singularity directive below.
# The reason is that this is likely intended in order to use
# a software stack specifically compiled for a particular
# HPC cluster.
invalid_rule = not (
ruleinfo.script
or ruleinfo.wrapper
or ruleinfo.shellcmd
or ruleinfo.notebook
)
if invalid_rule:
raise RuleException(
"envmodules directive is only allowed with "
"shell, script, notebook, or wrapper directives (not with run or template_engine)",
rule=rule,
)
from snakemake.deployment.env_modules import EnvModules
rule.env_modules = EnvModules(*ruleinfo.env_modules)
if ruleinfo.conda_env:
if not (
ruleinfo.script
or ruleinfo.wrapper
or ruleinfo.shellcmd
or ruleinfo.notebook
):
raise RuleException(
"Conda environments are only allowed "
"with shell, script, notebook, or wrapper directives "
"(not with run or template_engine).",
rule=rule,
)
if isinstance(ruleinfo.conda_env, Path):
ruleinfo.conda_env = str(ruleinfo.conda_env)
if (
ruleinfo.conda_env is not None
and is_conda_env_file(ruleinfo.conda_env)
and is_local_file(ruleinfo.conda_env)
and not os.path.isabs(ruleinfo.conda_env)
):
ruleinfo.conda_env = self.current_basedir.join(
ruleinfo.conda_env
).get_path_or_uri()
rule.conda_env = ruleinfo.conda_env
invalid_rule = not (
ruleinfo.script
or ruleinfo.wrapper
or ruleinfo.shellcmd
or ruleinfo.notebook
)
if ruleinfo.container_img:
if invalid_rule:
raise RuleException(
"Singularity directive is only allowed "
"with shell, script, notebook or wrapper directives "
"(not with run or template_engine).",
rule=rule,
)
rule.container_img = ruleinfo.container_img
rule.is_containerized = ruleinfo.is_containerized
elif self.global_container_img:
if not invalid_rule and ruleinfo.container_img != False:
# skip rules with run directive or empty image
rule.container_img = self.global_container_img
rule.is_containerized = self.global_is_containerized
rule.norun = ruleinfo.norun
if ruleinfo.name is not None:
rule.name = ruleinfo.name
rule.docstring = ruleinfo.docstring
rule.run_func = ruleinfo.func
rule.shellcmd = ruleinfo.shellcmd
rule.script = ruleinfo.script
rule.notebook = ruleinfo.notebook
rule.wrapper = ruleinfo.wrapper
rule.template_engine = ruleinfo.template_engine
rule.cwl = ruleinfo.cwl
rule.restart_times = self.restart_times
rule.basedir = self.current_basedir
if ruleinfo.handover:
if not ruleinfo.resources:
# give all available resources to the rule
rule.resources.update(
{
name: val
for name, val in self.global_resources.items()
if val is not None
}
)
# This becomes a local rule, which might spawn jobs to a cluster,
# depending on its configuration (e.g. nextflow config).
self._localrules.add(rule.name)
rule.is_handover = True
if ruleinfo.cache is True:
if len(rule.output) > 1:
if not rule.output[0].is_multiext:
raise WorkflowError(
"Rule is marked for between workflow caching but has multiple output files. "
"This is only allowed if multiext() is used to declare them (see docs on between "
"workflow caching).",
rule=rule,
)
if not self.enable_cache:
logger.warning(
"Workflow defines that rule {} is eligible for caching between workflows "
"(use the --cache argument to enable this).".format(rule.name)
)
else:
self.cache_rules.add(rule.name)
elif not (ruleinfo.cache is False):
raise WorkflowError(
"Invalid argument for 'cache:' directive. Only True allowed. "
"To deactivate caching, remove directive.",
rule=rule,
)
if ruleinfo.default_target is True:
self.default_target = rule.name
elif not (ruleinfo.default_target is False):
raise WorkflowError(
"Invalid argument for 'default_target:' directive. Only True allowed. "
"Do not use the directive for rules that shall not be the default target. ",
rule=rule,
)
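            # Expose the rule's run function and a RuleProxy object in the workflow globals.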
ruleinfo.func.__name__ = "__{}".format(rule.name)
self.globals[ruleinfo.func.__name__] = ruleinfo.func
rule_proxy = RuleProxy(rule)
if orig_name is not None:
setattr(self.globals["rules"], orig_name, rule_proxy)
setattr(self.globals["rules"], rule.name, rule_proxy)
if checkpoint:
self.globals["checkpoints"].register(rule, fallback_name=orig_name)
rule.ruleinfo = ruleinfo
return ruleinfo.func
return decorate
def docstring(self, string):
def decorate(ruleinfo):
ruleinfo.docstring = string
return ruleinfo
return decorate
def input(self, *paths, **kwpaths):
def decorate(ruleinfo):
ruleinfo.input = (paths, kwpaths)
return ruleinfo
return decorate
def output(self, *paths, **kwpaths):
def decorate(ruleinfo):
ruleinfo.output = (paths, kwpaths)
return ruleinfo
return decorate
def params(self, *params, **kwparams):
def decorate(ruleinfo):
ruleinfo.params = (params, kwparams)
return ruleinfo
return decorate
def wildcard_constraints(self, *wildcard_constraints, **kwwildcard_constraints):
def decorate(ruleinfo):
ruleinfo.wildcard_constraints = (
wildcard_constraints,
kwwildcard_constraints,
)
return ruleinfo
return decorate
def cache_rule(self, cache):
def decorate(ruleinfo):
ruleinfo.cache = cache
return ruleinfo
return decorate
def default_target_rule(self, value):
def decorate(ruleinfo):
ruleinfo.default_target = value
return ruleinfo
return decorate
def message(self, message):
def decorate(ruleinfo):
ruleinfo.message = message
return ruleinfo
return decorate
def benchmark(self, benchmark):
def decorate(ruleinfo):
ruleinfo.benchmark = benchmark
return ruleinfo
return decorate
def conda(self, conda_env):
def decorate(ruleinfo):
ruleinfo.conda_env = conda_env
return ruleinfo
return decorate
def container(self, container_img):
def decorate(ruleinfo):
# Explicitly set container_img to False if None is passed, indicating that
# no container image shall be used, also not a global one.
ruleinfo.container_img = (
container_img if container_img is not None else False
)
ruleinfo.is_containerized = False
return ruleinfo
return decorate
def containerized(self, container_img):
def decorate(ruleinfo):
ruleinfo.container_img = container_img
ruleinfo.is_containerized = True
return ruleinfo
return decorate
def envmodules(self, *env_modules):
def decorate(ruleinfo):
ruleinfo.env_modules = env_modules
return ruleinfo
return decorate
def global_container(self, container_img):
self.global_container_img = container_img
self.global_is_containerized = False
def global_containerized(self, container_img):
self.global_container_img = container_img
self.global_is_containerized = True
def threads(self, threads):
def decorate(ruleinfo):
ruleinfo.threads = threads
return ruleinfo
return decorate
def shadow(self, shadow_depth):
def decorate(ruleinfo):
ruleinfo.shadow_depth = shadow_depth
return ruleinfo
return decorate
def resources(self, *args, **resources):
def decorate(ruleinfo):
ruleinfo.resources = (args, resources)
return ruleinfo
return decorate
def priority(self, priority):
def decorate(ruleinfo):
ruleinfo.priority = priority
return ruleinfo
return decorate
def version(self, version):
def decorate(ruleinfo):
ruleinfo.version = version
return ruleinfo
return decorate
def group(self, group):
def decorate(ruleinfo):
ruleinfo.group = group
return ruleinfo
return decorate
def log(self, *logs, **kwlogs):
def decorate(ruleinfo):
ruleinfo.log = (logs, kwlogs)
return ruleinfo
return decorate
def handover(self, value):
def decorate(ruleinfo):
ruleinfo.handover = value
return ruleinfo
return decorate
def shellcmd(self, cmd):
def decorate(ruleinfo):
ruleinfo.shellcmd = cmd
return ruleinfo
return decorate
def script(self, script):
def decorate(ruleinfo):
ruleinfo.script = script
return ruleinfo
return decorate
def notebook(self, notebook):
def decorate(ruleinfo):
ruleinfo.notebook = notebook
return ruleinfo
return decorate
def wrapper(self, wrapper):
def decorate(ruleinfo):
ruleinfo.wrapper = wrapper
return ruleinfo
return decorate
def template_engine(self, template_engine):
def decorate(ruleinfo):
ruleinfo.template_engine = template_engine
return ruleinfo
return decorate
def cwl(self, cwl):
def decorate(ruleinfo):
ruleinfo.cwl = cwl
return ruleinfo
return decorate
def norun(self):
def decorate(ruleinfo):
ruleinfo.norun = True
return ruleinfo
return decorate
def name(self, name):
def decorate(ruleinfo):
ruleinfo.name = name
return ruleinfo
return decorate
def run(self, func):
return RuleInfo(func)
def module(
self,
name,
snakefile=None,
meta_wrapper=None,
config=None,
skip_validation=False,
replace_prefix=None,
prefix=None,
):
self.modules[name] = ModuleInfo(
self,
name,
snakefile=snakefile,
meta_wrapper=meta_wrapper,
config=config,
skip_validation=skip_validation,
replace_prefix=replace_prefix,
prefix=prefix,
)
def userule(self, rules=None, from_module=None, name_modifier=None, lineno=None):
def decorate(maybe_ruleinfo):
if from_module is not None:
try:
module = self.modules[from_module]
except KeyError:
raise WorkflowError(
"Module {} has not been registered with 'module' statement before using it in 'use rule' statement.".format(
from_module
)
)
module.use_rules(
rules,
name_modifier,
ruleinfo=None if callable(maybe_ruleinfo) else maybe_ruleinfo,
skip_global_report_caption=self.report_text
is not None, # do not overwrite existing report text via module
)
else:
# local inheritance
if len(rules) > 1:
raise WorkflowError(
"'use rule' statement from rule in the same module must declare a single rule but multiple rules are declared."
)
orig_rule = self._rules[self.modifier.modify_rulename(rules[0])]
ruleinfo = maybe_ruleinfo if not callable(maybe_ruleinfo) else None
with WorkflowModifier(
self,
rulename_modifier=get_name_modifier_func(
rules, name_modifier, parent_modifier=self.modifier
),
ruleinfo_overwrite=ruleinfo,
):
self.rule(
name=name_modifier,
lineno=lineno,
snakefile=self.included_stack[-1],
)(orig_rule.ruleinfo)
return decorate
@staticmethod
def _empty_decorator(f):
return f
class Subworkflow:
def __init__(self, workflow, name, snakefile, workdir, configfile):
self.workflow = workflow
self.name = name
self._snakefile = snakefile
self._workdir = workdir
self.configfile = configfile
@property
def snakefile(self):
if self._snakefile is None:
return os.path.abspath(os.path.join(self.workdir, "Snakefile"))
if not os.path.isabs(self._snakefile):
return os.path.abspath(os.path.join(self.workflow.basedir, self._snakefile))
return self._snakefile
@property
def workdir(self):
workdir = "." if self._workdir is None else self._workdir
if not os.path.isabs(workdir):
return os.path.abspath(os.path.join(self.workflow.basedir, workdir))
return workdir
def target(self, paths):
if not_iterable(paths):
path = paths
path = (
path
if os.path.isabs(path) or path.startswith("root://")
else os.path.join(self.workdir, path)
)
return flag(path, "subworkflow", self)
return [self.target(path) for path in paths]
def targets(self, dag):
def relpath(f):
if f.startswith(self.workdir):
return os.path.relpath(f, start=self.workdir)
# do not adjust absolute targets outside of workdir
return f
return [
relpath(f)
for job in dag.jobs
for f in job.subworkflow_input
if job.subworkflow_input[f] is self
]
def srcdir(path):
"""Return the absolute path, relative to the source directory of the current Snakefile."""
if not workflow.included_stack:
return None
return workflow.current_basedir.join(path).get_path_or_uri()
| 34.533231
| 184
| 0.56383
|
35faeed95c16e6c5cd763afcd17c0bab5d52cde4
| 4,195
|
py
|
Python
|
tensorflow/contrib/tensor_forest/python/kernel_tests/update_fertile_slots_op_test.py
|
connectthefuture/tensorflow
|
93812423fcd5878aa2c1d0b68dc0496980c8519d
|
[
"Apache-2.0"
] | 101
|
2016-12-03T11:40:52.000Z
|
2017-12-23T02:02:03.000Z
|
tensorflow/contrib/tensor_forest/python/kernel_tests/update_fertile_slots_op_test.py
|
connectthefuture/tensorflow
|
93812423fcd5878aa2c1d0b68dc0496980c8519d
|
[
"Apache-2.0"
] | 9
|
2016-12-14T03:27:46.000Z
|
2017-09-13T02:29:07.000Z
|
tensorflow/contrib/tensor_forest/python/kernel_tests/update_fertile_slots_op_test.py
|
connectthefuture/tensorflow
|
93812423fcd5878aa2c1d0b68dc0496980c8519d
|
[
"Apache-2.0"
] | 47
|
2016-12-04T12:37:24.000Z
|
2018-01-14T18:13:07.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.allocate_deallocate_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow # pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class UpdateFertileSlotsTest(test_util.TensorFlowTestCase):
def setUp(self):
# tree is:
# 0
# 1 2
# 3 4 5 6
self.finished = [2]
self.non_fertile_leaves = [3, 4]
self.non_fertile_leaf_scores = [10., 15.]
self.end_of_tree = [5]
self.node_map = [-1, -1, 0, -1, -1, -1, -1]
self.total_counts = [[80., 40., 40.]]
self.ops = training_ops.Load()
self.stale_leaves = []
self.node_sums = [[3, 1, 2], [4, 2, 2], [5, 2, 3], [6, 1, 5], [7, 5, 2],
[8, 4, 4], [9, 7, 2]]
def testSimple(self):
with self.test_session():
(n2a_map_updates, a2n_map_updates, accumulators_cleared,
accumulators_allocated) = self.ops.update_fertile_slots(
self.finished, self.non_fertile_leaves, self.non_fertile_leaf_scores,
self.end_of_tree, self.total_counts, self.node_map,
self.stale_leaves, self.node_sums)
self.assertAllEqual([[2, 4], [-1, 0]], n2a_map_updates.eval())
self.assertAllEqual([[0], [4]], a2n_map_updates.eval())
self.assertAllEqual([], accumulators_cleared.eval())
self.assertAllEqual([0], accumulators_allocated.eval())
def testNoFinished(self):
with self.test_session():
(n2a_map_updates, a2n_map_updates, accumulators_cleared,
accumulators_allocated) = self.ops.update_fertile_slots(
[], self.non_fertile_leaves, self.non_fertile_leaf_scores,
self.end_of_tree, self.total_counts, self.node_map,
self.stale_leaves, self.node_sums)
self.assertAllEqual((2, 0), n2a_map_updates.eval().shape)
self.assertAllEqual((2, 0), a2n_map_updates.eval().shape)
self.assertAllEqual([], accumulators_cleared.eval())
self.assertAllEqual([], accumulators_allocated.eval())
def testPureCounts(self):
with self.test_session():
self.node_sums[4] = [10, 0, 10]
(n2a_map_updates, a2n_map_updates, accumulators_cleared,
accumulators_allocated) = self.ops.update_fertile_slots(
self.finished, self.non_fertile_leaves, self.non_fertile_leaf_scores,
self.end_of_tree, self.total_counts, self.node_map,
self.stale_leaves, self.node_sums)
self.assertAllEqual([[2, 3], [-1, 0]], n2a_map_updates.eval())
self.assertAllEqual([[0], [3]], a2n_map_updates.eval())
self.assertAllEqual([], accumulators_cleared.eval())
self.assertAllEqual([0], accumulators_allocated.eval())
def testBadInput(self):
del self.non_fertile_leaf_scores[-1]
with self.test_session():
with self.assertRaisesOpError(
'Number of non fertile leaves should be the same in '
'non_fertile_leaves and non_fertile_leaf_scores.'):
(n2a_map_updates, _, _, _) = self.ops.update_fertile_slots(
self.finished, self.non_fertile_leaves,
self.non_fertile_leaf_scores, self.end_of_tree, self.total_counts,
self.node_map, self.stale_leaves, self.node_sums)
self.assertAllEqual((2, 0), n2a_map_updates.eval().shape)
if __name__ == '__main__':
googletest.main()
| 41.534653
| 80
| 0.679619
|
25f975ce0eeaa263d46dcac8e2b1b078536b0a85
| 1,347
|
py
|
Python
|
regipy/plugins/plugin.py
|
kamnon/regipy
|
12d3be9da631dcc0d6fb342767e51ec4799141c6
|
[
"MIT"
] | 190
|
2019-03-06T09:13:08.000Z
|
2022-03-19T14:43:34.000Z
|
regipy/plugins/plugin.py
|
kamnon/regipy
|
12d3be9da631dcc0d6fb342767e51ec4799141c6
|
[
"MIT"
] | 117
|
2019-05-15T12:22:46.000Z
|
2022-03-30T10:43:03.000Z
|
regipy/plugins/plugin.py
|
kamnon/regipy
|
12d3be9da631dcc0d6fb342767e51ec4799141c6
|
[
"MIT"
] | 37
|
2019-03-12T14:46:12.000Z
|
2022-01-12T11:07:10.000Z
|
import logging
from typing import Any, Dict, List, Optional
from regipy.registry import RegistryHive
PLUGINS = set()
logger = logging.getLogger(__name__)
class Plugin(object):
NAME: Optional[str] = None
DESCRIPTION: Optional[str] = None
COMPATIBLE_HIVE: Optional[str] = None
def __init_subclass__(cls):
PLUGINS.add(cls)
def __init__(self, registry_hive: RegistryHive, as_json=False):
self.registry_hive = registry_hive
self.as_json = as_json
self.partial_hive_path = registry_hive.partial_hive_path
        # This variable should always hold the final result, so it can be used for anomaly detection and timeline generation.
self.entries: List[Dict[str, Any]] = list()
def can_run(self):
"""
        Whether the plugin can run or not, according to specific checks.
:return:
"""
return self.registry_hive.hive_type == self.COMPATIBLE_HIVE
def run(self):
"""
Execute the plugin
:return:
"""
def generate_timeline_artifacts(self):
"""
Run on the output of a plugin and generate timeline entries
:return:
"""
pass
def detect_anomalies(self):
"""
Run on the output of a plugin and detect possible anomalies
:return:
"""
pass
| 24.053571
| 119
| 0.63029
|
6a9ce572c2d1300d244a4574ab88f5c553638252
| 616
|
py
|
Python
|
exercicios/exercicio073.py
|
TayAntony/python
|
c79d62a12f48965ed374e4c037287a162a368a40
|
[
"MIT"
] | null | null | null |
exercicios/exercicio073.py
|
TayAntony/python
|
c79d62a12f48965ed374e4c037287a162a368a40
|
[
"MIT"
] | null | null | null |
exercicios/exercicio073.py
|
TayAntony/python
|
c79d62a12f48965ed374e4c037287a162a368a40
|
[
"MIT"
] | null | null | null |
colocacao = ('São Paulo','Coritiba','Corinthians','Atlético-MG','Ceará','Avaí','Cuiabá','Bragantino','Juventude','Flamengo','Atlético-GO','Santos','Fluminense','Palmeiras','Fortaleza','América-MG','Botafogo','Internacional',
            'Goiás','Athletico-PR')
print('=-'*20)
print(f'List of teams in the 2022 Brasileirão: {colocacao}')
print('=-'*20)
print(f'The first 5 are: {colocacao[0:5]}')
print('=-'*20)
print(f'The last 4 are: {colocacao[16:]}')
print('=-'*20)
print(f'Teams in alphabetical order: {sorted(colocacao)}')
print('=-'*20)
print(f'Flamengo is in position {colocacao.index("Flamengo")+1}')
print('=-'*20)
| 44
| 224
| 0.683442
|
04865fbde635ef56b126336b8c3303ed2e690f06
| 135
|
py
|
Python
|
dolldb/dolldb_landing/views.py
|
nkzou/dolldb
|
97444555b47e399834cf257ef0c7b8b088dba4d9
|
[
"MIT"
] | 1
|
2018-11-16T01:44:39.000Z
|
2018-11-16T01:44:39.000Z
|
dolldb/dolldb_landing/views.py
|
nkzou/dolldb
|
97444555b47e399834cf257ef0c7b8b088dba4d9
|
[
"MIT"
] | 3
|
2019-03-06T20:07:01.000Z
|
2019-06-17T21:56:45.000Z
|
dolldb/dolldb_landing/views.py
|
NullKZ/dolldb
|
97444555b47e399834cf257ef0c7b8b088dba4d9
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
return HttpResponse("Landing Page.")
| 22.5
| 40
| 0.792593
|
5054d79e07bbbe9cd403e39da6d51889c7f134e0
| 3,683
|
py
|
Python
|
banking_solution.py
|
souparnabose99/PythonOOPs
|
50707de28a07b7922abc7e2f558fb635044e925b
|
[
"MIT"
] | null | null | null |
banking_solution.py
|
souparnabose99/PythonOOPs
|
50707de28a07b7922abc7e2f558fb635044e925b
|
[
"MIT"
] | null | null | null |
banking_solution.py
|
souparnabose99/PythonOOPs
|
50707de28a07b7922abc7e2f558fb635044e925b
|
[
"MIT"
] | 1
|
2021-07-22T14:42:46.000Z
|
2021-07-22T14:42:46.000Z
|
from abc import ABCMeta, abstractmethod
from random import randint
class Account(metaclass=ABCMeta):
@abstractmethod
def create_new_account(self):
return 0
@abstractmethod
def authenticate_user(self):
return 0
@abstractmethod
def withdraw_amount(self):
return 0
@abstractmethod
def deposit_amount(self):
return 0
@abstractmethod
def display_account_balance(self):
return 0
class SavingsAccount(Account):
def __init__(self):
self.savings_account = {}
def create_new_account(self, name, initial_deposit):
self.account_number = randint(1111111111, 9999999999)
self.savings_account[self.account_number] = [name, initial_deposit]
print("Account creation has been successful. Your account number is ", self.account_number)
print()
def authenticate_user(self, name, account_number):
        if account_number in self.savings_account:  # accounts are keyed by account number, not name
if self.savings_account[account_number][0] == name:
print("User Authentication Successful")
self.account_number = account_number
return True
else:
print("User Authentication Failed")
return False
else:
print("User Authentication Failed")
return False
def withdraw_amount(self, withdrawal_amount):
if withdrawal_amount > self.savings_account[self.account_number][1]:
print("Insufficient Balance")
else:
self.savings_account[self.account_number][1] -= withdrawal_amount
print("Withdrawal Successful")
self.display_account_balance()
def deposit_amount(self, deposit_amount):
self.savings_account[self.account_number][1] += deposit_amount
print("Deposit Successful")
self.display_account_balance()
def display_account_balance(self):
print("Available Balance in account: ", self.savings_account[self.account_number][1])
savings_account = SavingsAccount()
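# Simple interactive menu loop driving account creation and access.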
while True:
print("Enter 1 to create a new account")
print("Enter 2 to access an existing account")
print("Enter 3 to exit")
choice = int(input())
if choice == 1:
print("Enter your name: ")
name = input()
print("Enter initial deposit amount: ")
amount = int(input())
savings_account.create_new_account(name, amount)
    elif choice == 2:
print("Enter your name: ")
name = input()
print("Enter account number: ")
account_number = int(input())
        auth_status = savings_account.authenticate_user(name, account_number)
if auth_status:
while True:
print("Enter 1 to withdraw amount")
print("Enter 2 to deposit amount")
print("Enter 3 to check balance")
print("Enter 4 to go back to the main menu")
choice_2 = int(input())
if choice_2 == 1:
print("Enter withdraw amount")
withdraw_amount = int(input())
savings_account.withdraw_amount(withdraw_amount)
elif choice_2 == 2:
print("Enter deposit amount")
deposit_amount = int(input())
                    savings_account.deposit_amount(deposit_amount)
elif choice_2 == 3:
savings_account.display_account_balance()
elif choice_2 == 4:
break
elif choice == 3:
quit()
| 32.59292
| 100
| 0.585935
|
060dd32d59160b6f6845eec05f50a9da117ccce7
| 3,112
|
py
|
Python
|
get_req_status.py
|
rovere/utilities
|
fe864dbe45f688ef9070deb5eb6e9cbd4fb359a4
|
[
"MIT"
] | null | null | null |
get_req_status.py
|
rovere/utilities
|
fe864dbe45f688ef9070deb5eb6e9cbd4fb359a4
|
[
"MIT"
] | null | null | null |
get_req_status.py
|
rovere/utilities
|
fe864dbe45f688ef9070deb5eb6e9cbd4fb359a4
|
[
"MIT"
] | null | null | null |
#!/bin/env python
import httplib
import urllib
import os
import pprint
import json
pp = pprint.PrettyPrinter(indent=4)
req_name='cmsdataops_CMSSW_4_2_8_patch6_HiggsReproForCert2011A_Jet_Run2011A-v1_RAW_111115_160930'
url= 'cmsweb.cern.ch'
url_old='localhost:8687'
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain"}
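# Query the production ReqMgr on cmsweb over HTTPS, authenticating with the user's grid proxy.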
def get_req_info(req_name):
print "Processing Request: \033[1m%s\033[0m ..." %req_name
conn = httplib.HTTPSConnection(url, cert_file = os.getenv('X509_USER_PROXY'), key_file = os.getenv('X509_USER_PROXY'))
conn.request("GET", "/reqmgr/reqMgr/request/%s"%req_name,'',headers)
response = conn.getresponse()
data = eval(response.read())
return data
def get_req_info_old(req_name):
conn = httplib.HTTPConnection(url_old)
conn.request("GET", "/reqmgr/reqMgr/request/%s"%req_name,'',headers)
response = conn.getresponse()
data = eval(response.read())
return data
def get_outputdataset(req_name):
conn = httplib.HTTPConnection(url_old)
conn.request("GET", "/reqmgr/reqMgr/outputDatasetsByRequestName/%s"%req_name,'',headers)
response = conn.getresponse()
data = eval(response.read())
return data
def list2date(datelist):
# format is [2011, 11, 19, 0, 4, 18],
year,month,day,boh1,boh2,boh3=datelist
return "%s-%s-%s" %(day,month,year)
def print_req_info(req_name,req_info,verbose=False):
if not verbose and not req_info.has_key('exception'):
if req_info.has_key('RequestName'):
print "Request Name: %s" %req_info['RequestName']
if req_info.has_key('RequestDate'):
print " o Submission date: %s" %list2date(req_info['RequestDate'])
status=req_info['RequestStatus']
print " o Request Status: \033[1m%s\033[0m" %status
print " o Percent Complete (success): \033[1m%s (%s)\033[0m" %(req_info['percent_complete'],req_info['percent_success'])
if status == "announced":
print " o Datasets:"
counter=1
for dataset in get_outputdataset(req_name):
print " %s) \033[1m%s\033[0m"%(counter,dataset)
counter+=1
else:
pp.pprint(req_info)
print "*"*80
requests=['cmsdataops_CMSSW_4_2_8_patch6_HiggsReproForCert2011A_Jet_Run2011A-v1_RAW_111115_160924',
'cmsdataops_CMSSW_4_2_8_patch6_HiggsReproForCert2011A_Jet_Run2011A-v1_RAW_111115_160926',
'cmsdataops_CMSSW_4_2_8_patch6_HiggsReproForCert2011A_Jet_Run2011A-v1_RAW_111115_160929',
'cmsdataops_CMSSW_4_2_8_patch6_HiggsReproForCert2011A_Jet_Run2011A-v1_RAW_111115_160930',
'cmsdataops_CMSSW_4_2_8_patch6_HiggsReproForCert2011B_Jet_Run2011B-v1_RAW_111115_160938',
'cmsdataops_CMSSW_4_2_8_patch6_HiggsReproForCert2011B_Jet_Run2011B-v1_RAW_111115_160940',
'cmsdataops_CMSSW_4_2_8_patch6_HiggsReproForCert2011B_Jet_Run2011B-v1_RAW_111115_160942',
'cmsdataops_CMSSW_4_2_8_patch6_HiggsReproForCert2011B_Jet_Run2011B-v1_RAW_111115_160944']
for request in requests:
req_info = get_req_info(request)
if req_info.has_key('exception'):
print "---> It did not work, old reqmngr (the one with the tunnel)"
req_info = get_req_info_old(request)
print_req_info(request,req_info)
| 39.392405
| 124
| 0.760283
|
a4a712ea4c4bd423c203d477d78370014843cda7
| 1,973
|
py
|
Python
|
tests/__init__.py
|
RazerM/bucketcache
|
8d9b163b73da8c498793cce2f22f6a7cbe524d94
|
[
"MIT"
] | 2
|
2018-02-13T03:04:08.000Z
|
2021-08-04T05:46:18.000Z
|
tests/__init__.py
|
RazerM/bucketcache
|
8d9b163b73da8c498793cce2f22f6a7cbe524d94
|
[
"MIT"
] | 10
|
2015-04-21T09:45:53.000Z
|
2020-09-04T12:34:03.000Z
|
tests/__init__.py
|
RazerM/bucketcache
|
8d9b163b73da8c498793cce2f22f6a7cbe524d94
|
[
"MIT"
] | 1
|
2016-05-28T21:02:31.000Z
|
2016-05-28T21:02:31.000Z
|
from __future__ import absolute_import, division
import sys
import pytest
from bucketcache import Bucket, DeferredWriteBucket
from bucketcache.backends import (
JSONBackend, MessagePackBackend, PickleBackend)
from bucketcache.keymakers import DefaultKeyMaker, StreamingDefaultKeyMaker
cache_objects = [JSONBackend, MessagePackBackend, PickleBackend]
cache_ids = ['json', 'msgpack', 'pickle']
keymakers = [DefaultKeyMaker, StreamingDefaultKeyMaker]
keymaker_ids = ['DefaultKeyMaker', 'StreamingDefaultKeyMaker']
__all__ = [
'slow',
'cache_all',
'cache_serializable',
'expiring_cache_all',
'deferred_cache_all',
'requires_python_version',
'keymakers_all',
]
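# Marker for slow tests so they can be deselected, e.g. with -m "not slow".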
slow = pytest.mark.slow
@pytest.yield_fixture(params=keymakers, ids=keymaker_ids)
def keymakers_all(request):
yield request.param
@pytest.yield_fixture(params=cache_objects, ids=cache_ids)
def cache_all(request, tmpdir, keymakers_all):
"""Return bucket for each backend."""
yield Bucket(path=str(tmpdir), backend=request.param)
@pytest.yield_fixture(params=[PickleBackend])
def cache_serializable(request, tmpdir):
"""Return bucket for each backend that can serialize arbitrary objects."""
yield Bucket(path=str(tmpdir), backend=request.param)
@pytest.yield_fixture(params=cache_objects, ids=cache_ids)
def expiring_cache_all(request, tmpdir):
"""Return bucket with an expiration date for each backend."""
yield Bucket(path=str(tmpdir), backend=request.param, seconds=2)
@pytest.yield_fixture(params=cache_objects, ids=cache_ids)
def deferred_cache_all(request, tmpdir):
"""Return deferred write bucket for each backend."""
yield DeferredWriteBucket(path=str(tmpdir), backend=request.param)
def requires_python_version(*version):
vbool = sys.version_info < tuple(version)
sversion = '.'.join([str(v) for v in version])
message = 'Requires Python {}'.format(sversion)
return pytest.mark.skipif(vbool, reason=message)
| 31.31746
| 78
| 0.763305
|
ae0cd3c3ce26d808fa021aa31c2b43df1dc26935
| 752
|
py
|
Python
|
ebnmpy/r_utils/__init__.py
|
kclamar/ebnmpy
|
fc3d7126757c4184c7cb442312f1db5b78d73a3b
|
[
"MIT"
] | null | null | null |
ebnmpy/r_utils/__init__.py
|
kclamar/ebnmpy
|
fc3d7126757c4184c7cb442312f1db5b78d73a3b
|
[
"MIT"
] | null | null | null |
ebnmpy/r_utils/__init__.py
|
kclamar/ebnmpy
|
fc3d7126757c4184c7cb442312f1db5b78d73a3b
|
[
"MIT"
] | null | null | null |
import numpy as np
def stop(*args):
raise Exception(*args)
def unlist(d: dict):
return np.array(list(d.values()))
def numeric(*args, **kwargs):
return np.zeros(*args)
def matrix(nrow, ncol):
return np.empty((nrow, ncol))
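# Emulate R's rep(): repeat each element `each` times, tile the result `times` times,
# and optionally truncate or extend to length_out.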
def rep(x, times=1, each=1, length_out=None):
if length_out is None:
return np.tile(np.repeat(x, each), times)
temp = rep(x, times, each, None)
return np.tile(temp, int(np.ceil(length_out / len(temp))))[:length_out]
def pmax(a, b):
return np.maximum(a, b)
def pmin(a, b):
return np.minimum(a, b)
def length(x):
if np.isscalar(x):
return 1
return len(x)
def asvector(x):
return np.asarray(x)
def seq(a, b, by):
return np.arange(a, b + by, by)
| 15.666667
| 75
| 0.611702
|
8fca68c24bcb97056b462e52e1beeb2d950c9f78
| 15,070
|
py
|
Python
|
src/chrome/test/functional/prefs.py
|
jxjnjjn/chromium
|
435c1d02fd1b99001dc9e1e831632c894523580d
|
[
"Apache-2.0"
] | 9
|
2018-09-21T05:36:12.000Z
|
2021-11-15T15:14:36.000Z
|
chrome/test/functional/prefs.py
|
devasia1000/chromium
|
919a8a666862fb866a6bb7aa7f3ae8c0442b4828
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2015-07-21T08:02:01.000Z
|
2015-07-21T08:02:01.000Z
|
chrome/test/functional/prefs.py
|
devasia1000/chromium
|
919a8a666862fb866a6bb7aa7f3ae8c0442b4828
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 6
|
2016-11-14T10:13:35.000Z
|
2021-01-23T15:29:53.000Z
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import shutil
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
from webdriver_pages import settings
from webdriver_pages.settings import Behaviors, ContentTypes
class PrefsTest(pyauto.PyUITest):
"""TestCase for Preferences."""
INFOBAR_TYPE = 'rph_infobar'
def setUp(self):
pyauto.PyUITest.setUp(self)
self._driver = self.NewWebDriver()
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
"""
while True:
raw_input('Interact with the browser and hit <enter> to dump prefs... ')
self.pprint(self.GetPrefsInfo().Prefs())
def testSessionRestore(self):
"""Test session restore preference."""
url1 = 'http://www.google.com/'
url2 = 'http://news.google.com/'
self.NavigateToURL(url1)
self.AppendTab(pyauto.GURL(url2))
num_tabs = self.GetTabCount()
# Set pref to restore session on startup.
self.SetPrefs(pyauto.kRestoreOnStartup, 1)
logging.debug('Setting %s to 1' % pyauto.kRestoreOnStartup)
self.RestartBrowser(clear_profile=False)
self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kRestoreOnStartup), 1)
self.assertEqual(num_tabs, self.GetTabCount())
self.ActivateTab(0)
self.assertEqual(url1, self.GetActiveTabURL().spec())
self.ActivateTab(1)
self.assertEqual(url2, self.GetActiveTabURL().spec())
def testNavigationStateOnSessionRestore(self):
"""Verify navigation state is preserved on session restore."""
urls = ('http://www.google.com/',
'http://news.google.com/',
'http://dev.chromium.org/',)
for url in urls:
self.NavigateToURL(url)
self.TabGoBack()
self.assertEqual(self.GetActiveTabURL().spec(), urls[-2])
self.SetPrefs(pyauto.kRestoreOnStartup, 1) # set pref to restore session
self.RestartBrowser(clear_profile=False)
# Verify that navigation state (forward/back state) is restored.
self.TabGoBack()
self.assertEqual(self.GetActiveTabURL().spec(), urls[0])
for i in (-2, -1):
      self.TabGoForward()
self.assertEqual(self.GetActiveTabURL().spec(), urls[i])
def testSessionRestoreURLs(self):
"""Verify restore URLs preference."""
url1 = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title1.html'))
url2 = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title2.html'))
# Set pref to restore given URLs on startup
self.SetPrefs(pyauto.kRestoreOnStartup, 4) # 4 is for restoring URLs
self.SetPrefs(pyauto.kURLsToRestoreOnStartup, [url1, url2])
self.RestartBrowser(clear_profile=False)
# Verify
self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kRestoreOnStartup), 4)
self.assertEqual(2, self.GetTabCount())
self.ActivateTab(0)
self.assertEqual(url1, self.GetActiveTabURL().spec())
self.ActivateTab(1)
self.assertEqual(url2, self.GetActiveTabURL().spec())
def testSessionRestoreShowBookmarkBar(self):
"""Verify restore for bookmark bar visibility."""
assert not self.GetPrefsInfo().Prefs(pyauto.kShowBookmarkBar)
self.SetPrefs(pyauto.kShowBookmarkBar, True)
self.assertEqual(True, self.GetPrefsInfo().Prefs(pyauto.kShowBookmarkBar))
self.RestartBrowser(clear_profile=False)
self.assertEqual(True, self.GetPrefsInfo().Prefs(pyauto.kShowBookmarkBar))
self.assertTrue(self.GetBookmarkBarVisibility())
def testDownloadDirPref(self):
"""Verify download dir pref."""
test_dir = os.path.join(self.DataDir(), 'downloads')
file_url = self.GetFileURLForPath(os.path.join(test_dir, 'a_zip_file.zip'))
download_dir = self.GetDownloadDirectory().value()
new_dl_dir = os.path.join(download_dir, 'My+Downloads Folder')
downloaded_pkg = os.path.join(new_dl_dir, 'a_zip_file.zip')
os.path.exists(new_dl_dir) and shutil.rmtree(new_dl_dir)
os.makedirs(new_dl_dir)
# Set pref to download in new_dl_dir
self.SetPrefs(pyauto.kDownloadDefaultDirectory, new_dl_dir)
self.DownloadAndWaitForStart(file_url)
self.WaitForAllDownloadsToComplete()
self.assertTrue(os.path.exists(downloaded_pkg))
shutil.rmtree(new_dl_dir, ignore_errors=True) # cleanup
def testToolbarButtonsPref(self):
"""Verify toolbar buttons prefs."""
# Assert defaults first
self.assertFalse(self.GetPrefsInfo().Prefs(pyauto.kShowHomeButton))
self.SetPrefs(pyauto.kShowHomeButton, True)
self.RestartBrowser(clear_profile=False)
self.assertTrue(self.GetPrefsInfo().Prefs(pyauto.kShowHomeButton))
def testNetworkPredictionEnabledPref(self):
"""Verify DNS prefetching pref."""
# Assert default
self.assertTrue(self.GetPrefsInfo().Prefs(pyauto.kNetworkPredictionEnabled))
self.SetPrefs(pyauto.kNetworkPredictionEnabled, False)
self.RestartBrowser(clear_profile=False)
self.assertFalse(self.GetPrefsInfo().Prefs(
pyauto.kNetworkPredictionEnabled))
def testHomepagePrefs(self):
"""Verify homepage prefs."""
# "Use the New Tab page"
self.SetPrefs(pyauto.kHomePageIsNewTabPage, True)
logging.debug('Setting %s to 1' % pyauto.kHomePageIsNewTabPage)
self.RestartBrowser(clear_profile=False)
self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kHomePageIsNewTabPage),
True)
# "Open this page"
url = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title1.html'))
self.SetPrefs(pyauto.kHomePage, url)
self.SetPrefs(pyauto.kHomePageIsNewTabPage, False)
self.RestartBrowser(clear_profile=False)
self.assertEqual(self.GetPrefsInfo().Prefs(pyauto.kHomePage), url)
self.assertFalse(self.GetPrefsInfo().Prefs(pyauto.kHomePageIsNewTabPage))
# TODO(nirnimesh): Actually verify that homepage loads.
# This requires telling pyauto *not* to set about:blank as homepage.
def testGeolocationPref(self):
"""Verify geolocation pref.
Checks for the geolocation infobar.
"""
# GetBrowserInfo() call seems to fail later on in this test. Call it early.
# crbug.com/89000
branding = self.GetBrowserInfo()['properties']['branding']
url = self.GetFileURLForPath(os.path.join( # triggers geolocation
self.DataDir(), 'geolocation', 'geolocation_on_load.html'))
self.assertEqual(3, # default state
self.GetPrefsInfo().Prefs(pyauto.kGeolocationDefaultContentSetting))
self.NavigateToURL(url)
self.assertTrue(self.WaitForInfobarCount(1))
self.assertTrue(self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars'])
# Disable geolocation
self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 2)
self.assertEqual(2,
self.GetPrefsInfo().Prefs(pyauto.kGeolocationDefaultContentSetting))
self.ReloadTab()
# Fails on Win7/Vista Chromium bots. crbug.com/89000
if (self.IsWin7() or self.IsWinVista()) and branding == 'Chromium':
return
behavior = self._driver.execute_async_script(
'triggerGeoWithCallback(arguments[arguments.length - 1]);')
self.assertEqual(
behavior, Behaviors.BLOCK,
msg='Behavior is "%s" when it should be BLOCKED.' % behavior)
def testUnderTheHoodPref(self):
"""Verify the security preferences for Under the Hood.
    These settings are enabled by default."""
pref_list = [pyauto.kNetworkPredictionEnabled, pyauto.kSafeBrowsingEnabled,
pyauto.kAlternateErrorPagesEnabled,
pyauto.kSearchSuggestEnabled, pyauto.kShowOmniboxSearchHint]
for pref in pref_list:
# Verify the default value
self.assertEqual(self.GetPrefsInfo().Prefs(pref), True)
self.SetPrefs(pref, False)
self.RestartBrowser(clear_profile=False)
for pref in pref_list:
self.assertEqual(self.GetPrefsInfo().Prefs(pref), False)
def testJavaScriptEnableDisable(self):
"""Verify enabling disabling javascript prefs work """
self.assertTrue(
self.GetPrefsInfo().Prefs(pyauto.kWebKitJavascriptEnabled))
    url = self.GetFileURLForDataPath('javaScriptTitle.html')
title1 = 'Title from script javascript enabled'
self.NavigateToURL(url)
self.assertEqual(title1, self.GetActiveTabTitle())
self.SetPrefs(pyauto.kWebKitJavascriptEnabled, False)
title = 'This is html title'
self.NavigateToURL(url)
self.assertEqual(title, self.GetActiveTabTitle())
def testHaveLocalStatePrefs(self):
"""Verify that we have some Local State prefs."""
self.assertTrue(self.GetLocalStatePrefsInfo())
def testAllowSelectedGeoTracking(self):
"""Verify hostname pattern and behavior for allowed tracking."""
# Default location tracking option "Ask me".
self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 3)
self.NavigateToURL(
self.GetHttpURLForDataPath('geolocation', 'geolocation_on_load.html'))
self.assertTrue(self.WaitForInfobarCount(1))
self.PerformActionOnInfobar('accept', infobar_index=0) # Allow tracking.
# Get the hostname pattern (e.g. http://127.0.0.1:57622).
hostname_pattern = (
'/'.join(self.GetHttpURLForDataPath('').split('/')[0:3]))
self.assertEqual(
# Allow the hostname.
{hostname_pattern+','+hostname_pattern: {'geolocation': 1}},
self.GetPrefsInfo().Prefs(pyauto.kContentSettingsPatternPairs))
def testDismissedInfobarSavesNoEntry(self):
"""Verify dismissing infobar does not save an exception entry."""
# Default location tracking option "Ask me".
self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 3)
self.NavigateToURL(
self.GetFileURLForDataPath('geolocation', 'geolocation_on_load.html'))
self.assertTrue(self.WaitForInfobarCount(1))
self.PerformActionOnInfobar('dismiss', infobar_index=0)
self.assertEqual(
{}, self.GetPrefsInfo().Prefs(pyauto.kContentSettingsPatternPairs))
def testGeolocationBlockedWhenTrackingDenied(self):
"""Verify geolocations is blocked when tracking is denied.
The test verifies the blocked hostname pattern entry on the Geolocations
exceptions page.
"""
# Ask for permission when site wants to track.
self.SetPrefs(pyauto.kGeolocationDefaultContentSetting, 3)
self.NavigateToURL(
self.GetHttpURLForDataPath('geolocation', 'geolocation_on_load.html'))
self.assertTrue(self.WaitForInfobarCount(1))
self.PerformActionOnInfobar('cancel', infobar_index=0) # Deny tracking.
behavior = self._driver.execute_async_script(
'triggerGeoWithCallback(arguments[arguments.length - 1]);')
self.assertEqual(
behavior, Behaviors.BLOCK,
msg='Behavior is "%s" when it should be BLOCKED.' % behavior)
# Get the hostname pattern (e.g. http://127.0.0.1:57622).
hostname_pattern = (
'/'.join(self.GetHttpURLForDataPath('').split('/')[0:3]))
self.assertEqual(
# Block the hostname.
{hostname_pattern+','+hostname_pattern: {'geolocation': 2}},
self.GetPrefsInfo().Prefs(pyauto.kContentSettingsPatternPairs))
def _CheckForVisibleImage(self, tab_index=0, windex=0):
"""Checks whether or not an image is visible on the webpage.
Args:
tab_index: Tab index. Defaults to 0 (first tab).
windex: Window index. Defaults to 0 (first window).
Returns:
True if image is loaded, otherwise returns False if image is not loaded.
"""
# Checks whether an image is loaded by checking the area (width
# and height) of the image. If the area is non zero then the image is
# visible. If the area is zero then the image is not loaded.
# Chrome zeros the |naturalWidth| and |naturalHeight|.
script = """
for (i=0; i < document.images.length; i++) {
if ((document.images[i].naturalWidth != 0) &&
(document.images[i].naturalHeight != 0)) {
window.domAutomationController.send(true);
}
}
window.domAutomationController.send(false);
"""
return self.ExecuteJavascript(script, windex=windex, tab_index=tab_index)
def testImageContentSettings(self):
"""Verify image content settings show or hide images."""
url = self.GetHttpURLForDataPath('settings', 'image_page.html')
self.NavigateToURL(url)
self.assertTrue(self._CheckForVisibleImage(),
msg='No visible images found.')
# Set to block all images from loading.
self.SetPrefs(pyauto.kDefaultContentSettings, {'images': 2})
self.NavigateToURL(url)
self.assertFalse(self._CheckForVisibleImage(),
msg='At least one visible image found.')
def testImagesNotBlockedInIncognito(self):
"""Verify images are not blocked in Incognito mode."""
url = self.GetHttpURLForDataPath('settings', 'image_page.html')
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(url, 1, 0)
self.assertTrue(self._CheckForVisibleImage(windex=1),
msg='No visible images found in Incognito mode.')
def testBlockImagesForHostname(self):
"""Verify images blocked for defined hostname pattern."""
url = 'http://www.google.com'
page = settings.ManageExceptionsPage.FromNavigation(
self._driver, ContentTypes.IMAGES)
pattern, behavior = (url, Behaviors.BLOCK)
# Add an exception BLOCK for hostname pattern 'www.google.com'.
page.AddNewException(pattern, behavior)
self.NavigateToURL(url)
self.assertFalse(self._CheckForVisibleImage(),
msg='At least one visible image found.')
def testAllowImagesForHostname(self):
"""Verify images allowed for defined hostname pattern."""
url = 'http://www.google.com'
page = settings.ManageExceptionsPage.FromNavigation(
self._driver, ContentTypes.IMAGES)
pattern, behavior = (url, Behaviors.ALLOW)
# Add an exception ALLOW for hostname pattern 'www.google.com'.
page.AddNewException(pattern, behavior)
self.NavigateToURL(url)
self.assertTrue(self._CheckForVisibleImage(),
msg='No visible images found.')
def testProtocolHandlerRegisteredCorrectly(self):
"""Verify sites that ask to be default handlers registers correctly."""
url = self.GetHttpURLForDataPath('settings', 'protocol_handler.html')
self.NavigateToURL(url)
# Returns a dictionary with the custom handler.
asked_handler_dict = self._driver.execute_script(
'return registerCustomHandler()')
self.PerformActionOnInfobar(
'accept', infobar_index=test_utils.WaitForInfobarTypeAndGetIndex(
self, self.INFOBAR_TYPE))
self._driver.find_element_by_id('test_protocol').click()
self.assertTrue(
self._driver.execute_script(
'return doesQueryConformsToProtocol("%s", "%s")'
% (asked_handler_dict['query_key'],
asked_handler_dict['query_value'])),
msg='Protocol did not register correctly.')
if __name__ == '__main__':
pyauto_functional.Main()
| 42.934473
| 80
| 0.712541
|
ed8804c17eb85d311bfc63c398288e01e8150ec6
| 9,766
|
py
|
Python
|
api/base/utils.py
|
prisnormando/osf.io
|
78dbbd3b1104a40480754b3172de03e230534523
|
[
"Apache-2.0"
] | 1
|
2019-05-08T02:30:38.000Z
|
2019-05-08T02:30:38.000Z
|
api/base/utils.py
|
prisnormando/osf.io
|
78dbbd3b1104a40480754b3172de03e230534523
|
[
"Apache-2.0"
] | null | null | null |
api/base/utils.py
|
prisnormando/osf.io
|
78dbbd3b1104a40480754b3172de03e230534523
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import urllib
import furl
import urlparse
from distutils.version import StrictVersion
from hashids import Hashids
from django.utils.http import urlquote
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import OuterRef, Exists, Q, QuerySet, F
from rest_framework.exceptions import NotFound
from rest_framework.reverse import reverse
from api.base.authentication.drf import get_session_from_cookie
from api.base.exceptions import Gone, UserGone
from api.base.settings import HASHIDS_SALT
from framework.auth import Auth
from framework.auth.cas import CasResponse
from framework.auth.oauth_scopes import ComposedScopes, normalize_scopes
from osf.models import OSFUser, Contributor, Node, Registration
from osf.models.base import GuidMixin
from osf.utils.requests import check_select_for_update
from website import settings as website_settings
from website import util as website_util # noqa
# These values are copied from rest_framework.fields.BooleanField
# BooleanField cannot be imported here without raising an
# ImproperlyConfigured error
TRUTHY = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True, 'on', 'ON', 'On', 'y', 'Y', 'YES', 'yes'))
FALSY = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False, 'off', 'OFF', 'Off', 'n', 'N', 'NO', 'no'))
UPDATE_METHODS = ['PUT', 'PATCH']
hashids = Hashids(alphabet='abcdefghijklmnopqrstuvwxyz', salt=HASHIDS_SALT)
def decompose_field(field):
from api.base.serializers import (
HideIfWithdrawal, HideIfRegistration,
HideIfDisabled, AllowMissing, NoneIfWithdrawal,
)
WRAPPER_FIELDS = (HideIfWithdrawal, HideIfRegistration, HideIfDisabled, AllowMissing, NoneIfWithdrawal)
while isinstance(field, WRAPPER_FIELDS):
try:
field = getattr(field, 'field')
except AttributeError:
break
return field
def is_bulk_request(request):
"""
Returns True if bulk request. Can be called as early as the parser.
"""
content_type = request.content_type
return 'ext=bulk' in content_type
def is_truthy(value):
return value in TRUTHY
def is_falsy(value):
return value in FALSY
def get_user_auth(request):
"""Given a Django request object, return an ``Auth`` object with the
authenticated user attached to it.
"""
user = request.user
private_key = request.query_params.get('view_only', None)
if user.is_anonymous:
auth = Auth(None, private_key=private_key)
else:
auth = Auth(user, private_key=private_key)
return auth
def absolute_reverse(view_name, query_kwargs=None, args=None, kwargs=None):
"""Like django's `reverse`, except returns an absolute URL. Also add query parameters."""
relative_url = reverse(view_name, kwargs=kwargs)
url = website_util.api_v2_url(relative_url, params=query_kwargs, base_prefix='')
return url
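# A minimal usage sketch for absolute_reverse; the view name, kwargs, and the resulting
# host/version prefix below are illustrative and depend on the API URL configuration.
def _absolute_reverse_example():
    # Would yield something like 'https://<api-host>/v2/nodes/abc12/?related_counts=true'.
    return absolute_reverse(
        'nodes:node-detail',
        query_kwargs={'related_counts': 'true'},
        kwargs={'node_id': 'abc12', 'version': 'v2'},
    )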
def get_object_or_error(model_or_qs, query_or_pk=None, request=None, display_name=None):
if not request:
# for backwards compat with existing get_object_or_error usages
raise TypeError('request is a required argument')
obj = query = None
model_cls = model_or_qs
select_for_update = check_select_for_update(request)
if isinstance(model_or_qs, QuerySet):
# they passed a queryset
model_cls = model_or_qs.model
try:
obj = model_or_qs.select_for_update().get() if select_for_update else model_or_qs.get()
except model_cls.DoesNotExist:
raise NotFound
elif isinstance(query_or_pk, basestring):
# they passed a 5-char guid as a string
if issubclass(model_cls, GuidMixin):
# if it's a subclass of GuidMixin we know it's primary_identifier_name
query = {'guids___id': query_or_pk}
else:
if hasattr(model_cls, 'primary_identifier_name'):
# primary_identifier_name gives us the natural key for the model
query = {model_cls.primary_identifier_name: query_or_pk}
else:
                # fall back to modm compatibility's load method since we don't know their PIN
obj = model_cls.load(query_or_pk, select_for_update=select_for_update)
else:
# they passed a query
try:
obj = model_cls.objects.filter(query_or_pk).select_for_update().get() if select_for_update else model_cls.objects.get(query_or_pk)
except model_cls.DoesNotExist:
raise NotFound
if not obj:
if not query:
# if we don't have a query or an object throw 404
raise NotFound
try:
# TODO This could be added onto with eager on the queryset and the embedded fields of the api
if isinstance(query, dict):
obj = model_cls.objects.get(**query) if not select_for_update else model_cls.objects.filter(**query).select_for_update().get()
else:
obj = model_cls.objects.get(query) if not select_for_update else model_cls.objects.filter(query).select_for_update().get()
except ObjectDoesNotExist:
raise NotFound
# For objects that have been disabled (is_active is False), return a 410.
# The User model is an exception because we still want to allow
# users who are unconfirmed or unregistered, but not users who have been
# disabled.
if model_cls is OSFUser and obj.is_disabled:
raise UserGone(user=obj)
elif model_cls is not OSFUser and not getattr(obj, 'is_active', True) or getattr(obj, 'is_deleted', False) or getattr(obj, 'deleted', False):
if display_name is None:
raise Gone
else:
raise Gone(detail='The requested {name} is no longer available.'.format(name=display_name))
return obj
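# A minimal usage sketch for get_object_or_error; 'abc12' is a made-up 5-char guid and
# `request` is assumed to be the DRF request available on the calling view.
def _get_object_or_error_example(request):
    # Guid lookup: raises NotFound if missing, Gone/UserGone if deleted or disabled.
    node = get_object_or_error(Node, 'abc12', request, display_name='node')
    # Queryset lookup: pass a prefiltered queryset instead of a model class plus pk.
    registration = get_object_or_error(
        Registration.objects.filter(guids___id='abc12'), request=request)
    return node, registration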
def default_node_list_queryset(model_cls):
assert model_cls in {Node, Registration}
return model_cls.objects.filter(is_deleted=False).annotate(region=F('addons_osfstorage_node_settings__region___id'))
def default_node_permission_queryset(user, model_cls):
assert model_cls in {Node, Registration}
if user is None or user.is_anonymous:
return model_cls.objects.filter(is_public=True)
sub_qs = Contributor.objects.filter(node=OuterRef('pk'), user__id=user.id, read=True)
return model_cls.objects.annotate(contrib=Exists(sub_qs)).filter(Q(contrib=True) | Q(is_public=True))
def default_node_list_permission_queryset(user, model_cls):
# **DO NOT** change the order of the querysets below.
# If get_roots() is called on default_node_list_qs & default_node_permission_qs,
    # Django's aliasing will break and the resulting QS will be empty and you will be sad.
qs = default_node_permission_queryset(user, model_cls) & default_node_list_queryset(model_cls)
return qs.annotate(region=F('addons_osfstorage_node_settings__region___id'))
def extend_querystring_params(url, params):
scheme, netloc, path, query, _ = urlparse.urlsplit(url)
orig_params = urlparse.parse_qs(query)
orig_params.update(params)
query = urllib.urlencode(orig_params, True)
return urlparse.urlunsplit([scheme, netloc, path, query, ''])
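# Worked example for extend_querystring_params (values are illustrative; parameter
# order in the result is not guaranteed because the query round-trips through a dict):
#   extend_querystring_params('http://localhost:8000/v2/nodes/?page=2', {'embed': 'contributors'})
#   -> 'http://localhost:8000/v2/nodes/?page=2&embed=contributors'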
def extend_querystring_if_key_exists(url, request, key):
if key in request.query_params.keys():
return extend_querystring_params(url, {key: request.query_params.get(key)})
return url
def has_admin_scope(request):
""" Helper function to determine if a request should be treated
as though it has the `osf.admin` scope. This includes both
tokened requests that do, and requests that are made via the
OSF (i.e. have an osf cookie)
"""
cookie = request.COOKIES.get(website_settings.COOKIE_NAME)
if cookie:
return bool(get_session_from_cookie(cookie))
token = request.auth
if token is None or not isinstance(token, CasResponse):
return False
return set(ComposedScopes.ADMIN_LEVEL).issubset(normalize_scopes(token.attributes['accessTokenScope']))
def is_deprecated(request_version, min_version=None, max_version=None):
if not min_version and not max_version:
raise NotImplementedError('Must specify min or max version.')
min_version_deprecated = min_version and StrictVersion(request_version) < StrictVersion(str(min_version))
max_version_deprecated = max_version and StrictVersion(request_version) > StrictVersion(str(max_version))
if min_version_deprecated or max_version_deprecated:
return True
return False
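# Worked examples for is_deprecated (version numbers are illustrative):
#   is_deprecated('2.0', min_version='2.1')                     -> True   (below the minimum)
#   is_deprecated('2.9', min_version='2.1', max_version='2.8')  -> True   (above the maximum)
#   is_deprecated('2.5', min_version='2.1', max_version='2.8')  -> False
#   is_deprecated('2.5')                                        -> raises NotImplementedError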
def waterbutler_api_url_for(node_id, provider, path='/', _internal=False, base_url=None, **kwargs):
assert path.startswith('/'), 'Path must always start with /'
if provider != 'osfstorage':
base_url = None
url = furl.furl(website_settings.WATERBUTLER_INTERNAL_URL if _internal else (base_url or website_settings.WATERBUTLER_URL))
segments = ['v1', 'resources', node_id, 'providers', provider] + path.split('/')[1:]
url.path.segments.extend([urlquote(x) for x in segments])
url.args.update(kwargs)
return url.url
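# A usage sketch for waterbutler_api_url_for; the node id and path are made up, and
# <WATERBUTLER_URL> stands for whatever website_settings.WATERBUTLER_URL is configured to.
#   waterbutler_api_url_for('abc12', 'osfstorage', path='/folder/file.txt', kind='file')
#   -> '<WATERBUTLER_URL>/v1/resources/abc12/providers/osfstorage/folder/file.txt?kind=file'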
def assert_resource_type(obj, resource_tuple):
assert type(resource_tuple) is tuple, 'resources must be passed in as a tuple.'
if len(resource_tuple) == 1:
error_message = resource_tuple[0].__name__
elif len(resource_tuple) == 2:
error_message = resource_tuple[0].__name__ + ' or ' + resource_tuple[1].__name__
else:
error_message = ''
for resource in resource_tuple[:-1]:
error_message += resource.__name__ + ', '
error_message += 'or ' + resource_tuple[-1].__name__
a_or_an = 'an' if error_message[0].lower() in 'aeiou' else 'a'
assert isinstance(obj, resource_tuple), 'obj must be {} {}; got {}'.format(a_or_an, error_message, obj)
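# Worked example for assert_resource_type (objects are illustrative):
#   assert_resource_type(node, (Node,))               # passes when `node` is a Node instance
#   assert_resource_type(user, (Node, Registration))  # fails with
#       AssertionError: obj must be a Node or Registration; got <OSFUser ...>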
| 43.022026
| 145
| 0.712369
|
744b18552009d0f3b0baea98a0e563a4d086f130
| 1,095
|
py
|
Python
|
rlkit/launchers/experiments/vitchyr/probabilistic_goal_reaching/env.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | 1
|
2020-10-23T14:40:09.000Z
|
2020-10-23T14:40:09.000Z
|
rlkit/launchers/experiments/vitchyr/probabilistic_goal_reaching/env.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
rlkit/launchers/experiments/vitchyr/probabilistic_goal_reaching/env.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | 1
|
2021-05-27T20:38:45.000Z
|
2021-05-27T20:38:45.000Z
|
import gym
import numpy as np
from multiworld.envs.mujoco.classic_mujoco.ant import AntFullPositionGoalEnv
class NormalizeAntFullPositionGoalEnv(gym.Wrapper):
def __init__(self, env):
assert isinstance(env, AntFullPositionGoalEnv)
super().__init__(env)
self.qpos_weights = 1. / env.presampled_qpos.std(axis=0)
self.qvel_weights = 1. / env.presampled_qvel.std(axis=0)
self._ob_weights = np.concatenate((
self.qpos_weights,
self.qvel_weights,
))
def reset(self):
ob = super().reset()
new_ob = self._create_new_ob(ob)
return new_ob
def step(self, action):
ob, *other = self.env.step(action)
new_ob = self._create_new_ob(ob)
output = (new_ob, *other)
return output
def _create_new_ob(self, ob):
new_ob = {
'observation': ob['observation'] * self._ob_weights,
'achieved_goal': ob['achieved_goal'] * self.qpos_weights,
'desired_goal': ob['desired_goal'] * self.qpos_weights,
}
return new_ob
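# A minimal usage sketch (assumes AntFullPositionGoalEnv can be built with its default
# arguments; only meant to show where the wrapper sits in the stack):
def _normalized_ant_env_example():
    raw_env = AntFullPositionGoalEnv()
    env = NormalizeAntFullPositionGoalEnv(raw_env)
    ob = env.reset()  # dict whose 'observation' and goal entries are rescaled by 1/std
    return env, ob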
| 30.416667
| 76
| 0.624658
|
da94e6f90d823f110e4a2373d7fd16b3d1ab5ac3
| 850
|
py
|
Python
|
configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py
|
mmabrouk/detectron2
|
158e395acdb8ca6ed6d488b43475f9ef9d200405
|
[
"Apache-2.0"
] | 201
|
2022-02-14T17:44:27.000Z
|
2022-03-31T03:42:41.000Z
|
configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py
|
mmabrouk/detectron2
|
158e395acdb8ca6ed6d488b43475f9ef9d200405
|
[
"Apache-2.0"
] | 8
|
2022-03-05T11:46:45.000Z
|
2022-03-21T02:44:06.000Z
|
configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py
|
mmabrouk/detectron2
|
158e395acdb8ca6ed6d488b43475f9ef9d200405
|
[
"Apache-2.0"
] | 15
|
2022-02-22T08:00:13.000Z
|
2022-03-29T06:29:45.000Z
|
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
from detectron2.config import LazyCall as L
from detectron2.modeling.backbone import RegNet
from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
# Config source:
# https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py # noqa
model.backbone.bottom_up = L(RegNet)(
stem_class=SimpleStem,
stem_width=32,
block_class=ResBottleneckBlock,
depth=22,
w_a=31.41,
w_0=96,
w_m=2.24,
group_width=64,
se_ratio=0.25,
norm="SyncBN",
out_features=["s1", "s2", "s3", "s4"],
)
model.pixel_std = [57.375, 57.120, 58.395]
# RegNets benefit from enabling cudnn benchmark mode
train.cudnn_benchmark = True
| 27.419355
| 138
| 0.736471
|
2efd6c477b78c9b6e59c5d01087f613f8b847020
| 6,426
|
py
|
Python
|
TTSpy.py
|
Er0s/PyTTS
|
f80bf1c373e5646c5c4517117859c1531ad38a61
|
[
"MIT"
] | 1
|
2016-12-30T01:56:00.000Z
|
2016-12-30T01:56:00.000Z
|
TTSpy.py
|
Er0s/PyTTS
|
f80bf1c373e5646c5c4517117859c1531ad38a61
|
[
"MIT"
] | null | null | null |
TTSpy.py
|
Er0s/PyTTS
|
f80bf1c373e5646c5c4517117859c1531ad38a61
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# coding=utf-8
# <ErosXD@qq.com>
"""
Created by IDLE.
File: TTSpy.py
User: Er0s
Create Date: 2016/9/26
Create Time: 15:30
"""
from __future__ import unicode_literals
from BGtext import BGtext
from sys import argv
import prompt
import ctypes
import urllib2
import urllib
import os
class TTSPY:
    def Error(self):
print 'Error!'
exit()
def TTS(self,lan,text):
self.text = text
self.lan = lan
self.filename ='temp.mp3'
self.url = 'http://fanyi.baidu.com/gettts?lan='+lan+'&text='+text+'&source=web'
urllib2.urlopen(self.url)
urllib.urlretrieve(self.url, filename=self.filename, reporthook=None, data=None)
ctypes.windll.winmm.mciSendStringA(b"play temp.mp3", 0, 0, 0)
os.remove(self.filename)
BGtext()
prompt.prompt()
if __name__ == '__main__':
app = TTSPY()
if len(argv) ==1:
app.lan = 'en'
app.text = 'HHH'
app.lan = raw_input("Language:").lower()
if app.lan == 'zh':
n = input("Number(1-10):")
if n == 1:
app.text = urllib.quote("鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅嗯~鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅嗯~鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅嗯!淦!妈的烂机车发不动~鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅嗯~鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅鹅嗯".encode('utf-8'))
elif n == 2:
app.text = urllib.quote("因为,绳命,是剁么的回晃;绳命,是入刺的井猜。壤窝们,巩痛嘱咐碰优。田下冯广宰饿妹,饿妹冯广宰呲处。壤窝们,嘱咐这缩优类缩优。开心的一小,火大的一小,壤绳命,梗楤容,壤绳命,梗秤巩,壤绳命,梗回晃。".encode('utf-8'))
elif n == 3:
app.text = urllib.quote("哦嘿菇狗翻溢,雷啊雷啊,抬举丽抖啦!,哦u汞滴猴嘿搞羞滴许哇,虽瘾哦母还猴歇汞广东娃,蛋嗨哦u汞呗雷听!哦丢雷过漏谋嗨!雷破该,累哼噶残,累哼噶玩允!".encode('utf-8'))
elif n == 4:
app.text = urllib.quote("桑伯奇怎的桑伯奇,偶桑你桑你桑到舔很案底,丹哈大格尼没怒有窄你还理,偶喊你喊你哈妮哈到刑辱鞋底".encode('utf-8'))
elif n == 5:
app.text = "du du du du du du du du du du du du du aaaaaaaaaaaaaaaadu du du du du du du du du du du duaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.fuck you!the motor cannot be use!!!!! damn it"
elif n == 6:
app.text = "wo z ni ma mai ma pee"
elif n == 7:
app.text = "chuie ni mala ger beee de G F W, wo ciao ni daaaaaah yea"
elif n == 8:
app.text = "haha tai hao wan la hia hia hia hiaa"
elif n == 9:
app.text = "abcdefghijklmnopqrstuvwxyz"
elif n == 10:
app.text = "Need yo ben shir chang nan ren,Need yo ben shir kai men na。kai men na,kai men na,kai men kai men kai men na,food when pe,food when pe,bie duzei li mian boochoosheng,wo zhi dao need zai zhia."
else:
app.Error()
elif app.lan == 'en':
n = input("Number(1-5):")
if n == 1:
app.text = "rinmin hao sheuundee."
elif n == 2:
app.text = "chee put out buu tue putout pee,buu chee put out doll"
elif n == 3:
app.text = "more zombie, zombiebay lay pee"
elif n == 4:
app.text = "ne ma be ne ma be,ne shi da sha be,song bo qi a song bo qi,you mu you you mu you"
elif n == 5:
app.text = "nimabi nimabi niseudasabi sangbochiasangbochi yomoyoyomoyo"
else:
app.Error()
elif app.lan == 'de':
n = input("Number(1-2):")
if n == 1:
app.text = "choon ger choon yermern"
elif n == 2:
app.text = "pv zk bschk pv zk pv bschk zk pv zk bschk pv zk pv bschk zk bschk pv bschk bschk pv kkkkkkkkkk bschk bschk bschk pv zk bschk pv zk pv bschk zk pv zk bschk pv zk pv bschk zk bschk pv bschk bschk pv kkkkkkkkkk bschk bschk bschk pv zk bschk pv zk pv bschk zk pv zk bschk pv zk pv bschk zk bschk pv bschk bschk pv kkkkkkkkkk bschk bschk bschk pv zk bschk pv zk pv bschk zk pv zk bschk pv zk pv bschk zk bschk pv bschk"
else:
app.Error()
elif app.lan == 'kor':
app.text = '%EB%8B%88%EB%A7%88%EB%B9%84%20%EB%8B%88%EB%A7%88%EB%B9%84%20%EB%8B%88%EC%8A%A4%EB%8B%A4%EC%82%AC%EB%B9%84%20%EC%83%81%EB%B3%B4%EC%B9%98%EC%95%84%EC%83%81%EB%B3%B4%EC%B9%98%20%EC%9A%94%EB%AA%A8%EC%9A%94%EC%9A%94%EB%AA%A8%EC%9A%94'
elif app.lan == 'jp':
n = input("Number(1-5):")
if n == 1:
app.text = "%E3%83%8B%E3%83%9E%E3%83%93%E3%83%8B%E3%83%9E%E3%83%93%E3%83%8B%E3%83%BC%E3%82%B9%E3%81%BE%E3%81%99"
elif n == 2:
app.text = "%E3%81%AB%E3%81%BE%E3%81%B3%20%E3%81%AB%E3%81%BE%E3%81%B3%20%E3%81%AB%E3%81%97%E3%81%A0%E3%81%97%E3%82%83%E3%81%B3"
elif n == 3:
app.text = "%E3%81%AB%E3%81%BE%E3%81%B3%E3%81%82%E3%81%AB%E3%81%BE%E3%81%B3,%E3%81%95%E3%82%93%E3%81%B6%E3%81%A1%E3%81%82%E3%81%95%E3%82%93%E3%81%B6%E3%81%A1,%E3%81%95%E3%82%93%E3%81%B6%E3%81%A1%E3%81%82%E3%81%82%E3%81%82%E3%81%82%E3%81%82%E3%81%82,%E3%82%88%E3%82%80%E3%82%88%E3%82%88%E3%82%80%E3%82%88"
elif n == 4:
app.text = "%E3%83%91%E3%83%81%E3%83%91%E3%83%81%E3%83%91%E3%83%81%E3%83%91%E3%83%81%E3%83%91%E3%83%81%E3%83%91%E3%83%81"
elif n == 5:
app.text = "%D0%BD%D0%B8%20ma%20%D0%B1%D0%B8%20%D0%BD%D0%B8%20ma%20%D0%B1%D0%B8%20%D0%BD%D0%B8%20%D1%88%D0%B8%20%D0%B3e%20%D0%B4a%20%D1%88a%20%D0%B1%D0%B8%20%D1%88a%D0%BD%20%D0%B1%D1%83%20%D1%9B%D0%B8%20%D1%88a%D0%BD%20%D0%B1%D1%83%20%D1%9B%D0%B8%20j%D1%83%20m%D1%83%20j%D1%83%20j%D1%83%20m%D1%83%20j%D1%83"
else:
app.Error()
elif app.lan == 'yue':
n = input("Number(1-3):")
if n == 1:
app.text = urllib.quote("嗯嗯嗯嫩嗯嗯嗯讷讷恩恩嫩恩恩恩呢嫩嗯嗯嗯嫩嗯嗯嗯恩恩呢嫩嗯嗯嗯嗯嗯嗯嗯嗯嗯嗯嗯嗯嗯嗯嗯嫩嗯嗯嗯嗯呢嫩嗯嗯嗯呢嫩嫩恩额恩恩谔谔".encode('utf-8'))
elif n == 2:
            app.text = urllib.quote("苟利国家生死以 岂能祸福趋避之".encode('utf-8'))
elif n == 3:
app.text = urllib.quote("你们造的这个句子啊,excited".encode('utf-8'))
else:
app.Error()
app.TTS(app.lan,app.text)
else:
app.Error()
#TODO
'''
else:
app.lan = argv[2];app.text=argv[4]
app.TTS(app.lan,app.text)
'''
| 51.822581
| 443
| 0.561625
|
076859e745e398b46d5c10513fdee7da341cbbd9
| 4,739
|
py
|
Python
|
src/python/tests/core/datastore/data_types_test.py
|
adetaylor/clusterfuzz
|
0260fd19a9e25ac62679f50ce3859c5a7486ee20
|
[
"Apache-2.0"
] | null | null | null |
src/python/tests/core/datastore/data_types_test.py
|
adetaylor/clusterfuzz
|
0260fd19a9e25ac62679f50ce3859c5a7486ee20
|
[
"Apache-2.0"
] | null | null | null |
src/python/tests/core/datastore/data_types_test.py
|
adetaylor/clusterfuzz
|
0260fd19a9e25ac62679f50ce3859c5a7486ee20
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""data_types tests."""
import unittest
from datastore import data_types
from tests.test_libs import test_utils
@test_utils.with_cloud_emulators('datastore')
class TestcaseTest(unittest.TestCase):
"""Test Testcase."""
def test_put(self):
"""Test put(). It should tokenize certain fields."""
testcase = data_types.Testcase()
testcase.crash_state = 'state'
testcase.crash_type = 'type'
testcase.fuzzer_name = 'fuzzer'
testcase.overridden_fuzzer_name = 'Overfuzzer'
testcase.job_type = 'job'
testcase.bug_information = '333'
testcase.group_id = 1234
testcase.group_bug_information = 999
testcase.impact_stable_version = 's.1'
testcase.impact_beta_version = 'b.3'
testcase.platform_id = 'windows'
testcase.project_name = 'chromium'
testcase.one_time_crasher_flag = False
testcase.is_impact_set_flag = True
testcase.put()
testcase = testcase.key.get()
self.assertSetEqual(
set(['state', 'type', 'job', 'fuzzer', 'overfuzzer', 'windows']),
set(testcase.keywords))
self.assertSetEqual(set(['333', '999']), set(testcase.bug_indices))
self.assertTrue(testcase.has_bug_flag)
self.assertSetEqual(
set(['fuzzer', 'overfuzzer']), set(testcase.fuzzer_name_indices))
self.assertSetEqual(
set(['s', 's.1']), set(testcase.impact_stable_version_indices))
self.assertSetEqual(
set(['b', 'b.3']), set(testcase.impact_beta_version_indices))
self.assertSetEqual(
set(['s', 's.1', 'b', 'b.3', 'stable', 'beta']),
set(testcase.impact_version_indices))
def test_put_head(self):
"""Tests put() when the impact is head."""
testcase = data_types.Testcase()
testcase.impact_stable_version = ''
testcase.impact_beta_version = ''
testcase.project_name = 'chromium'
testcase.one_time_crasher_flag = False
testcase.is_impact_set_flag = True
testcase.put()
testcase = testcase.key.get()
self.assertSetEqual(set(['head']), set(testcase.impact_version_indices))
def test_non_chromium(self):
"""Test put(). It should tokenize certain fields."""
testcase = data_types.Testcase()
testcase.impact_version_indices = ['head']
testcase.impact_stable_version = '4.5.6'
testcase.impact_beta_version = '1.2.3'
testcase.impact_stable_version_indices = ['s']
testcase.impact_beta_version_indices = ['b']
testcase.impact_stable_version_likely = True
testcase.impact_beta_version_likely = True
testcase.is_impact_set_flag = True
testcase.project_name = 'cobalt'
testcase.put()
testcase = testcase.key.get()
self.assertEqual([], testcase.impact_stable_version_indices)
self.assertEqual([], testcase.impact_beta_version_indices)
self.assertEqual([], testcase.impact_version_indices)
# We only clear the indices. The original data is kept.
self.assertEqual('1.2.3', testcase.impact_beta_version)
self.assertEqual('4.5.6', testcase.impact_stable_version)
self.assertTrue(testcase.is_impact_set_flag)
self.assertTrue(testcase.impact_stable_version_likely)
self.assertTrue(testcase.impact_beta_version_likely)
class FuzzTargetFullyQualifiedNameTest(unittest.TestCase):
"""Test fuzz_target_fully_qualified_name."""
def test_project_with_regular_chars(self):
self.assertEqual(
'libFuzzer_myproject_json_fuzzer',
data_types.fuzz_target_fully_qualified_name('libFuzzer', 'myproject',
'json_fuzzer'))
self.assertEqual(
'afl_test_project_hash_fuzzer',
data_types.fuzz_target_fully_qualified_name('afl', 'test_project',
'hash_fuzzer'))
def test_project_with_special_chars(self):
self.assertEqual(
'libFuzzer_third_party-llvm_clang_fuzzer',
data_types.fuzz_target_fully_qualified_name(
'libFuzzer', '//third_party/llvm', 'clang_fuzzer'))
self.assertEqual(
'afl_third_party-aspell-aspell_5_aspell_fuzzer',
data_types.fuzz_target_fully_qualified_name(
'afl', 'third_party:aspell:aspell_5', 'aspell_fuzzer'))
| 38.217742
| 77
| 0.7069
|
af4ef53985c0c794646d357ed6ab0e7b2972d0be
| 1,411
|
py
|
Python
|
pretrained_embedding/run_get_vec.py
|
ZouJoshua/deeptext_project
|
efb4db34a4a22951bba4ae724d05c41958bbd347
|
[
"Apache-2.0"
] | 2
|
2021-03-01T06:37:27.000Z
|
2021-04-07T09:40:55.000Z
|
pretrained_embedding/run_get_vec.py
|
ZouJoshua/deeptext_project
|
efb4db34a4a22951bba4ae724d05c41958bbd347
|
[
"Apache-2.0"
] | 5
|
2020-09-26T01:16:58.000Z
|
2022-02-10T01:49:30.000Z
|
pretrained_embedding/run_get_vec.py
|
ZouJoshua/deeptext_project
|
efb4db34a4a22951bba4ae724d05c41958bbd347
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Joshua
@Time : 2020/6/16 5:15 下午
@File : run_get_vec.py
@Desc :
"""
from .test_we import redis_conn, wrap_key
import json
import sys
import pandas as pd
import numpy as np
import multiprocessing
def read_redis(res):
res = _redis.get(wrap_key(res))
if res == None:
return None
res = json.loads(res)
res = ' '.join([str(item) for item in res])
return res
def get_vec(data_part, i, dict_txt):
data_part[1] = data_part.apply(lambda x: read_redis(x[0]), axis=1)
data_part = data_part.dropna(how='any', axis=0)
data_part.to_csv(dict_txt + '_' + str(i), sep='\t', index=None, header=None, mode='a')
if __name__ == "__main__":
_redis = redis_conn()
# file=open('token_all.dict')
# file2=open('token_vec_all.txt','w+')
dict_now = sys.argv[1]
dict_txt = sys.argv[2]
data = pd.read_csv(dict_now, sep='\t', quoting=3, header=None)
len_data = len(data)
pros = []
for i in range(30):
if i < 29:
data_temp = data.iloc[i * len(data) // 30:(i + 1) * len(data) // 30].copy()
else:
data_temp = data.iloc[i * len(data) // 30:len(data)].copy()
p = multiprocessing.Process(target=get_vec, args=(data_temp, i, dict_txt))
pros.append(p)
for item in pros:
item.start()
for item in pros:
item.join()
| 25.196429
| 90
| 0.598866
|
1fe2a5ddabb86f20e7e3fc5791545ffb9c6edfdb
| 2,694
|
py
|
Python
|
qa/rpc-tests/test_framework/blocktools.py
|
IndieProof/VerusCoin
|
948da26bcc8dc2d7bfce3decc40f31621de82a7c
|
[
"Unlicense"
] | 5
|
2020-10-09T05:47:25.000Z
|
2022-03-22T08:38:48.000Z
|
qa/rpc-tests/test_framework/blocktools.py
|
IndieProof/VerusCoin
|
948da26bcc8dc2d7bfce3decc40f31621de82a7c
|
[
"Unlicense"
] | 2
|
2020-01-29T12:26:04.000Z
|
2020-03-17T01:11:34.000Z
|
qa/rpc-tests/test_framework/blocktools.py
|
IndieProof/VerusCoin
|
948da26bcc8dc2d7bfce3decc40f31621de82a7c
|
[
"Unlicense"
] | 7
|
2018-07-28T21:54:40.000Z
|
2020-01-20T03:35:00.000Z
|
# blocktools.py - utilities for manipulating blocks and transactions
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from mininode import CBlock, CTransaction, CTxIn, CTxOut, COutPoint
from script import CScript, OP_0, OP_EQUAL, OP_HASH160
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None, nBits=None):
block = CBlock()
if nTime is None:
import time
block.nTime = int(time.time()+600)
else:
block.nTime = nTime
block.hashPrevBlock = hashprev
if nBits is None:
block.nBits = 0x200f0f0f # Will break after a difficulty adjustment...
else:
block.nBits = nBits
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
def serialize_script_num(value):
r = bytearray(0)
if value == 0:
return r
neg = value < 0
absvalue = -value if neg else value
while (absvalue):
r.append(chr(absvalue & 0xff))
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return r
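# Worked examples (results are little-endian byte sequences, as produced above):
#   serialize_script_num(0)     -> bytearray(b'')
#   serialize_script_num(1000)  -> bytearray(b'\xe8\x03')
#   serialize_script_num(-1000) -> bytearray(b'\xe8\x83')   # sign bit set on the top byte
#   serialize_script_num(128)   -> bytearray(b'\x80\x00')   # padding byte keeps it positive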
counter=1
# Create an anyone-can-spend coinbase transaction, assuming no miner fees
def create_coinbase(heightAdjust = 0):
global counter
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
CScript([counter+heightAdjust, OP_0]), 0xffffffff))
counter += 1
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = int(12.5*100000000)
halvings = int((counter+heightAdjust)/150) # regtest
coinbaseoutput.nValue >>= halvings
coinbaseoutput.scriptPubKey = ""
coinbase.vout = [ coinbaseoutput ]
if halvings == 0: # regtest
froutput = CTxOut()
froutput.nValue = coinbaseoutput.nValue / 5
# regtest
fraddr = bytearray([0x67, 0x08, 0xe6, 0x67, 0x0d, 0xb0, 0xb9, 0x50,
0xda, 0xc6, 0x80, 0x31, 0x02, 0x5c, 0xc5, 0xb6,
0x32, 0x13, 0xa4, 0x91])
froutput.scriptPubKey = CScript([OP_HASH160, fraddr, OP_EQUAL])
coinbaseoutput.nValue -= froutput.nValue
coinbase.vout = [ coinbaseoutput, froutput ]
coinbase.calc_sha256()
return coinbase
# Create a transaction with an anyone-can-spend output, that spends the
# nth output of prevtx.
def create_transaction(prevtx, n, sig, value):
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
tx.vout.append(CTxOut(value, ""))
tx.calc_sha256()
return tx
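# A minimal usage sketch tying the helpers together; `prev_hash` and `prev_coinbase`
# are hypothetical values that would normally come from the node under test.
def _example_block_and_spend(prev_hash, prev_coinbase):
    coinbase = create_coinbase()
    block = create_block(prev_hash, coinbase, nTime=1400000000)
    # Spend output 0 of an earlier coinbase with an empty (anyone-can-spend) scriptSig.
    spend = create_transaction(prev_coinbase, 0, "", int(1 * 100000000))
    return block, spend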
| 34.101266
| 78
| 0.657387
|
814f9bb8b748e2a9ffac0a140d188d94c5e2da68
| 3,520
|
py
|
Python
|
code.py
|
shadow09rj/olympic-hero
|
652f9f88241e3320cdeb16abee18af75d4aef3e7
|
[
"MIT"
] | null | null | null |
code.py
|
shadow09rj/olympic-hero
|
652f9f88241e3320cdeb16abee18af75d4aef3e7
|
[
"MIT"
] | null | null | null |
code.py
|
shadow09rj/olympic-hero
|
652f9f88241e3320cdeb16abee18af75d4aef3e7
|
[
"MIT"
] | null | null | null |
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
data= pd.read_csv(path)
data.rename(columns = {'Total':'Total_Medals'},inplace=True)
print(data.head(10))
#Code starts here
# --------------
#Code starts here
data['Better_Event'] = np.where(data['Total_Summer']>data['Total_Winter'] ,
'Summer','Winter' )
data['Better_Event'] = np.where(data['Total_Summer']==data['Total_Winter'] ,
'Both',data['Better_Event'] )
better_event = data['Better_Event'].value_counts().idxmax()
print(better_event)
# --------------
#Code starts here
top_countries = data[['Country_Name','Total_Summer', 'Total_Winter',
'Total_Medals']]
top_countries.drop(data.index[-1],inplace=True)
def top_ten(top_countries,column):
country_list = []
countries = top_countries.nlargest(10,column)
country_list = countries['Country_Name'].tolist()
return country_list
top_10_summer =top_ten(top_countries,'Total_Summer')
top_10_winter =top_ten(top_countries,'Total_Winter')
top_10 =top_ten(top_countries,'Total_Medals')
common = [i for i in top_10_summer for j in top_10_winter for k in top_10 if i==j==k]
print(common)
# --------------
#Code starts here
summer_df = data[data['Country_Name'].isin(top_10_summer)]
winter_df = data[data['Country_Name'].isin(top_10_winter)]
top_df = data[data['Country_Name'].isin(top_10)]
#fig, (ax_1,ax_2,ax_3) = plt.subplot(1,3, figsize = (20,10))
#plot 1
summer_df.plot('Country_Name','Total_Summer',kind='bar',color='r')
plt.xlabel("Countries")
plt.xticks(rotation=45)
plt.title('Medal counts for summer top 10 teams')
plt.show()
#plot 2
winter_df.plot('Country_Name','Total_Winter',kind='bar',color='b')
plt.xlabel("Countries")
plt.xticks(rotation=45)
plt.title('Medal counts for winter top 10 teams')
plt.show()
#plot 3
top_df.plot('Country_Name','Total_Medals',kind='bar',color='g')
plt.xlabel("Countries")
plt.xticks(rotation=45)
plt.title('Medal counts for all over top 10 teams')
plt.show()
# --------------
#Code starts here
#summer max gold
summer_df['Golden_Ratio'] = summer_df['Gold_Summer'] / summer_df['Total_Summer']
summer_max_ratio = summer_df['Golden_Ratio'].max()
summer_country_gold = summer_df[summer_df['Golden_Ratio'] == summer_max_ratio]['Country_Name'].to_string(index=False)
#winter max gold
winter_df['Golden_Ratio'] = winter_df['Gold_Winter'] / winter_df['Total_Winter']
winter_max_ratio = winter_df['Golden_Ratio'].max()
winter_country_gold =winter_df[winter_df['Golden_Ratio'] == winter_max_ratio]['Country_Name'].to_string(index=False)
#top max gold
top_df['Golden_Ratio'] = top_df['Gold_Total'] / top_df['Total_Medals']
top_max_ratio = top_df['Golden_Ratio'].max()
top_country_gold = top_df[top_df['Golden_Ratio'] == top_max_ratio]['Country_Name'].to_string(index=False)
# --------------
#Code starts here
data_1 = data.drop(data.index[-1])
data_1['Total_Points'] = 3*data_1['Gold_Total'] + 2*data_1['Silver_Total'] + 1*data_1['Bronze_Total']
most_points = data_1['Total_Points'].max()
best_country = data_1[data_1['Total_Points'] == most_points]['Country_Name'].to_string(index=False)
# --------------
#Code starts here
best = data[data['Country_Name'] == best_country]
best = best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot(kind='bar',stacked=True)
plt.xlabel('United States')
plt.ylabel('Medals Tally')
plt.xticks(rotation=45)
plt.show()
| 30.344828
| 118
| 0.705398
|
1d6da27bb44f335a5aeb3e2dd567f903d5a2d8b5
| 5,154
|
py
|
Python
|
.github/scripts/flake.py
|
timmattison/aws-greengrass-nucleus
|
9d2fc753fe4b45f1d53862f3e19019d22fff2ca3
|
[
"ECL-2.0",
"Apache-2.0"
] | 71
|
2020-12-15T17:27:53.000Z
|
2022-03-04T19:30:39.000Z
|
.github/scripts/flake.py
|
timmattison/aws-greengrass-nucleus
|
9d2fc753fe4b45f1d53862f3e19019d22fff2ca3
|
[
"ECL-2.0",
"Apache-2.0"
] | 363
|
2020-12-15T17:27:40.000Z
|
2022-03-31T22:22:35.000Z
|
.github/scripts/flake.py
|
timmattison/aws-greengrass-nucleus
|
9d2fc753fe4b45f1d53862f3e19019d22fff2ca3
|
[
"ECL-2.0",
"Apache-2.0"
] | 27
|
2020-12-15T22:01:45.000Z
|
2022-03-25T04:44:06.000Z
|
# Copyright Amazon.com Inc. or its affiliates.
# SPDX-License-Identifier: Apache-2.0
import argparse
import json
import os
import subprocess
import xml.etree.ElementTree as ET
from collections import defaultdict
from agithub.GitHub import GitHub
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--cmd', type=str, help='Command to run')
parser.add_argument('-i', type=int, help='Iterations')
parser.add_argument('--token', type=str, help='GitHub token')
parser.add_argument('--out-dir', type=str, help='Failed test output dir')
parser.add_argument('-ff', action="store_true", help='Fail fast. If enabled, quit '
'after the first failure')
args = parser.parse_args()
command = args.cmd
iterations = args.i
token = args.token
# Dict for results as a dict of classname -> method name -> [failure details]
results = defaultdict(lambda: defaultdict(list))
for i in range(0, iterations):
print(f"Running iteration {i + 1} of {iterations}", flush=True)
process = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
# If the tests failed, then we should check which test(s) failed in order to report it
if process.returncode != 0:
print(f"Iteration {i + 1} failed, saving and parsing results now", flush=True)
os.makedirs(args.out_dir, exist_ok=True)
with open(f'{args.out_dir}{i+1}-full-stdout.txt', 'w') as f:
f.write(process.stdout.decode('utf-8'))
with open(f'{args.out_dir}{i+1}-full-stderr.txt', 'w') as f:
f.write(process.stderr.decode('utf-8'))
parse_test_results(i, results, args.out_dir)
if args.ff:
break
else:
print("Succeeded with no failure", flush=True)
if len(results) == 0:
return
print("Found some flakiness. Creating/updating GitHub issue.", flush=True)
print(json.dumps(results), flush=True)
gh = GitHub(token=token)
title = "[Bot] Flaky Test(s) Identified"
existing_issues = gh.repos[os.getenv("GITHUB_REPOSITORY")].issues.get(creator="app/github-actions")
if existing_issues[0] == 200:
existing_issues = list(filter(lambda i: title in i["title"], existing_issues[1]))
else:
existing_issues = []
body = f"Flaky test(s) found for commit {os.getenv('GITHUB_SHA')}.\n" \
f" See the uploaded artifacts from the action for details.\n\n"
for test_class, v in results.items():
for test_case, failures in v.items():
body += f"- {test_class}.{test_case} failed {len(failures)} times over {iterations} iterations "
unique_failure_reasons = set(map(lambda f: f["failure"], failures))
body += f"with {len(unique_failure_reasons)} unique failures.\n"
if existing_issues:
issue_number = existing_issues[0]["number"]
updated_issue = gh.repos[os.getenv("GITHUB_REPOSITORY")].issues[issue_number].patch(body={"body": body,
"title": title})
print(updated_issue, flush=True)
else:
issue = gh.repos[os.getenv("GITHUB_REPOSITORY")].issues.post(body={"body": body,
"title": title})
print(issue, flush=True)
def parse_test_results(iteration, previous_results, failed_test_dir):
report_dir = "target/surefire-reports/"
if not os.path.exists(report_dir):
return
reports = list(filter(lambda f: f.startswith("TEST-") and f.endswith(".xml"), os.listdir(report_dir)))
for r in reports:
tree = ET.parse(report_dir + r)
for testcase in tree.getroot().findall("./testcase"):
failure = None
# Find failures and errors (there's no important difference between these for us)
if testcase.find("failure") is not None:
failure = testcase.find("failure").text
elif testcase.find("error") is not None:
failure = testcase.find("error").text
if failure is None:
continue
previous_results[testcase.get("classname")][testcase.get("name")] \
.append({"iteration": iteration, "failure": failure})
# Save test stdout and stderr
file_path_prefix = f'{failed_test_dir}{iteration}-{testcase.get("classname")}.{testcase.get("name")}-'
if testcase.find("system-out") is not None:
with open(f'{file_path_prefix}stdout.txt', 'w') as f:
f.write(testcase.find("system-out").text)
if testcase.find("system-err") is not None:
with open(f'{file_path_prefix}stderr.txt', 'w') as f:
f.write(testcase.find("system-err").text)
# Save test failure exception traceback
with open(f'{file_path_prefix}error.txt', 'w') as f:
f.write(failure)
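# For reference, the accumulated `results`/`previous_results` mapping looks like this
# (class, test, and failure text are illustrative):
#   {
#     "com.example.FooTest": {
#       "testBar": [
#         {"iteration": 0, "failure": "<assertion message / stack trace>"},
#         {"iteration": 3, "failure": "<assertion message / stack trace>"}
#       ]
#     }
#   }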
if __name__ == '__main__':
main()
| 44.051282
| 114
| 0.602833
|
b844eeed206170bd99b08d212ed96670ec43f94b
| 849
|
py
|
Python
|
image_space_sh_light.py
|
torresguilherme/spherical-harmonics-demo
|
4830dddafeab9ba6b4eff7ae4167a1bc5db16fef
|
[
"MIT"
] | 1
|
2021-01-13T11:03:28.000Z
|
2021-01-13T11:03:28.000Z
|
image_space_sh_light.py
|
torresguilherme/spherical-harmonics-demo
|
4830dddafeab9ba6b4eff7ae4167a1bc5db16fef
|
[
"MIT"
] | null | null | null |
image_space_sh_light.py
|
torresguilherme/spherical-harmonics-demo
|
4830dddafeab9ba6b4eff7ae4167a1bc5db16fef
|
[
"MIT"
] | null | null | null |
import numpy as np
import glob
from PIL import Image
import sys
try:
i = 0
for lightname in sorted(glob.glob(sys.argv[1] + '*.npy')):
print("rendering image with light: " + lightname)
light = np.load(lightname)
if light.shape[0] < light.shape[1]:
light = light.T
transport = np.load(sys.argv[2])["T"]
print(light.shape)
print(transport.shape)
albedo = Image.open(sys.argv[3])
shading = np.matmul(transport, light)
rendering = albedo * shading
image_output = Image.fromarray((rendering).astype(np.uint8))
image_output.save(sys.argv[4] + ("frame%08d" % i) + "_relight2d.jpg")
i += 1
except IndexError:
print("Usage: python3 image_space_sh_light.py <directory with light data> <transport matrix> <albedo image> <output directory>")
| 36.913043
| 132
| 0.628975
|
f5770eefde9a3ef982f9d4ea92972588c97258d5
| 3,202
|
py
|
Python
|
salt/states/mount.py
|
SEJeff/salt
|
788402c650a4c821140b461317119ea0543fee4c
|
[
"Apache-2.0"
] | null | null | null |
salt/states/mount.py
|
SEJeff/salt
|
788402c650a4c821140b461317119ea0543fee4c
|
[
"Apache-2.0"
] | null | null | null |
salt/states/mount.py
|
SEJeff/salt
|
788402c650a4c821140b461317119ea0543fee4c
|
[
"Apache-2.0"
] | null | null | null |
'''
Mount Management
================
Mount any type of mountable filesystem with the mounted function:
.. code-block:: yaml
/mnt/sdb:
mount:
- mounted
- device: /dev/sdb1
- fstype: ext4
- mkmnt: True
- opts:
- defaults
'''
def mounted(
name,
device,
fstype,
mkmnt=False,
opts=['defaults'],
dump=0,
pass_num=0,
config='/etc/fstab',
remount=True, # FIXME: where is 'remount' used?
persist=True,
):
'''
Verify that a device is mounted
name
The path to the location where the device is to be mounted
device
The device name, typically the device node, such as /dev/sdb1
fstype
The filesystem type, this will be xfs, ext2/3/4 in the case of classic
filesystems, and fuse in the case of fuse mounts
mkmnt
If the mount point is not present then the state will fail, set mkmnt
to True to create the mount point if it is otherwise not present
opts
A list object of options or a comma delimited list
dump
The dump value to be passed into the fstab, default to 0
pass_num
The pass value to be passed into the fstab, default to 0
config
Set an alternative location for the fstab, default to /etc/fstab
remount
Set if the file system can be remounted with the remount option,
default to True
persist
Set if the mount should be saved in the fstab, default to True
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
# Make sure that opts is correct, it can be a list or a comma delimited
# string
if type(opts) == type(str()):
opts = opts.split(',')
# Get the active data
active = __salt__['mount.active']()
if name not in active:
# The mount is not present! Mount it
out = __salt__['mount.mount'](name, device, mkmnt, fstype, opts)
if type(out) == type(str()):
            # Failed to mount, the state has failed!
ret['comment'] = out
ret['result'] = False
elif out == True:
            # Mount worked!
ret['changes']['mount'] = True
if persist:
# present, new, change, bad config
# Make sure the entry is in the fstab
out = __salt__['mount.set_fstab'](
name,
device,
fstype,
opts,
dump,
pass_num,
config)
if out == 'present':
return ret
if out == 'new':
ret['changes']['persist'] = 'new'
ret['comment'] += ' and added new entry to the fstab'
return ret
if out == 'change':
ret['changes']['persist'] = 'update'
ret['comment'] += ' and updated the entry in the fstab'
return ret
if out == 'bad config':
ret['result'] = False
ret['comment'] += ' but the fstab was not found'
return ret
return ret
| 27.367521
| 78
| 0.527795
|
1c1ed34fc7641b18161e069c4229be7c7b7a2afc
| 18,891
|
py
|
Python
|
rpython/translator/backendopt/test/test_inline.py
|
jptomo/pypy-lang-scheme
|
55edb2cec69d78f86793282a4566fcbc1ef9fcac
|
[
"MIT"
] | 34
|
2015-07-09T04:53:27.000Z
|
2021-07-19T05:22:27.000Z
|
rpython/translator/backendopt/test/test_inline.py
|
jptomo/pypy-lang-scheme
|
55edb2cec69d78f86793282a4566fcbc1ef9fcac
|
[
"MIT"
] | 6
|
2015-05-30T17:20:45.000Z
|
2017-06-12T14:29:23.000Z
|
rpython/translator/backendopt/test/test_inline.py
|
jptomo/pypy-lang-scheme
|
55edb2cec69d78f86793282a4566fcbc1ef9fcac
|
[
"MIT"
] | 11
|
2015-09-07T14:26:08.000Z
|
2020-04-10T07:20:41.000Z
|
# XXX clean up these tests to use more uniform helpers
import py
from rpython.flowspace.model import Variable, Constant, checkgraph
from rpython.translator.backendopt import canraise
from rpython.translator.backendopt.inline import (simple_inline_function,
CannotInline, auto_inlining, Inliner, collect_called_graphs,
measure_median_execution_cost, instrument_inline_candidates,
auto_inline_graphs)
from rpython.translator.translator import TranslationContext, graphof
from rpython.rtyper.llinterp import LLInterpreter
from rpython.rtyper.test.tool import BaseRtypingTest
from rpython.rlib.rarithmetic import ovfcheck
from rpython.translator.test.snippet import is_perfect_number
from rpython.translator.backendopt.all import INLINE_THRESHOLD_FOR_TEST
from rpython.conftest import option
from rpython.translator.backendopt import removenoops
from rpython.flowspace.model import summary
def sanity_check(t):
# look for missing '.concretetype'
for graph in t.graphs:
checkgraph(graph)
for node in graph.iterblocks():
for v in node.inputargs:
assert hasattr(v, 'concretetype')
for op in node.operations:
for v in op.args:
assert hasattr(v, 'concretetype')
assert hasattr(op.result, 'concretetype')
for node in graph.iterlinks():
if node.exitcase is not None:
assert hasattr(node, 'llexitcase')
for v in node.args:
assert hasattr(v, 'concretetype')
if isinstance(node.last_exception, (Variable, Constant)):
assert hasattr(node.last_exception, 'concretetype')
if isinstance(node.last_exc_value, (Variable, Constant)):
assert hasattr(node.last_exc_value, 'concretetype')
class CustomError1(Exception):
def __init__(self):
self.data = 123
class CustomError2(Exception):
def __init__(self):
self.data2 = 456
class TestInline(BaseRtypingTest):
def translate(self, func, argtypes):
t = TranslationContext()
t.buildannotator().build_types(func, argtypes)
t.buildrtyper().specialize()
return t
def check_inline(self, func, in_func, sig, entry=None,
inline_guarded_calls=False,
graph=False):
if entry is None:
entry = in_func
t = self.translate(entry, sig)
# inline!
sanity_check(t) # also check before inlining (so we don't blame it)
if option.view:
t.view()
raise_analyzer = canraise.RaiseAnalyzer(t)
inliner = Inliner(t, graphof(t, in_func), func,
t.rtyper.lltype_to_classdef_mapping(),
inline_guarded_calls,
raise_analyzer=raise_analyzer)
inliner.inline_all()
if option.view:
t.view()
sanity_check(t)
interp = LLInterpreter(t.rtyper)
def eval_func(args):
return interp.eval_graph(graphof(t, entry), args)
if graph:
return eval_func, graphof(t, func)
return eval_func
def check_auto_inlining(self, func, sig, multiplier=None, call_count_check=False,
remove_same_as=False, heuristic=None, const_fold_first=False):
t = self.translate(func, sig)
if const_fold_first:
from rpython.translator.backendopt.constfold import constant_fold_graph
from rpython.translator.simplify import eliminate_empty_blocks
for graph in t.graphs:
constant_fold_graph(graph)
eliminate_empty_blocks(graph)
if option.view:
t.view()
# inline!
sanity_check(t) # also check before inlining (so we don't blame it)
threshold = INLINE_THRESHOLD_FOR_TEST
if multiplier is not None:
threshold *= multiplier
call_count_pred = None
if call_count_check:
call_count_pred = lambda lbl: True
instrument_inline_candidates(t.graphs, threshold)
if remove_same_as:
for graph in t.graphs:
removenoops.remove_same_as(graph)
if heuristic is not None:
kwargs = {"heuristic": heuristic}
else:
kwargs = {}
auto_inlining(t, threshold, call_count_pred=call_count_pred, **kwargs)
sanity_check(t)
if option.view:
t.view()
interp = LLInterpreter(t.rtyper)
def eval_func(args):
return interp.eval_graph(graphof(t, func), args)
return eval_func, t
def test_inline_simple(self):
def f(x, y):
return (g(x, y) + 1) * x
def g(x, y):
if x > 0:
return x * y
else:
return -x * y
eval_func = self.check_inline(g, f, [int, int])
result = eval_func([-1, 5])
assert result == f(-1, 5)
result = eval_func([2, 12])
assert result == f(2, 12)
def test_nothing_to_inline(self):
def f():
return 1
def g():
return 2
eval_func = self.check_inline(g, f, [])
assert eval_func([]) == 1
def test_inline_big(self):
def f(x):
result = []
for i in range(1, x+1):
if is_perfect_number(i):
result.append(i)
return result
eval_func = self.check_inline(is_perfect_number, f, [int])
result = eval_func([10])
result = self.ll_to_list(result)
assert len(result) == len(f(10))
def test_inline_raising(self):
def f(x):
if x == 1:
raise CustomError1
return x
def g(x):
a = f(x)
if x == 2:
raise CustomError2
def h(x):
try:
g(x)
except CustomError1:
return 1
except CustomError2:
return 2
return x
eval_func = self.check_inline(f,g, [int], entry=h)
result = eval_func([0])
assert result == 0
result = eval_func([1])
assert result == 1
result = eval_func([2])
assert result == 2
def test_inline_several_times(self):
def f(x):
return (x + 1) * 2
def g(x):
if x:
a = f(x) + f(x)
else:
a = f(x) + 1
return a + f(x)
eval_func = self.check_inline(f, g, [int])
result = eval_func([0])
assert result == g(0)
result = eval_func([42])
assert result == g(42)
def test_always_inline(self):
def f(x, y, z, k):
p = (((x, y), z), k)
return p[0][0][0] + p[-1]
f._always_inline_ = True
def g(x, y, z, k):
a = f(x, y, z, k)
return a
eval_func, t = self.check_auto_inlining(g, [int, int, int, int], multiplier=0.1)
graph = graphof(t, g)
s = summary(graph)
assert len(s) > 3
def test_inline_exceptions(self):
customError1 = CustomError1()
customError2 = CustomError2()
def f(x):
if x == 0:
raise customError1
if x == 1:
raise customError2
def g(x):
try:
f(x)
except CustomError1:
return 2
except CustomError2:
return x+2
return 1
eval_func = self.check_inline(f, g, [int])
result = eval_func([0])
assert result == 2
result = eval_func([1])
assert result == 3
result = eval_func([42])
assert result == 1
def test_inline_const_exceptions(self):
valueError = ValueError()
keyError = KeyError()
def f(x):
if x == 0:
raise valueError
if x == 1:
raise keyError
def g(x):
try:
f(x)
except ValueError:
return 2
except KeyError:
return x+2
return 1
eval_func = self.check_inline(f, g, [int])
result = eval_func([0])
assert result == 2
result = eval_func([1])
assert result == 3
result = eval_func([42])
assert result == 1
def test_inline_exception_guarded(self):
def h(x):
if x == 1:
raise CustomError1()
elif x == 2:
raise CustomError2()
return 1
def f(x):
try:
return h(x)
except:
return 87
def g(x):
try:
return f(x)
except CustomError1:
return 2
eval_func = self.check_inline(f, g, [int], inline_guarded_calls=True)
result = eval_func([0])
assert result == 1
result = eval_func([1])
assert result == 87
result = eval_func([2])
assert result == 87
def test_inline_with_raising_non_call_op(self):
class A:
pass
def f():
return A()
def g():
try:
a = f()
except MemoryError:
return 1
return 2
py.test.raises(CannotInline, self.check_inline, f, g, [])
def test_inline_var_exception(self):
def f(x):
e = None
if x == 0:
e = CustomError1()
elif x == 1:
e = KeyError()
if x == 0 or x == 1:
raise e
def g(x):
try:
f(x)
except CustomError1:
return 2
except KeyError:
return 3
return 1
eval_func, _ = self.check_auto_inlining(g, [int], multiplier=10)
result = eval_func([0])
assert result == 2
result = eval_func([1])
assert result == 3
result = eval_func([42])
assert result == 1
def test_inline_nonraising_into_catching(self):
def f(x):
return x+1
def g(x):
try:
return f(x)
except KeyError:
return 42
eval_func = self.check_inline(f, g, [int])
result = eval_func([7654])
assert result == 7655
def DONOTtest_call_call(self):
# for reference. Just remove this test if we decide not to support
# catching exceptions while inlining a graph that contains further
# direct_calls.
def e(x):
if x < 0:
raise KeyError
return x+1
def f(x):
return e(x)+2
def g(x):
try:
return f(x)+3
except KeyError:
return -1
eval_func = self.check_inline(f, g, [int])
result = eval_func([100])
assert result == 106
        result = eval_func([-100])
assert result == -1
def test_for_loop(self):
def f(x):
result = 0
for i in range(0, x):
result += i
return result
t = self.translate(f, [int])
sanity_check(t) # also check before inlining (so we don't blame it)
for graph in t.graphs:
if graph.name.startswith('ll_rangenext'):
break
else:
assert 0, "cannot find ll_rangenext_*() function"
simple_inline_function(t, graph, graphof(t, f))
sanity_check(t)
interp = LLInterpreter(t.rtyper)
result = interp.eval_graph(graphof(t, f), [10])
assert result == 45
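# inline A.__init__ into f and check the attribute set in the constructor is
# still visible to the later area() call.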
def test_inline_constructor(self):
class A:
def __init__(self, x, y):
self.bounds = (x, y)
def area(self, height=10):
return height * (self.bounds[1] - self.bounds[0])
def f(i):
a = A(117, i)
return a.area()
eval_func = self.check_inline(A.__init__.im_func, f, [int])
result = eval_func([120])
assert result == 30
def test_cannot_inline_recursive_function(self):
def factorial(n):
if n > 1:
return n * factorial(n-1)
else:
return 1
def f(n):
return factorial(n//2)
py.test.raises(CannotInline, self.check_inline, factorial, f, [int])
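# with multiplier=10 both g and leaf get auto-inlined, so f keeps no residual
# calls, and the OverflowError raised in the inlined code is still caught.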
def test_auto_inlining_small_call_big(self):
def leaf(n):
total = 0
i = 0
while i < n:
total += i
if total > 100:
raise OverflowError
i += 1
return total
def g(n):
return leaf(n)
def f(n):
try:
return g(n)
except OverflowError:
return -1
eval_func, t = self.check_auto_inlining(f, [int], multiplier=10)
f_graph = graphof(t, f)
assert len(collect_called_graphs(f_graph, t)) == 0
result = eval_func([10])
assert result == 45
result = eval_func([15])
assert result == -1
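# same scenario as above, but with the call-count based weighting enabled
# (call_count_check=True).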
def test_auto_inlining_small_call_big_call_count(self):
def leaf(n):
total = 0
i = 0
while i < n:
total += i
if total > 100:
raise OverflowError
i += 1
return total
def g(n):
return leaf(n)
def f(n):
try:
return g(n)
except OverflowError:
return -1
eval_func, t = self.check_auto_inlining(f, [int], multiplier=10,
call_count_check=True)
f_graph = graphof(t, f)
assert len(collect_called_graphs(f_graph, t)) == 0
result = eval_func([10])
assert result == 45
result = eval_func([15])
assert result == -1
def test_inline_exception_catching(self):
def f3():
raise CustomError1
def f2():
try:
f3()
except CustomError1:
return True
else:
return False
def f():
return f2()
eval_func = self.check_inline(f2, f, [])
result = eval_func([])
assert result is True
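# f2 only catches (and re-raises) OverflowError from ovfcheck, while f catches
# ValueError; after inlining, the OverflowError path must stay a raising exit
# instead of being caught by the wrong handler.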
def test_inline_catching_different_exception(self):
d = {1: 2}
def f2(n):
try:
return ovfcheck(n+1)
except OverflowError:
raise
def f(n):
try:
return f2(n)
except ValueError:
return -1
eval_func = self.check_inline(f2, f, [int])
result = eval_func([54])
assert result == 55
def test_inline_raiseonly(self):
c = CustomError1()
def f2(x):
raise c
def f(x):
try:
return f2(x)
except CustomError1:
return 42
eval_func = self.check_inline(f2, f, [int])
result = eval_func([98371])
assert result == 42
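# build the flow graph of f without rtyping and check that
# measure_median_execution_cost weights its blocks to roughly 32.33.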
def test_measure_median_execution_cost(self):
def f(x):
x += 1
x += 1
x += 1
while True:
x += 1
x += 1
x += 1
if x: break
x += 1
x += 1
x += 1
x += 1
x += 1
x += 1
return x
t = TranslationContext()
graph = t.buildflowgraph(f)
res = measure_median_execution_cost(graph)
assert round(res, 5) == round(32.333333333, 5)
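# x3 calls through a function pointer; inlining such an indirect call into the
# exception-catching x4 is not supported and must raise CannotInline.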
def test_indirect_call_with_exception(self):
class Dummy:
pass
def x1():
return Dummy() # can raise MemoryError
def x2():
return None
def x3(x):
if x:
f = x1
else:
f = x2
return f()
def x4():
try:
x3(0)
x3(1)
except CustomError2:
return 0
return 1
assert x4() == 1
py.test.raises(CannotInline, self.check_inline, x3, x4, [])
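# auto-inlining should absorb the list-iteration helpers, leaving f's graph with
# no residual calls.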
def test_list_iteration(self):
def f():
tot = 0
for item in [1,2,3]:
tot += item
return tot
eval_func, t = self.check_auto_inlining(f, [])
f_graph = graphof(t, f)
called_graphs = collect_called_graphs(f_graph, t)
assert len(called_graphs) == 0
result = eval_func([])
assert result == 6
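# exercise find_exception_type with same_as removal and prior constant folding;
# the calls just have to evaluate without crashing (no result is asserted).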
def test_bug_in_find_exception_type(self):
def h():
pass
def g(i):
if i > 0:
raise IndexError
else:
h()
def f(i):
try:
g(i)
except IndexError:
pass
eval_func, t = self.check_auto_inlining(f, [int], remove_same_as=True,
const_fold_first=True)
eval_func([-66])
eval_func([282])
def test_correct_keepalive_placement(self):
def h(x):
if not x:
raise ValueError
return 1
def f(x):
s = "a %s" % (x, )
try:
h(len(s))
except ValueError:
pass
return -42
eval_func, t = self.check_auto_inlining(f, [int])
res = eval_func([42])
assert res == -42
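# y is an interior pointer into the GC structure x; inlining g must place
# keepalives so that y.n can still be read after the inlined call.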
def test_keepalive_hard_case(self):
from rpython.rtyper.lltypesystem import lltype
Y = lltype.Struct('y', ('n', lltype.Signed))
X = lltype.GcStruct('x', ('y', Y))
def g(x):
if x:
return 3
else:
return 4
def f():
x = lltype.malloc(X)
x.y.n = 2
y = x.y
z1 = g(y.n)
z = y.n
return z+z1
eval_func = self.check_inline(g, f, [])
res = eval_func([])
assert res == 5
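# by default auto_inline_graphs only inlines graphs from the given list, so the
# call to leaf survives; with inline_graph_from_anywhere=True it finally gets
# inlined into f.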
def test_auto_inline_graphs_from_anywhere(self):
def leaf(n):
return n
def f(n):
return leaf(n)
t = self.translate(f, [int])
f_graph = graphof(t, f)
assert len(collect_called_graphs(f_graph, t)) == 1
auto_inline_graphs(t, [f_graph], 32)
assert len(collect_called_graphs(f_graph, t)) == 1
auto_inline_graphs(t, [f_graph], 32, inline_graph_from_anywhere=True)
assert len(collect_called_graphs(f_graph, t)) == 0
avg_line_length: 30.274038
max_line_length: 90
alphanum_fraction: 0.500397